query
stringlengths 8
6.75k
| document
stringlengths 9
1.89M
| negatives
listlengths 19
19
| metadata
dict |
---|---|---|---|
checkEndpointNameAvailabilityHandleResponse handles the CheckEndpointNameAvailability response.
|
func (client *ManagementClient) checkEndpointNameAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckEndpointNameAvailabilityResponse, error) {
result := ManagementClientCheckEndpointNameAvailabilityResponse{}
if err := runtime.UnmarshalAsJSON(resp, &result.CheckEndpointNameAvailabilityOutput); err != nil {
return ManagementClientCheckEndpointNameAvailabilityResponse{}, err
}
return result, nil
}
|
[
"func (client *DataConnectionsClient) checkNameAvailabilityHandleResponse(resp *http.Response) (DataConnectionsClientCheckNameAvailabilityResponse, error) {\n\tresult := DataConnectionsClientCheckNameAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameResult); err != nil {\n\t\treturn DataConnectionsClientCheckNameAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkNameAvailabilityWithSubscriptionHandleResponse(resp *http.Response) (ManagementClientCheckNameAvailabilityWithSubscriptionResponse, error) {\n\tresult := ManagementClientCheckNameAvailabilityWithSubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityOutput); err != nil {\n\t\treturn ManagementClientCheckNameAvailabilityWithSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *CdnManagementClient) checkNameAvailabilityWithSubscriptionHandleResponse(resp *http.Response) (CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse, error) {\n\tresult := CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityOutput); err != nil {\n\t\treturn CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client KustoPoolsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client *PrivateEndpointConnectionClient) getByNameHandleResponse(resp *http.Response) (PrivateEndpointConnectionClientGetByNameResponse, error) {\n\tresult := PrivateEndpointConnectionClientGetByNameResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PrivateEndpointConnection); err != nil {\n\t\treturn PrivateEndpointConnectionClientGetByNameResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ResourceClient) checkFilePathAvailabilityHandleResponse(resp *http.Response) (ResourceClientCheckFilePathAvailabilityResponse, error) {\n\tresult := ResourceClientCheckFilePathAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckAvailabilityResponse); err != nil {\n\t\treturn ResourceClientCheckFilePathAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkDomainAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckDomainAvailabilityResponse, error) {\n\tresult := ManagementClientCheckDomainAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DomainAvailability); err != nil {\n\t\treturn ManagementClientCheckDomainAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ResourceClient) checkQuotaAvailabilityHandleResponse(resp *http.Response) (ResourceClientCheckQuotaAvailabilityResponse, error) {\n\tresult := ResourceClientCheckQuotaAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckAvailabilityResponse); err != nil {\n\t\treturn ResourceClientCheckQuotaAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client KustoPoolsClient) CheckNameAvailability(ctx context.Context, location string, kustoPoolName KustoPoolCheckNameRequest) (result CheckNameResult, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/KustoPoolsClient.CheckNameAvailability\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: location,\n\t\t\tConstraints: []validation.Constraint{{Target: \"location\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: kustoPoolName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"kustoPoolName.Name\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t{Target: \"kustoPoolName.Type\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"synapse.KustoPoolsClient\", \"CheckNameAvailability\", err.Error())\n\t}\n\n\treq, err := client.CheckNameAvailabilityPreparer(ctx, location, kustoPoolName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"synapse.KustoPoolsClient\", \"CheckNameAvailability\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.CheckNameAvailabilitySender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"synapse.KustoPoolsClient\", \"CheckNameAvailability\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CheckNameAvailabilityResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"synapse.KustoPoolsClient\", \"CheckNameAvailability\", resp, \"Failure responding to 
request\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (client *ManagementClient) checkSKUAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckSKUAvailabilityResponse, error) {\n\tresult := ManagementClientCheckSKUAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SKUAvailabilityListResult); err != nil {\n\t\treturn ManagementClientCheckSKUAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client DomainsClient) CheckAvailabilityResponder(resp *http.Response) (result DomainAvailabilityCheckResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client *ApplicationGatewaysClient) listAvailableResponseHeadersHandleResponse(resp *http.Response) (ApplicationGatewaysListAvailableResponseHeadersResponse, error) {\n\tresult := ApplicationGatewaysListAvailableResponseHeadersResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringArray); err != nil {\n\t\treturn ApplicationGatewaysListAvailableResponseHeadersResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkEndpointNameAvailabilityCreateRequest(ctx context.Context, resourceGroupName string, checkEndpointNameAvailabilityInput CheckEndpointNameAvailabilityInput, options *ManagementClientCheckEndpointNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/checkEndpointNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, checkEndpointNameAvailabilityInput)\n}",
"func (client *VaultsClient) CheckNameAvailability(ctx context.Context, vaultName VaultCheckNameAvailabilityParameters, options *VaultsCheckNameAvailabilityOptions) (CheckNameAvailabilityResultResponse, error) {\n\treq, err := client.checkNameAvailabilityCreateRequest(ctx, vaultName, options)\n\tif err != nil {\n\t\treturn CheckNameAvailabilityResultResponse{}, err\n\t}\n\tresp, err := client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\treturn CheckNameAvailabilityResultResponse{}, err\n\t}\n\tif !resp.HasStatusCode(http.StatusOK) {\n\t\treturn CheckNameAvailabilityResultResponse{}, client.checkNameAvailabilityHandleError(resp)\n\t}\n\treturn client.checkNameAvailabilityHandleResponse(resp)\n}",
"func GetHealthyEndpoint(response http.ResponseWriter, req *http.Request) {\r\n\tt := time.Now()\r\n\tif t.Sub(lastChecked) > (time.Second * time.Duration(2*period)) {\r\n\t\thealthy = false\r\n\t\thealthmessage = \"Healthcheck not running\"\r\n\t}\r\n\tresponse.Header().Add(\"Content-Type\", \"application/json\")\r\n\tif healthy {\r\n\t\tresponse.WriteHeader(http.StatusOK)\r\n\t\tmessage := fmt.Sprintf(`{ \"message\": \"service up and running\", \"lastCheck\": \"%s\" }`, lastChecked.String())\r\n\t\tresponse.Write([]byte(message))\r\n\t} else {\r\n\t\tresponse.WriteHeader(http.StatusServiceUnavailable)\r\n\t\tmessage := fmt.Sprintf(`{ \"message\": \"service is unavailable: %s\", \"lastCheck\": \"%s\" }`, healthmessage, lastChecked.String())\r\n\t\tresponse.Write([]byte(message))\r\n\t}\r\n}",
"func (h *Handlers) CheckAvailability(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Cache-Control\", helpers.BuildCacheControlHeader(0))\n\tresourceKind := chi.URLParam(r, \"resourceKind\")\n\tvalue := r.FormValue(\"v\")\n\tavailable, err := h.orgManager.CheckAvailability(r.Context(), resourceKind, value)\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"method\", \"CheckAvailability\").Send()\n\t\thelpers.RenderErrorJSON(w, err)\n\t\treturn\n\t}\n\tif available {\n\t\thelpers.RenderErrorWithCodeJSON(w, nil, http.StatusNotFound)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n}",
"func (client *ClustersClient) listAvailableClusterRegionHandleResponse(resp *http.Response) (ClustersClientListAvailableClusterRegionResponse, error) {\n\tresult := ClustersClientListAvailableClusterRegionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailableClustersList); err != nil {\n\t\treturn ClustersClientListAvailableClusterRegionResponse{}, err\n\t}\n\treturn result, nil\n}",
"func healthcheck_handler(res http.ResponseWriter, req *http.Request) {\n\tres.WriteHeader(http.StatusOK)\n}",
"func (client *VMInsightsClient) getOnboardingStatusHandleResponse(resp *http.Response) (VMInsightsGetOnboardingStatusResponse, error) {\n\tresult := VMInsightsGetOnboardingStatusResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VMInsightsOnboardingStatus); err != nil {\n\t\treturn VMInsightsGetOnboardingStatusResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
checkNameAvailabilityWithSubscriptionCreateRequest creates the CheckNameAvailabilityWithSubscription request.
|
func (client *ManagementClient) checkNameAvailabilityWithSubscriptionCreateRequest(ctx context.Context, checkNameAvailabilityInput CheckNameAvailabilityInput, options *ManagementClientCheckNameAvailabilityWithSubscriptionOptions) (*policy.Request, error) {
urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/checkNameAvailability"
if client.subscriptionID == "" {
return nil, errors.New("parameter client.subscriptionID cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2021-06-01")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, runtime.MarshalAsJSON(req, checkNameAvailabilityInput)
}
|
[
"func (client *CdnManagementClient) checkNameAvailabilityWithSubscriptionCreateRequest(ctx context.Context, checkNameAvailabilityInput CheckNameAvailabilityInput, options *CdnManagementClientCheckNameAvailabilityWithSubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/checkNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, checkNameAvailabilityInput)\n}",
"func (client *VaultsClient) checkNameAvailabilityCreateRequest(ctx context.Context, vaultName VaultCheckNameAvailabilityParameters, options *VaultsCheckNameAvailabilityOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/checkNameAvailability\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPost, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2019-09-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(vaultName)\n}",
"func (client *ManagementClient) checkDNSNameAvailabilityCreateRequest(ctx context.Context, location string, domainNameLabel string, options *ManagementClientCheckDNSNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"domainNameLabel\", domainNameLabel)\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *ManagementClient) checkEndpointNameAvailabilityCreateRequest(ctx context.Context, resourceGroupName string, checkEndpointNameAvailabilityInput CheckEndpointNameAvailabilityInput, options *ManagementClientCheckEndpointNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/checkEndpointNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, checkEndpointNameAvailabilityInput)\n}",
"func (client *ManagementClient) checkDomainAvailabilityCreateRequest(ctx context.Context, parameters CheckDomainAvailabilityParameter, options *ManagementClientCheckDomainAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/checkDomainAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (client *ManagementClient) checkSKUAvailabilityCreateRequest(ctx context.Context, location string, parameters CheckSKUAvailabilityParameter, options *ManagementClientCheckSKUAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/locations/{location}/checkSkuAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (client *CdnManagementClient) checkNameAvailabilityWithSubscriptionHandleResponse(resp *http.Response) (CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse, error) {\n\tresult := CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityOutput); err != nil {\n\t\treturn CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *ResourceClient) checkQuotaAvailabilityCreateRequest(ctx context.Context, location string, body QuotaAvailabilityRequest, options *ResourceClientCheckQuotaAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.NetApp/locations/{location}/checkQuotaAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}",
"func (a *Client) SubscriptionCreate(params *SubscriptionCreateParams) (*SubscriptionCreateOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewSubscriptionCreateParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"subscriptionCreate\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/subscription\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &SubscriptionCreateReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*SubscriptionCreateOK), nil\n\n}",
"func (client *ManagementClient) checkNameAvailabilityWithSubscriptionHandleResponse(resp *http.Response) (ManagementClientCheckNameAvailabilityWithSubscriptionResponse, error) {\n\tresult := ManagementClientCheckNameAvailabilityWithSubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityOutput); err != nil {\n\t\treturn ManagementClientCheckNameAvailabilityWithSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ResourceLinksClient) listAtSubscriptionCreateRequest(ctx context.Context, options *ResourceLinksClientListAtSubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/links\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treqQP.Set(\"api-version\", \"2016-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeCreateRequest(ctx context.Context, deploymentName string, options *DeploymentOperationsListAtSubscriptionScopeOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\tquery.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (service SubscriptionsService) Create(s NewSubscription) (*Response, Subscription, error) {\n\treq, err := service.client.newRequest(\"POST\", \"subscriptions\", nil, s)\n\tif err != nil {\n\t\treturn nil, Subscription{}, err\n\t}\n\n\tvar dest Subscription\n\tres, err := service.client.do(req, &dest)\n\n\treturn res, dest, err\n}",
"func (client *VaultsClient) listBySubscriptionCreateRequest(ctx context.Context, options *VaultsListBySubscriptionOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/vaults\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\tquery.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tquery.Set(\"api-version\", \"2019-09-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeCreateRequest(ctx context.Context, deploymentName string, options *DeploymentOperationsListAtSubscriptionScopeOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations\"\n\tif deploymentName == \"\" {\n\t\treturn nil, errors.New(\"parameter deploymentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (c *AdminProjectsLocationsSubscriptionsCreateCall) SubscriptionId(subscriptionId string) *AdminProjectsLocationsSubscriptionsCreateCall {\n\tc.urlParams_.Set(\"subscriptionId\", subscriptionId)\n\treturn c\n}",
"func (client *Client) acceptOwnershipStatusCreateRequest(ctx context.Context, subscriptionID string, options *ClientAcceptOwnershipStatusOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Subscription/subscriptions/{subscriptionId}/acceptOwnershipStatus\"\n\tif subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *DeploymentOperationsClient) getAtSubscriptionScopeCreateRequest(ctx context.Context, deploymentName string, operationId string, options *DeploymentOperationsGetAtSubscriptionScopeOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\turlPath = strings.ReplaceAll(urlPath, \"{operationId}\", url.PathEscape(operationId))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DeploymentOperationsClient) getAtSubscriptionScopeCreateRequest(ctx context.Context, deploymentName string, operationID string, options *DeploymentOperationsGetAtSubscriptionScopeOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}\"\n\tif deploymentName == \"\" {\n\t\treturn nil, errors.New(\"parameter deploymentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\tif operationID == \"\" {\n\t\treturn nil, errors.New(\"parameter operationID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{operationId}\", url.PathEscape(operationID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
checkNameAvailabilityWithSubscriptionHandleResponse handles the CheckNameAvailabilityWithSubscription response.
|
func (client *ManagementClient) checkNameAvailabilityWithSubscriptionHandleResponse(resp *http.Response) (ManagementClientCheckNameAvailabilityWithSubscriptionResponse, error) {
result := ManagementClientCheckNameAvailabilityWithSubscriptionResponse{}
if err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityOutput); err != nil {
return ManagementClientCheckNameAvailabilityWithSubscriptionResponse{}, err
}
return result, nil
}
|
[
"func (client *CdnManagementClient) checkNameAvailabilityWithSubscriptionHandleResponse(resp *http.Response) (CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse, error) {\n\tresult := CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityOutput); err != nil {\n\t\treturn CdnManagementClientCheckNameAvailabilityWithSubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *DataConnectionsClient) checkNameAvailabilityHandleResponse(resp *http.Response) (DataConnectionsClientCheckNameAvailabilityResponse, error) {\n\tresult := DataConnectionsClientCheckNameAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameResult); err != nil {\n\t\treturn DataConnectionsClientCheckNameAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkEndpointNameAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckEndpointNameAvailabilityResponse, error) {\n\tresult := ManagementClientCheckEndpointNameAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckEndpointNameAvailabilityOutput); err != nil {\n\t\treturn ManagementClientCheckEndpointNameAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *CdnManagementClient) checkNameAvailabilityWithSubscriptionCreateRequest(ctx context.Context, checkNameAvailabilityInput CheckNameAvailabilityInput, options *CdnManagementClientCheckNameAvailabilityWithSubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/checkNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, checkNameAvailabilityInput)\n}",
"func (client *ManagementClient) checkNameAvailabilityWithSubscriptionCreateRequest(ctx context.Context, checkNameAvailabilityInput CheckNameAvailabilityInput, options *ManagementClientCheckNameAvailabilityWithSubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/checkNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, checkNameAvailabilityInput)\n}",
"func (client *ResourceClient) checkQuotaAvailabilityHandleResponse(resp *http.Response) (ResourceClientCheckQuotaAvailabilityResponse, error) {\n\tresult := ResourceClientCheckQuotaAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckAvailabilityResponse); err != nil {\n\t\treturn ResourceClientCheckQuotaAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkSKUAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckSKUAvailabilityResponse, error) {\n\tresult := ManagementClientCheckSKUAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SKUAvailabilityListResult); err != nil {\n\t\treturn ManagementClientCheckSKUAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client KustoPoolsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client KustoPoolsClient) CheckNameAvailability(ctx context.Context, location string, kustoPoolName KustoPoolCheckNameRequest) (result CheckNameResult, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/KustoPoolsClient.CheckNameAvailability\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: location,\n\t\t\tConstraints: []validation.Constraint{{Target: \"location\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: kustoPoolName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"kustoPoolName.Name\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t{Target: \"kustoPoolName.Type\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"synapse.KustoPoolsClient\", \"CheckNameAvailability\", err.Error())\n\t}\n\n\treq, err := client.CheckNameAvailabilityPreparer(ctx, location, kustoPoolName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"synapse.KustoPoolsClient\", \"CheckNameAvailability\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.CheckNameAvailabilitySender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"synapse.KustoPoolsClient\", \"CheckNameAvailability\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CheckNameAvailabilityResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"synapse.KustoPoolsClient\", \"CheckNameAvailability\", resp, \"Failure responding to 
request\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (client *ResourceLinksClient) listAtSubscriptionHandleResponse(resp *http.Response) (ResourceLinksClientListAtSubscriptionResponse, error) {\n\tresult := ResourceLinksClientListAtSubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ResourceLinkResult); err != nil {\n\t\treturn ResourceLinksClientListAtSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *GatewayClient) listBySubscriptionHandleResponse(resp *http.Response) (GatewayClientListBySubscriptionResponse, error) {\n\tresult := GatewayClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GatewayResourceDescriptionList); err != nil {\n\t\treturn GatewayClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *Client) renameHandleResponse(resp *http.Response) (ClientRenameResponse, error) {\n\tresult := ClientRenameResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RenamedSubscriptionID); err != nil {\n\t\treturn ClientRenameResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkDomainAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckDomainAvailabilityResponse, error) {\n\tresult := ManagementClientCheckDomainAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DomainAvailability); err != nil {\n\t\treturn ManagementClientCheckDomainAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeHandleResponse(resp *http.Response) (DeploymentOperationsListAtSubscriptionScopeResponse, error) {\n\tresult := DeploymentOperationsListAtSubscriptionScopeResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeploymentOperationsListResult); err != nil {\n\t\treturn DeploymentOperationsListAtSubscriptionScopeResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *DeploymentOperationsClient) getAtSubscriptionScopeHandleResponse(resp *http.Response) (DeploymentOperationsGetAtSubscriptionScopeResponse, error) {\n\tresult := DeploymentOperationsGetAtSubscriptionScopeResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeploymentOperation); err != nil {\n\t\treturn DeploymentOperationsGetAtSubscriptionScopeResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *VaultsClient) CheckNameAvailability(ctx context.Context, vaultName VaultCheckNameAvailabilityParameters, options *VaultsCheckNameAvailabilityOptions) (CheckNameAvailabilityResultResponse, error) {\n\treq, err := client.checkNameAvailabilityCreateRequest(ctx, vaultName, options)\n\tif err != nil {\n\t\treturn CheckNameAvailabilityResultResponse{}, err\n\t}\n\tresp, err := client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\treturn CheckNameAvailabilityResultResponse{}, err\n\t}\n\tif !resp.HasStatusCode(http.StatusOK) {\n\t\treturn CheckNameAvailabilityResultResponse{}, client.checkNameAvailabilityHandleError(resp)\n\t}\n\treturn client.checkNameAvailabilityHandleResponse(resp)\n}",
"func (client *ResourceClient) checkFilePathAvailabilityHandleResponse(resp *http.Response) (ResourceClientCheckFilePathAvailabilityResponse, error) {\n\tresult := ResourceClientCheckFilePathAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckAvailabilityResponse); err != nil {\n\t\treturn ResourceClientCheckFilePathAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *AlertsClient) getSubscriptionLevelHandleResponse(resp *http.Response) (AlertsClientGetSubscriptionLevelResponse, error) {\n\tresult := AlertsClientGetSubscriptionLevelResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Alert); err != nil {\n\t\treturn AlertsClientGetSubscriptionLevelResponse{}, err\n\t}\n\treturn result, nil\n}",
"func CheckNameAvailability(ctx context.Context, name, resourceType string) (bool, error) {\n\tclient := getCDNClient()\n\tresp, err := client.CheckNameAvailability(ctx, cdn.CheckNameAvailabilityInput{\n\t\tName: to.StringPtr(name),\n\t\tType: to.StringPtr(resourceType),\n\t})\n\n\treturn *resp.NameAvailable, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
validateProbeCreateRequest creates the ValidateProbe request.
|
func (client *ManagementClient) validateProbeCreateRequest(ctx context.Context, validateProbeInput ValidateProbeInput, options *ManagementClientValidateProbeOptions) (*policy.Request, error) {
urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/validateProbe"
if client.subscriptionID == "" {
return nil, errors.New("parameter client.subscriptionID cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2021-06-01")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, runtime.MarshalAsJSON(req, validateProbeInput)
}
|
[
"func (client *CdnManagementClient) validateProbeCreateRequest(ctx context.Context, validateProbeInput ValidateProbeInput, options *CdnManagementClientValidateProbeOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/validateProbe\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, validateProbeInput)\n}",
"func CreateVerifyDeviceRequest() (request *VerifyDeviceRequest) {\n\trequest = &VerifyDeviceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cloudauth\", \"2019-03-07\", \"VerifyDevice\", \"cloudauth\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateDescribeVerifyRecordsRequest() (request *DescribeVerifyRecordsRequest) {\n\trequest = &DescribeVerifyRecordsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cloudauth\", \"2019-03-07\", \"DescribeVerifyRecords\", \"cloudauth\", \"openAPI\")\n\treturn\n}",
"func CreateVerifyRequest() (request *VerifyRequest) {\n\trequest = &VerifyRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cas\", \"2020-04-07\", \"Verify\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *VirtualMachinesClient) claimCreateRequest(ctx context.Context, resourceGroupName string, labName string, name string, options *VirtualMachinesBeginClaimOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/claim\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif labName == \"\" {\n\t\treturn nil, errors.New(\"parameter labName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{labName}\", url.PathEscape(labName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func CreateDescribeLoadBalancerSpecRequest() (request *DescribeLoadBalancerSpecRequest) {\n\trequest = &DescribeLoadBalancerSpecRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ens\", \"2017-11-10\", \"DescribeLoadBalancerSpec\", \"ens\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func (client *L2IsolationDomainsClient) validateConfigurationCreateRequest(ctx context.Context, resourceGroupName string, l2IsolationDomainName string, options *L2IsolationDomainsClientBeginValidateConfigurationOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/l2IsolationDomains/{l2IsolationDomainName}/validateConfiguration\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif l2IsolationDomainName == \"\" {\n\t\treturn nil, errors.New(\"parameter l2IsolationDomainName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{l2IsolationDomainName}\", url.PathEscape(l2IsolationDomainName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateDescribeRecordRequest() (request *DescribeRecordRequest) {\n\trequest = &DescribeRecordRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"DescribeRecord\", \"live\", \"openAPI\")\n\treturn\n}",
"func (client *ManagementClient) checkDomainAvailabilityCreateRequest(ctx context.Context, parameters CheckDomainAvailabilityParameter, options *ManagementClientCheckDomainAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/checkDomainAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func CreateDescribeCheckWarningsRequest() (request *DescribeCheckWarningsRequest) {\n\trequest = &DescribeCheckWarningsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"aegis\", \"2016-11-11\", \"DescribeCheckWarnings\", \"vipaegis\", \"openAPI\")\n\treturn\n}",
"func (client *DpsCertificateClient) verifyCertificateCreateRequest(ctx context.Context, certificateName string, ifMatch string, resourceGroupName string, provisioningServiceName string, request VerificationCodeRequest, options *DpsCertificateClientVerifyCertificateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates/{certificateName}/verify\"\n\tif certificateName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateName}\", url.PathEscape(certificateName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif provisioningServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter provisioningServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{provisioningServiceName}\", url.PathEscape(provisioningServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.CertificateName1 != nil {\n\t\treqQP.Set(\"certificate.name\", *options.CertificateName1)\n\t}\n\tif options != nil && options.CertificateRawBytes != nil {\n\t\treqQP.Set(\"certificate.rawBytes\", base64.StdEncoding.EncodeToString(options.CertificateRawBytes))\n\t}\n\tif options != nil && options.CertificateIsVerified != nil {\n\t\treqQP.Set(\"certificate.isVerified\", 
strconv.FormatBool(*options.CertificateIsVerified))\n\t}\n\tif options != nil && options.CertificatePurpose != nil {\n\t\treqQP.Set(\"certificate.purpose\", string(*options.CertificatePurpose))\n\t}\n\tif options != nil && options.CertificateCreated != nil {\n\t\treqQP.Set(\"certificate.created\", options.CertificateCreated.Format(time.RFC3339Nano))\n\t}\n\tif options != nil && options.CertificateLastUpdated != nil {\n\t\treqQP.Set(\"certificate.lastUpdated\", options.CertificateLastUpdated.Format(time.RFC3339Nano))\n\t}\n\tif options != nil && options.CertificateHasPrivateKey != nil {\n\t\treqQP.Set(\"certificate.hasPrivateKey\", strconv.FormatBool(*options.CertificateHasPrivateKey))\n\t}\n\tif options != nil && options.CertificateNonce != nil {\n\t\treqQP.Set(\"certificate.nonce\", *options.CertificateNonce)\n\t}\n\treqQP.Set(\"api-version\", \"2023-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, request)\n}",
"func CreateDeleteHealthCheckRequest() (request *DeleteHealthCheckRequest) {\n\trequest = &DeleteHealthCheckRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Smartag\", \"2018-03-13\", \"DeleteHealthCheck\", \"smartag\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateCreateHoneypotRequest() (request *CreateHoneypotRequest) {\n\trequest = &CreateHoneypotRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"CreateHoneypot\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateCheckInstanceExistRequest() (request *CheckInstanceExistRequest) {\n\trequest = &CheckInstanceExistRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Rds\", \"2014-08-15\", \"CheckInstanceExist\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func validateCreateRequest(req *encoder.CreateStreamRequest) error {\n\tif req.DeviceToken == \"\" {\n\t\treturn twirp.RequiredArgumentError(\"device_token\")\n\t}\n\n\tif req.DeviceLabel == \"\" {\n\t\treturn twirp.RequiredArgumentError(\"device_label\")\n\t}\n\n\tif req.CommunityId == \"\" {\n\t\treturn twirp.RequiredArgumentError(\"community_id\")\n\t}\n\n\tif req.RecipientPublicKey == \"\" {\n\t\treturn twirp.RequiredArgumentError(\"recipient_public_key\")\n\t}\n\n\tif req.Location == nil {\n\t\treturn twirp.RequiredArgumentError(\"location\")\n\t}\n\n\tif req.Location.Longitude == 0 {\n\t\treturn twirp.RequiredArgumentError(\"longitude\")\n\t}\n\n\tif req.Location.Longitude < -180 || req.Location.Longitude > 180 {\n\t\treturn twirp.InvalidArgumentError(\"longitude\", \"must be between -180 and 180\")\n\t}\n\n\tif req.Location.Latitude == 0 {\n\t\treturn twirp.RequiredArgumentError(\"latitude\")\n\t}\n\n\tif req.Location.Latitude < -90 || req.Location.Latitude > 90 {\n\t\treturn twirp.InvalidArgumentError(\"latitude\", \"must be between -90 and 90\")\n\t}\n\n\treturn nil\n}",
"func (client *ManagementClient) checkEndpointNameAvailabilityCreateRequest(ctx context.Context, resourceGroupName string, checkEndpointNameAvailabilityInput CheckEndpointNameAvailabilityInput, options *ManagementClientCheckEndpointNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/checkEndpointNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, checkEndpointNameAvailabilityInput)\n}",
"func (client *BackupInstancesClient) validateForBackupCreateRequest(ctx context.Context, vaultName string, resourceGroupName string, parameters ValidateForBackupRequest, options *BackupInstancesClientBeginValidateForBackupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/validateForBackup\"\n\tif vaultName == \"\" {\n\t\treturn nil, errors.New(\"parameter vaultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vaultName}\", url.PathEscape(vaultName))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func ValidateCreateRequest(message *firepb.CreateRequest) (err error) {\n\tif message.CreatedAt != \"\" {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"message.createdAt\", message.CreatedAt, goa.FormatDateTime))\n\t}\n\tif message.UpdatedAt != \"\" {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"message.updatedAt\", message.UpdatedAt, goa.FormatDateTime))\n\t}\n\tif message.DeletedAt != \"\" {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"message.deletedAt\", message.DeletedAt, goa.FormatDateTime))\n\t}\n\tif message.Start != \"\" {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"message.start\", message.Start, goa.FormatDateTime))\n\t}\n\tif message.End != \"\" {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"message.end\", message.End, goa.FormatDateTime))\n\t}\n\tfor _, e := range message.Logs {\n\t\tif e != nil {\n\t\t\tif err2 := ValidateLog(e); err2 != nil {\n\t\t\t\terr = goa.MergeErrors(err, err2)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, e := range message.Weathers {\n\t\tif e != nil {\n\t\t\tif err2 := ValidateWeather(e); err2 != nil {\n\t\t\t\terr = goa.MergeErrors(err, err2)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func CreateCreateLoadBalancerRequest() (request *CreateLoadBalancerRequest) {\n\trequest = &CreateLoadBalancerRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ens\", \"2017-11-10\", \"CreateLoadBalancer\", \"ens\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
validateProbeHandleResponse handles the ValidateProbe response.
|
// validateProbeHandleResponse handles the ValidateProbe response.
// It decodes the JSON body of resp into the ValidateProbeOutput field of the
// returned result; on a decode failure the zero-value result is returned
// together with the error.
func (client *ManagementClient) validateProbeHandleResponse(resp *http.Response) (ManagementClientValidateProbeResponse, error) {
	var result ManagementClientValidateProbeResponse
	err := runtime.UnmarshalAsJSON(resp, &result.ValidateProbeOutput)
	if err != nil {
		return ManagementClientValidateProbeResponse{}, err
	}
	return result, nil
}
|
[
"func (client *CdnManagementClient) validateProbeHandleResponse(resp *http.Response) (CdnManagementClientValidateProbeResponse, error) {\n\tresult := CdnManagementClientValidateProbeResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ValidateProbeOutput); err != nil {\n\t\treturn CdnManagementClientValidateProbeResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *DpsCertificateClient) verifyCertificateHandleResponse(resp *http.Response) (DpsCertificateClientVerifyCertificateResponse, error) {\n\tresult := DpsCertificateClientVerifyCertificateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateResponse); err != nil {\n\t\treturn DpsCertificateClientVerifyCertificateResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) checkDomainAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckDomainAvailabilityResponse, error) {\n\tresult := ManagementClientCheckDomainAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DomainAvailability); err != nil {\n\t\treturn ManagementClientCheckDomainAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *AppsClient) validateDomainHandleResponse(resp *http.Response) (AppsClientValidateDomainResponse, error) {\n\tresult := AppsClientValidateDomainResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomDomainValidateResult); err != nil {\n\t\treturn AppsClientValidateDomainResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (triggerResp TriggerResponse) HandleResponse() {\n\tfmt.Printf(\"Flow Triggered.\\nExecution ID: %v\\n\", triggerResp)\n}",
"func (im *InstanceManager) ValidateMonitoringResponse(response string) error {\n\tschema, _ := ioutil.ReadAll(config.LoadAsset(\"/config/asset/rtc-monitor-response-schema.json\"))\n\tschemaJSON := string(schema[:])\n\n\tschemaLoader := gojsonschema.NewStringLoader(schemaJSON)\n\tdocumentLoader := gojsonschema.NewStringLoader(response)\n\n\tresult, err := gojsonschema.Validate(schemaLoader, documentLoader)\n\tif err != nil {\n\t\tlogger.Debugf(\"response: %s\", response)\n\t\treturn err\n\t}\n\n\tif !result.Valid() {\n\t\treturn errors.Errorf(\"Not valid response: error[%+v]\", result.Errors())\n\t}\n\n\treturn nil\n}",
"func handleVerResp(w http.ResponseWriter, r *http.Request) error {\n\n\treq := getverifyPayload(r)\n\n\tresp := comdef.VerifyResp{}\n\thttpStatus, err := httputilfuncs.PostHTTPJSONString(verifyURL,\n\t\treq, &resp, 10*time.Second)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed on send Record request! err:%v\\n\", err)\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Http Status: %d, resp: %v\", httpStatus, resp)\n\tif httpStatus == 200 {\n\t\tw.Write([]byte(\"Recording verified sucessfully:\\n\"))\n\t\tb, _ := json.Marshal(resp)\n\t\tw.Write(b)\n\n\t} else {\n\t\tw.Write([]byte(\"Recording not verified sucessfully please resubmit\"))\n\t}\n\n\treturn err\n\n}",
"func (client *ManagementClient) checkEndpointNameAvailabilityHandleResponse(resp *http.Response) (ManagementClientCheckEndpointNameAvailabilityResponse, error) {\n\tresult := ManagementClientCheckEndpointNameAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckEndpointNameAvailabilityOutput); err != nil {\n\t\treturn ManagementClientCheckEndpointNameAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *InheritanceClient) getValidHandleResponse(resp *http.Response) (InheritanceClientGetValidResponse, error) {\n\tresult := InheritanceClientGetValidResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Siamese); err != nil {\n\t\treturn InheritanceClientGetValidResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ResourceClient) checkFilePathAvailabilityHandleResponse(resp *http.Response) (ResourceClientCheckFilePathAvailabilityResponse, error) {\n\tresult := ResourceClientCheckFilePathAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckAvailabilityResponse); err != nil {\n\t\treturn ResourceClientCheckFilePathAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *DpsCertificateClient) generateVerificationCodeHandleResponse(resp *http.Response) (DpsCertificateClientGenerateVerificationCodeResponse, error) {\n\tresult := DpsCertificateClientGenerateVerificationCodeResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VerificationCodeResponse); err != nil {\n\t\treturn DpsCertificateClientGenerateVerificationCodeResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *FirewallPolicyIdpsSignaturesOverridesClient) patchHandleResponse(resp *http.Response) (FirewallPolicyIdpsSignaturesOverridesPatchResponse, error) {\n\tresult := FirewallPolicyIdpsSignaturesOverridesPatchResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SignaturesOverrides); err != nil {\n\t\treturn FirewallPolicyIdpsSignaturesOverridesPatchResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *Client) acceptOwnershipStatusHandleResponse(resp *http.Response) (ClientAcceptOwnershipStatusResponse, error) {\n\tresult := ClientAcceptOwnershipStatusResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AcceptOwnershipStatusResponse); err != nil {\n\t\treturn ClientAcceptOwnershipStatusResponse{}, err\n\t}\n\treturn result, nil\n}",
"func handleVerify(w http.ResponseWriter, r *http.Request) error {\n\n\tparam := httpctxserver.HandlerProcessInput{\n\t\tReq: &comdef.VerifyReq{},\n\t\tResp: &comdef.VerifyResp{},\n\t\tUsrData: nil}\n\n\treturn httpctxserver.DoJSONReq(w, r, processVerify, param)\n\n}",
"func (client *InputsClient) getHandleResponse(resp *http.Response) (InputsClientGetResponse, error) {\n\tresult := InputsClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Input); err != nil {\n\t\treturn InputsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ArrayClient) getValidHandleResponse(resp *http.Response) (ArrayClientGetValidResponse, error) {\n\tresult := ArrayClientGetValidResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ArrayWrapper); err != nil {\n\t\treturn ArrayClientGetValidResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ResourceClient) checkQuotaAvailabilityHandleResponse(resp *http.Response) (ResourceClientCheckQuotaAvailabilityResponse, error) {\n\tresult := ResourceClientCheckQuotaAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckAvailabilityResponse); err != nil {\n\t\treturn ResourceClientCheckQuotaAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *DataConnectionsClient) checkNameAvailabilityHandleResponse(resp *http.Response) (DataConnectionsClientCheckNameAvailabilityResponse, error) {\n\tresult := DataConnectionsClientCheckNameAvailabilityResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CheckNameResult); err != nil {\n\t\treturn DataConnectionsClientCheckNameAvailabilityResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *InputsClient) updateHandleResponse(resp *http.Response) (InputsClientUpdateResponse, error) {\n\tresult := InputsClientUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Input); err != nil {\n\t\treturn InputsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewEventHubsClient creates a new instance of EventHubsClient with the specified values. subscriptionID Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. credential used to authorize requests. Usually a credential from azidentity. options pass nil to accept the default values.
|
// NewEventHubsClient creates a new instance of EventHubsClient with the specified values.
//   - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure
//     subscription. The subscription ID forms part of the URI for every service call.
//   - credential - used to authorize requests. Usually a credential from azidentity.
//   - options - pass nil to accept the default values.
func NewEventHubsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*EventHubsClient, error) {
	cl, err := arm.NewClient(internal.ModuleName+"/armeventhub.EventHubsClient", internal.ModuleVersion, credential, options)
	if err != nil {
		return nil, err
	}
	return &EventHubsClient{
		internal:       cl,
		subscriptionID: subscriptionID,
	}, nil
}
|
[
"func NewAzureEventHubs(logger logger.Logger, isBinding bool) *AzureEventHubs {\n\treturn &AzureEventHubs{\n\t\tlogger: logger,\n\t\tisBinding: isBinding,\n\t\tproducersLock: &sync.RWMutex{},\n\t\tproducers: make(map[string]*azeventhubs.ProducerClient, 1),\n\t\tcheckpointStoreLock: &sync.RWMutex{},\n\t}\n}",
"func NewAzureEventHubs(logger logger.Logger) pubsub.PubSub {\n\treturn &AzureEventHubs{\n\t\tAzureEventHubs: impl.NewAzureEventHubs(logger, false),\n\t}\n}",
"func NewEventHubNamespace(ctx *pulumi.Context,\n\tname string, args *EventHubNamespaceArgs, opts ...pulumi.ResourceOption) (*EventHubNamespace, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.Sku == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Sku'\")\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"defaultPrimaryConnectionString\",\n\t\t\"defaultPrimaryConnectionStringAlias\",\n\t\t\"defaultPrimaryKey\",\n\t\t\"defaultSecondaryConnectionString\",\n\t\t\"defaultSecondaryConnectionStringAlias\",\n\t\t\"defaultSecondaryKey\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource EventHubNamespace\n\terr := ctx.RegisterResource(\"azure:eventhub/eventHubNamespace:EventHubNamespace\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewEventsClient(\n\tapiAddress string,\n\tapiToken string,\n\topts *restmachinery.APIClientOptions,\n) EventsClient {\n\treturn &eventsClient{\n\t\tBaseClient: rm.NewBaseClient(apiAddress, apiToken, opts),\n\t\tworkersClient: NewWorkersClient(apiAddress, apiToken, opts),\n\t\tlogsClient: NewLogsClient(apiAddress, apiToken, opts),\n\t}\n}",
"func NewEventSubscriptionsClient(subscriptionID string) EventSubscriptionsClient {\n\treturn NewEventSubscriptionsClientWithBaseURI(DefaultBaseURI, subscriptionID)\n}",
"func New(adapter consumer.EventAdapter) *MockEventHub {\n\treturn &MockEventHub{Adapter: adapter}\n}",
"func GetEventHubClient(info EventHubInfo) (*eventhub.Hub, error) {\n\t// The user wants to use a connectionstring, not a pod identity\n\tif info.EventHubConnection != \"\" {\n\t\thub, err := eventhub.NewHubFromConnectionString(info.EventHubConnection)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create hub client: %s\", err)\n\t\t}\n\t\treturn hub, nil\n\t}\n\n\t// Since there is no connectionstring, then user wants to use pod identity\n\t// Internally, the JWTProvider will use Managed Service Identity to authenticate if no Service Principal info supplied\n\tprovider, aadErr := aad.NewJWTProvider(func(config *aad.TokenProviderConfiguration) error {\n\t\tif config.Env == nil {\n\t\t\tconfig.Env = &azure.PublicCloud\n\t\t}\n\t\treturn nil\n\t})\n\n\tif aadErr == nil {\n\t\treturn eventhub.NewHub(info.Namespace, info.EventHubName, provider)\n\t}\n\n\treturn nil, aadErr\n}",
"func NewEventClient(router *rpc.HttpRouter) *EventClient {\n\treturn &EventClient{\n\t\tclient: NewClient(router),\n\t\tsession: uuid.New().String(),\n\t}\n}",
"func NewEventGroupClient(c config) *EventGroupClient {\n\treturn &EventGroupClient{config: c}\n}",
"func NewChannelsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *ChannelsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &ChannelsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}",
"func NewEventClient(store store.EventStore, auth authorization.Authorizer, bus Publisher) *EventClient {\n\treturn &EventClient{\n\t\tstore: store,\n\t\tauth: auth,\n\t\tbus: bus,\n\t}\n}",
"func NewSubscriptionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *SubscriptionsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &SubscriptionsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}",
"func NewClient() Client {\n\treturn &client{\n\t\ttokens: make(map[string]DockerHubToken),\n\t\tdigests: make(map[string]DockerImageDigest),\n\t}\n}",
"func NewEventSubscriptionsClientWithBaseURI(baseURI string, subscriptionID string) EventSubscriptionsClient {\n\treturn EventSubscriptionsClient{NewWithBaseURI(baseURI, subscriptionID)}\n}",
"func NewESClient() (*elastic.Client, error) {\n\treturn elastic.NewClient()\n}",
"func NewClient(register, registerTaskState, registerTask, registerTasks, uploadImage, artSearch, artworkGet, download goa.Endpoint) *Client {\n\treturn &Client{\n\t\tRegisterEndpoint: register,\n\t\tRegisterTaskStateEndpoint: registerTaskState,\n\t\tRegisterTaskEndpoint: registerTask,\n\t\tRegisterTasksEndpoint: registerTasks,\n\t\tUploadImageEndpoint: uploadImage,\n\t\tArtSearchEndpoint: artSearch,\n\t\tArtworkGetEndpoint: artworkGet,\n\t\tDownloadEndpoint: download,\n\t}\n}",
"func NewECSClient(verbose bool) *ECSService {\n\tif os.Getenv(\"AWS_REGION\") == \"\" {\n\t\tregion, err := getRegion()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"unable to get AWS region from metadata\")\n\t\t} else {\n\t\t\tos.Setenv(\"AWS_REGION\", region)\n\t\t}\n\t}\n\tsession := session.New(\n\t\t&aws.Config{\n\t\t\tHTTPClient: &http.Client{\n\t\t\t\tTimeout: time.Second * 20,\n\t\t\t},\n\t\t},\n\t)\n\treturn &ECSService{\n\t\tverbose: verbose,\n\t\tclient: ecs.New(session),\n\t}\n}",
"func NewVirtualHubsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *VirtualHubsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Endpoint) == 0 {\n\t\tcp.Endpoint = arm.AzurePublicCloud\n\t}\n\tclient := &VirtualHubsClient{\n\t\tsubscriptionID: subscriptionID,\n\t\thost: string(cp.Endpoint),\n\t\tpl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, &cp),\n\t}\n\treturn client\n}",
"func NewClient(consumerCreds ConsumerCredentials, accessCreds AccessCredentials, httpClient HTTPClient) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\toauthClient := oauth.Client{\n\t\tCredentials: oauth.Credentials{\n\t\t\tToken: consumerCreds.Key,\n\t\t\tSecret: consumerCreds.Secret,\n\t\t},\n\t}\n\treturn &Client{\n\t\thttpClient: httpClient,\n\t\toauthClient: &oauthClient,\n\t\taccessCreds: &oauth.Credentials{\n\t\t\tToken: accessCreds.Token,\n\t\t\tSecret: accessCreds.Secret,\n\t\t},\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
createOrUpdateAuthorizationRuleCreateRequest creates the CreateOrUpdateAuthorizationRule request.
|
// createOrUpdateAuthorizationRuleCreateRequest creates the CreateOrUpdateAuthorizationRule request.
// It validates each path parameter (in the same order as before), substitutes it into the
// URL template, then builds a PUT request with the api-version query parameter and a
// JSON-marshaled body.
func (client *EventHubsClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule, options *EventHubsClientCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {
	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}"
	// Table of path parameters; iteration order matches the original check order
	// so the first empty parameter reported is unchanged.
	pathParams := []struct {
		name        string // name used in the error message
		placeholder string // template token replaced in urlPath
		value       string
	}{
		{"resourceGroupName", "{resourceGroupName}", resourceGroupName},
		{"namespaceName", "{namespaceName}", namespaceName},
		{"eventHubName", "{eventHubName}", eventHubName},
		{"authorizationRuleName", "{authorizationRuleName}", authorizationRuleName},
		{"client.subscriptionID", "{subscriptionId}", client.subscriptionID},
	}
	for _, p := range pathParams {
		if p.value == "" {
			return nil, errors.New("parameter " + p.name + " cannot be empty")
		}
		urlPath = strings.ReplaceAll(urlPath, p.placeholder, url.PathEscape(p.value))
	}
	req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
	if err != nil {
		return nil, err
	}
	query := req.Raw().URL.Query()
	query.Set("api-version", "2017-04-01")
	req.Raw().URL.RawQuery = query.Encode()
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, runtime.MarshalAsJSON(req, parameters)
}
|
[
"func (client *QueuesClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule, options *QueuesCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (client *EventHubsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *RoleDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, vaultBaseURL string, scope string, roleDefinitionName string, parameters RoleDefinitionCreateParameters, options *RoleDefinitionsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionName}\", url.PathEscape(roleDefinitionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}",
"func CreateAddDNSAuthorizationRuleRequest() (request *AddDNSAuthorizationRuleRequest) {\n\trequest = &AddDNSAuthorizationRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CC5G\", \"2022-03-14\", \"AddDNSAuthorizationRule\", \"fivegcc\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *EventHubsClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientDeleteAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *EventHubsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateAddResolverRuleRequest() (request *AddResolverRuleRequest) {\n\trequest = &AddResolverRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"pvtz\", \"2018-01-01\", \"AddResolverRule\", \"pvtz\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *GovernanceRulesClient) executeCreateRequest(ctx context.Context, scope string, ruleID string, options *GovernanceRulesClientBeginExecuteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/governanceRules/{ruleId}/execute\"\n\tif scope == \"\" {\n\t\treturn nil, errors.New(\"parameter scope cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", url.PathEscape(scope))\n\tif ruleID == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleId}\", url.PathEscape(ruleID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.ExecuteGovernanceRuleParams != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.ExecuteGovernanceRuleParams)\n\t}\n\treturn req, nil\n}",
"func (client *QueuesClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (a RuleApi) CreateRule(body Rule) (*Rule, *APIResponse, error) {\n\n\tvar localVarHttpMethod = strings.ToUpper(\"Post\")\n\t// create path and map variables\n\tlocalVarPath := a.Configuration.BasePath + \"/rules\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := make(map[string]string)\n\tvar localVarPostBody interface{}\n\tvar localVarFileName string\n\tvar localVarFileBytes []byte\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\tlocalVarHeaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tvar successPayload = new(Rule)\n\tlocalVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\n\tvar localVarURL, _ = url.Parse(localVarPath)\n\tlocalVarURL.RawQuery = localVarQueryParams.Encode()\n\tvar localVarAPIResponse = &APIResponse{Operation: \"CreateRule\", Method: localVarHttpMethod, RequestURL: localVarURL.String()}\n\tif localVarHttpResponse != nil {\n\t\tlocalVarAPIResponse.Response = localVarHttpResponse.RawResponse\n\t\tlocalVarAPIResponse.Payload = localVarHttpResponse.Body()\n\t}\n\n\tif err != nil {\n\t\treturn successPayload, localVarAPIResponse, err\n\t}\n\terr = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)\n\treturn successPayload, localVarAPIResponse, err\n}",
"func (client *QueuesClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesDeleteAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DataCollectionRuleAssociationsClient) listByRuleCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionRuleName string, options *DataCollectionRuleAssociationsClientListByRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}/associations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionRuleName}\", url.PathEscape(dataCollectionRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreatePutCustomMetricRuleRequest() (request *PutCustomMetricRuleRequest) {\n\trequest = &PutCustomMetricRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"PutCustomMetricRule\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (r *Rule) CreateOrUpdate() (err error) {\n\tdb := database.DB.Model(r)\n\terr = db.Clauses(clause.OnConflict{\n\t\tColumns: []clause.Column{{Name: \"id\"}},\n\t\tDoUpdates: clause.AssignmentColumns(\n\t\t\t[]string{\n\t\t\t\t\"name\",\n\t\t\t\t\"flag_format\",\n\t\t\t\t\"rank\",\n\t\t\t\t\"response_status_code\",\n\t\t\t\t\"response_headers\",\n\t\t\t\t\"response_body\",\n\t\t\t\t\"push_to_client\",\n\t\t\t\t\"notice\",\n\t\t\t}),\n\t}).Create(r).Error\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = GetServer().UpdateRules()\n\treturn err\n}",
"func (s *RulesService) Create(ctx context.Context, resourceID int, body *CreateRuleBody) (*Rule, *http.Response, error) {\n\treq, err := s.client.NewRequest(ctx,\n\t\thttp.MethodPost,\n\t\tfmt.Sprintf(rulesURL, resourceID), body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\trule := &Rule{}\n\n\tresp, err := s.client.Do(req, rule)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn rule, resp, nil\n}",
"func CreateCreateRuleActionRequest() (request *CreateRuleActionRequest) {\n\trequest = &CreateRuleActionRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Iot\", \"2018-01-20\", \"CreateRuleAction\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func CreateModifyWebRuleRequest() (request *ModifyWebRuleRequest) {\n\trequest = &ModifyWebRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ddoscoo\", \"2020-01-01\", \"ModifyWebRule\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (h *GRPCRuleMgr) CreateOrUpdateRule(servicePrefix, forwardDst string, insecure bool) {\n\th.Store(servicePrefix, &GRPCRule{\n\t\tServicePrefix: servicePrefix,\n\t\tForwardDst: forwardDst,\n\t\tInsecure: insecure})\n}",
"func (client *EventHubsClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {\n\t\treturn EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err\n\t}\n\treturn result, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
createOrUpdateAuthorizationRuleHandleResponse handles the CreateOrUpdateAuthorizationRule response.
|
func (client *EventHubsClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientCreateOrUpdateAuthorizationRuleResponse, error) {
result := EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}
if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {
return EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err
}
return result, nil
}
|
[
"func (client *QueuesClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (QueuesCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := QueuesCreateOrUpdateAuthorizationRuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRule); err != nil {\n\t\treturn QueuesCreateOrUpdateAuthorizationRuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *ServerVulnerabilityAssessmentsClient) createOrUpdateHandleResponse(resp *http.Response) (ServerVulnerabilityAssessmentsCreateOrUpdateResponse, error) {\n\tresult := ServerVulnerabilityAssessmentsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerVulnerabilityAssessment); err != nil {\n\t\treturn ServerVulnerabilityAssessmentsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *EventHubsClient) getAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientGetAuthorizationRuleResponse, error) {\n\tresult := EventHubsClientGetAuthorizationRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {\n\t\treturn EventHubsClientGetAuthorizationRuleResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *SetDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (SetDefinitionsClientCreateOrUpdateAtManagementGroupResponse, error) {\n\tresult := SetDefinitionsClientCreateOrUpdateAtManagementGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SetDefinition); err != nil {\n\t\treturn SetDefinitionsClientCreateOrUpdateAtManagementGroupResponse{}, err\n\t}\n\treturn result, nil\n}",
"func CreatePutCustomMetricRuleResponse() (response *PutCustomMetricRuleResponse) {\n\tresponse = &PutCustomMetricRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *EventHubsClient) listAuthorizationRulesHandleResponse(resp *http.Response) (EventHubsClientListAuthorizationRulesResponse, error) {\n\tresult := EventHubsClientListAuthorizationRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult); err != nil {\n\t\treturn EventHubsClientListAuthorizationRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func CreateAddDNSAuthorizationRuleResponse() (response *AddDNSAuthorizationRuleResponse) {\n\tresponse = &AddDNSAuthorizationRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *EventHubsClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule, options *EventHubsClientCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, 
parameters)\n}",
"func (client *QueuesClient) getAuthorizationRuleHandleResponse(resp *http.Response) (QueuesGetAuthorizationRuleResponse, error) {\n\tresult := QueuesGetAuthorizationRuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRule); err != nil {\n\t\treturn QueuesGetAuthorizationRuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *TagsClient) createOrUpdateAtScopeHandleResponse(resp *http.Response) (TagsClientCreateOrUpdateAtScopeResponse, error) {\n\tresult := TagsClientCreateOrUpdateAtScopeResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TagsResource); err != nil {\n\t\treturn TagsClientCreateOrUpdateAtScopeResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ApplicationGroupClient) createOrUpdateApplicationGroupHandleResponse(resp *http.Response) (ApplicationGroupClientCreateOrUpdateApplicationGroupResponse, error) {\n\tresult := ApplicationGroupClientCreateOrUpdateApplicationGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationGroup); err != nil {\n\t\treturn ApplicationGroupClientCreateOrUpdateApplicationGroupResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *QueuesClient) listAuthorizationRulesHandleResponse(resp *http.Response) (QueuesListAuthorizationRulesResponse, error) {\n\tresult := QueuesListAuthorizationRulesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRuleListResult); err != nil {\n\t\treturn QueuesListAuthorizationRulesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func CreateCreateRuleActionResponse() (response *CreateRuleActionResponse) {\n\tresponse = &CreateRuleActionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateAddResolverRuleResponse() (response *AddResolverRuleResponse) {\n\tresponse = &AddResolverRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func CreateModifyWebRuleResponse() (response *ModifyWebRuleResponse) {\n\tresponse = &ModifyWebRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *ConfigurationAssignmentsClient) createOrUpdateParentHandleResponse(resp *http.Response) (ConfigurationAssignmentsClientCreateOrUpdateParentResponse, error) {\n\tresult := ConfigurationAssignmentsClientCreateOrUpdateParentResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConfigurationAssignment); err != nil {\n\t\treturn ConfigurationAssignmentsClientCreateOrUpdateParentResponse{}, err\n\t}\n\treturn result, nil\n}",
"func CreateListDispatchRuleResponse() (response *ListDispatchRuleResponse) {\n\tresponse = &ListDispatchRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *QueuesClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule, options *QueuesCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (a RuleApi) CreateRule(body Rule) (*Rule, *APIResponse, error) {\n\n\tvar localVarHttpMethod = strings.ToUpper(\"Post\")\n\t// create path and map variables\n\tlocalVarPath := a.Configuration.BasePath + \"/rules\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := make(map[string]string)\n\tvar localVarPostBody interface{}\n\tvar localVarFileName string\n\tvar localVarFileBytes []byte\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\tlocalVarHeaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tvar successPayload = new(Rule)\n\tlocalVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\n\tvar localVarURL, _ = url.Parse(localVarPath)\n\tlocalVarURL.RawQuery = localVarQueryParams.Encode()\n\tvar localVarAPIResponse = &APIResponse{Operation: \"CreateRule\", Method: localVarHttpMethod, RequestURL: localVarURL.String()}\n\tif localVarHttpResponse != nil {\n\t\tlocalVarAPIResponse.Response = 
localVarHttpResponse.RawResponse\n\t\tlocalVarAPIResponse.Payload = localVarHttpResponse.Body()\n\t}\n\n\tif err != nil {\n\t\treturn successPayload, localVarAPIResponse, err\n\t}\n\terr = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)\n\treturn successPayload, localVarAPIResponse, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
deleteAuthorizationRuleCreateRequest creates the DeleteAuthorizationRule request.
|
func (client *EventHubsClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientDeleteAuthorizationRuleOptions) (*policy.Request, error) {
urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}"
if resourceGroupName == "" {
return nil, errors.New("parameter resourceGroupName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
if namespaceName == "" {
return nil, errors.New("parameter namespaceName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName))
if eventHubName == "" {
return nil, errors.New("parameter eventHubName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName))
if authorizationRuleName == "" {
return nil, errors.New("parameter authorizationRuleName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName))
if client.subscriptionID == "" {
return nil, errors.New("parameter client.subscriptionID cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2017-04-01")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
}
|
[
"func (client *QueuesClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesDeleteAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (c *VPCClient) NewDeleteSnatRuleRequest() *DeleteSnatRuleRequest {\n\treq := &DeleteSnatRuleRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}",
"func CreateDeleteTmMonitorRuleRequest() (request *DeleteTmMonitorRuleRequest) {\n\trequest = &DeleteTmMonitorRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Trademark\", \"2018-07-24\", \"DeleteTmMonitorRule\", \"trademark\", \"openAPI\")\n\treturn\n}",
"func NewExportRuleDestroyRequest() *ExportRuleDestroyRequest {\n\treturn &ExportRuleDestroyRequest{}\n}",
"func (client *EventHubsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *EventHubsClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule, options *EventHubsClientCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, 
parameters)\n}",
"func (c *VPCClient) NewDeleteSnatDnatRuleRequest() *DeleteSnatDnatRuleRequest {\n\treq := &DeleteSnatDnatRuleRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}",
"func (c *VPCClient) NewDeleteNetworkAclRequest() *DeleteNetworkAclRequest {\n\treq := &DeleteNetworkAclRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}",
"func (client *QueuesClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule, options *QueuesCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (client *EventHubsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *DataCollectionRuleAssociationsClient) listByRuleCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionRuleName string, options *DataCollectionRuleAssociationsClientListByRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}/associations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionRuleName}\", url.PathEscape(dataCollectionRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateCheckAccountDeleteRequest() (request *CheckAccountDeleteRequest) {\n\trequest = &CheckAccountDeleteRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ResourceManager\", \"2020-03-31\", \"CheckAccountDelete\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *QueuesClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DatasetClient) deleteDatasetCreateRequest(ctx context.Context, datasetName string, options *DatasetClientBeginDeleteDatasetOptions) (*policy.Request, error) {\n\turlPath := \"/datasets/{datasetName}\"\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (s *AllocationRuleEndpoint) Delete(ctx context.Context, division int, id *types.GUID) error {\n\tb, _ := s.client.ResolvePathWithDivision(\"/api/v1/beta/{division}/cashflow/AllocationRule\", division) // #nosec\n\tu, err := api.AddOdataKeyToURL(b, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, r, requestError := s.client.NewRequestAndDo(ctx, \"DELETE\", u.String(), nil, nil)\n\tif requestError != nil {\n\t\treturn requestError\n\t}\n\n\tif r.StatusCode != http.StatusNoContent {\n\t\tbody, _ := ioutil.ReadAll(r.Body) // #nosec\n\t\treturn fmt.Errorf(\"Failed with status %v and body %v\", r.StatusCode, body)\n\t}\n\n\treturn nil\n}",
"func CreateDeleteWorkspaceRequest() (request *DeleteWorkspaceRequest) {\n\trequest = &DeleteWorkspaceRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"AIWorkSpace\", \"2021-02-04\", \"DeleteWorkspace\", \"/api/v1/workspaces/[WorkspaceId]\", \"\", \"\")\n\trequest.Method = requests.DELETE\n\treturn\n}",
"func (c *VPCClient) NewDeleteNetworkAclAssociationRequest() *DeleteNetworkAclAssociationRequest {\n\treq := &DeleteNetworkAclAssociationRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}",
"func (c *ULBClient) NewDeleteSecurityPolicyRequest() *DeleteSecurityPolicyRequest {\n\treq := &DeleteSecurityPolicyRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}",
"func CreateDeleteHealthCheckRequest() (request *DeleteHealthCheckRequest) {\n\trequest = &DeleteHealthCheckRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Smartag\", \"2018-03-13\", \"DeleteHealthCheck\", \"smartag\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
getAuthorizationRuleCreateRequest creates the GetAuthorizationRule request.
|
func (client *EventHubsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (*policy.Request, error) {
urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}"
if resourceGroupName == "" {
return nil, errors.New("parameter resourceGroupName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
if namespaceName == "" {
return nil, errors.New("parameter namespaceName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName))
if eventHubName == "" {
return nil, errors.New("parameter eventHubName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName))
if authorizationRuleName == "" {
return nil, errors.New("parameter authorizationRuleName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName))
if client.subscriptionID == "" {
return nil, errors.New("parameter client.subscriptionID cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2017-04-01")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
}
|
[
"func (client *QueuesClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DataCollectionRuleAssociationsClient) listByRuleCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionRuleName string, options *DataCollectionRuleAssociationsClientListByRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}/associations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionRuleName}\", url.PathEscape(dataCollectionRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *EventHubsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *QueuesClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, options *QueuesListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func CreateGetPermissionRequest() (request *GetPermissionRequest) {\n\trequest = &GetPermissionRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"AIWorkSpace\", \"2021-02-04\", \"GetPermission\", \"/api/v1/workspaces/[WorkspaceId]/permissions/[PermissionCode]\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func CreateGetAclRequest() (request *GetAclRequest) {\n\trequest = &GetAclRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ga\", \"2019-11-20\", \"GetAcl\", \"gaplus\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *AlertRuleIncidentsClient) listByAlertRuleCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, options *AlertRuleIncidentsListByAlertRuleOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2016-03-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *QueuesClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule, options *QueuesCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (client *EventHubsClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientDeleteAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *EventHubsClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule, options *EventHubsClientCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, 
parameters)\n}",
"func (client *DataCollectionRuleAssociationsClient) listByResourceCreateRequest(ctx context.Context, resourceURI string, options *DataCollectionRuleAssociationsClientListByResourceOptions) (*policy.Request, error) {\n\turlPath := \"/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceUri}\", resourceURI)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *QueuesClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesDeleteAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *SetDefinitionsClient) getBuiltInCreateRequest(ctx context.Context, policySetDefinitionName string, options *SetDefinitionsClientGetBuiltInOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}\"\n\tif policySetDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policySetDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policySetDefinitionName}\", url.PathEscape(policySetDefinitionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *GovernanceRulesClient) executeCreateRequest(ctx context.Context, scope string, ruleID string, options *GovernanceRulesClientBeginExecuteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/governanceRules/{ruleId}/execute\"\n\tif scope == \"\" {\n\t\treturn nil, errors.New(\"parameter scope cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", url.PathEscape(scope))\n\tif ruleID == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleId}\", url.PathEscape(ruleID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.ExecuteGovernanceRuleParams != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.ExecuteGovernanceRuleParams)\n\t}\n\treturn req, nil\n}",
"func CreateCreateRuleActionRequest() (request *CreateRuleActionRequest) {\n\trequest = &CreateRuleActionRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Iot\", \"2018-01-20\", \"CreateRuleAction\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *RouteFilterRulesClient) listByRouteFilterCreateRequest(ctx context.Context, resourceGroupName string, routeFilterName string, options *RouteFilterRulesListByRouteFilterOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif routeFilterName == \"\" {\n\t\treturn nil, errors.New(\"parameter routeFilterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{routeFilterName}\", url.PathEscape(routeFilterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DeploymentOperationsClient) getAtManagementGroupScopeCreateRequest(ctx context.Context, groupId string, deploymentName string, operationId string, options *DeploymentOperationsGetAtManagementGroupScopeOptions) (*azcore.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupId))\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\turlPath = strings.ReplaceAll(urlPath, \"{operationId}\", url.PathEscape(operationId))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *ResourceLinksClient) getCreateRequest(ctx context.Context, linkID string, options *ResourceLinksClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{linkId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{linkId}\", linkID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2016-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateAddDNSAuthorizationRuleRequest() (request *AddDNSAuthorizationRuleRequest) {\n\trequest = &AddDNSAuthorizationRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CC5G\", \"2022-03-14\", \"AddDNSAuthorizationRule\", \"fivegcc\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
getAuthorizationRuleHandleResponse handles the GetAuthorizationRule response.
|
func (client *EventHubsClient) getAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientGetAuthorizationRuleResponse, error) {
result := EventHubsClientGetAuthorizationRuleResponse{}
if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {
return EventHubsClientGetAuthorizationRuleResponse{}, err
}
return result, nil
}
|
[
"func (client *QueuesClient) getAuthorizationRuleHandleResponse(resp *http.Response) (QueuesGetAuthorizationRuleResponse, error) {\n\tresult := QueuesGetAuthorizationRuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRule); err != nil {\n\t\treturn QueuesGetAuthorizationRuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *EventHubsClient) listAuthorizationRulesHandleResponse(resp *http.Response) (EventHubsClientListAuthorizationRulesResponse, error) {\n\tresult := EventHubsClientListAuthorizationRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult); err != nil {\n\t\treturn EventHubsClientListAuthorizationRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *QueuesClient) listAuthorizationRulesHandleResponse(resp *http.Response) (QueuesListAuthorizationRulesResponse, error) {\n\tresult := QueuesListAuthorizationRulesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRuleListResult); err != nil {\n\t\treturn QueuesListAuthorizationRulesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *EventHubsClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {\n\t\treturn EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *QueuesClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (QueuesCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := QueuesCreateOrUpdateAuthorizationRuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRule); err != nil {\n\t\treturn QueuesCreateOrUpdateAuthorizationRuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client ACGClient) GetRuleListResponder(resp *http.Response) (result AccessControlGroupRuleListResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client *ManagementClient) listActiveSecurityAdminRulesHandleResponse(resp *http.Response) (ManagementClientListActiveSecurityAdminRulesResponse, error) {\n\tresult := ManagementClientListActiveSecurityAdminRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActiveSecurityAdminRulesListResult); err != nil {\n\t\treturn ManagementClientListActiveSecurityAdminRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *RouteFilterRulesClient) listByRouteFilterHandleResponse(resp *http.Response) (RouteFilterRulesListByRouteFilterResponse, error) {\n\tresult := RouteFilterRulesListByRouteFilterResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteFilterRuleListResult); err != nil {\n\t\treturn RouteFilterRulesListByRouteFilterResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *AlertRuleIncidentsClient) listByAlertRuleHandleResponse(resp *azcore.Response) (IncidentListResultResponse, error) {\n\tvar val *IncidentListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn IncidentListResultResponse{}, err\n\t}\n\treturn IncidentListResultResponse{RawResponse: resp.Response, IncidentListResult: val}, nil\n}",
"func (client *ManagementClient) listNetworkManagerEffectiveSecurityAdminRulesHandleResponse(resp *http.Response) (ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse, error) {\n\tresult := ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagerEffectiveSecurityAdminRulesListResult); err != nil {\n\t\treturn ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func GetAuthorization(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tdb := acme.MustDatabaseFromContext(ctx)\n\tlinker := acme.MustLinkerFromContext(ctx)\n\n\tacc, err := accountFromContext(ctx)\n\tif err != nil {\n\t\trender.Error(w, err)\n\t\treturn\n\t}\n\taz, err := db.GetAuthorization(ctx, chi.URLParam(r, \"authzID\"))\n\tif err != nil {\n\t\trender.Error(w, acme.WrapErrorISE(err, \"error retrieving authorization\"))\n\t\treturn\n\t}\n\tif acc.ID != az.AccountID {\n\t\trender.Error(w, acme.NewError(acme.ErrorUnauthorizedType,\n\t\t\t\"account '%s' does not own authorization '%s'\", acc.ID, az.ID))\n\t\treturn\n\t}\n\tif err = az.UpdateStatus(ctx, db); err != nil {\n\t\trender.Error(w, acme.WrapErrorISE(err, \"error updating authorization status\"))\n\t\treturn\n\t}\n\n\tlinker.LinkAuthorization(ctx, az)\n\n\tw.Header().Set(\"Location\", linker.GetLink(ctx, acme.AuthzLinkType, az.ID))\n\trender.JSON(w, az)\n}",
"func (p *NotificationHubsListAuthorizationRulesPager) PageResponse() NotificationHubsListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func (client *AssessmentsClient) listByGroupHandleResponse(resp *http.Response) (AssessmentsClientListByGroupResponse, error) {\n\tresult := AssessmentsClientListByGroupResponse{}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AssessmentResultList); err != nil {\n\t\treturn AssessmentsClientListByGroupResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *DeploymentOperationsClient) getAtManagementGroupScopeHandleResponse(resp *azcore.Response) (DeploymentOperationResponse, error) {\n\tvar val *DeploymentOperation\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DeploymentOperationResponse{}, err\n\t}\n\treturn DeploymentOperationResponse{RawResponse: resp.Response, DeploymentOperation: val}, nil\n}",
"func CreateListDispatchRuleResponse() (response *ListDispatchRuleResponse) {\n\tresponse = &ListDispatchRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (client *AccountsClient) getPropertiesHandleResponse(resp *http.Response) (AccountsClientGetPropertiesResponse, error) {\n\tresult := AccountsClientGetPropertiesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Account); err != nil {\n\t\treturn AccountsClientGetPropertiesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *DeploymentOperationsClient) getAtManagementGroupScopeHandleResponse(resp *http.Response) (DeploymentOperationsGetAtManagementGroupScopeResponse, error) {\n\tresult := DeploymentOperationsGetAtManagementGroupScopeResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeploymentOperation); err != nil {\n\t\treturn DeploymentOperationsGetAtManagementGroupScopeResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *AuthorizationLoginLinksClient) postHandleResponse(resp *http.Response) (AuthorizationLoginLinksClientPostResponse, error) {\n\tresult := AuthorizationLoginLinksClientPostResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationLoginResponseContract); err != nil {\n\t\treturn AuthorizationLoginLinksClientPostResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *AuthorizationPoliciesClient) listByHubHandleResponse(resp *http.Response) (AuthorizationPoliciesClientListByHubResponse, error) {\n\tresult := AuthorizationPoliciesClientListByHubResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationPolicyListResult); err != nil {\n\t\treturn AuthorizationPoliciesClientListByHubResponse{}, err\n\t}\n\treturn result, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewListAuthorizationRulesPager Gets the authorization rules for an Event Hub. Generated from API version 20170401 resourceGroupName Name of the resource group within the azure subscription. namespaceName The Namespace name eventHubName The Event Hub name options EventHubsClientListAuthorizationRulesOptions contains the optional parameters for the EventHubsClient.NewListAuthorizationRulesPager method.
|
func (client *EventHubsClient) NewListAuthorizationRulesPager(resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) *runtime.Pager[EventHubsClientListAuthorizationRulesResponse] {
return runtime.NewPager(runtime.PagingHandler[EventHubsClientListAuthorizationRulesResponse]{
More: func(page EventHubsClientListAuthorizationRulesResponse) bool {
return page.NextLink != nil && len(*page.NextLink) > 0
},
Fetcher: func(ctx context.Context, page *EventHubsClientListAuthorizationRulesResponse) (EventHubsClientListAuthorizationRulesResponse, error) {
var req *policy.Request
var err error
if page == nil {
req, err = client.listAuthorizationRulesCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, options)
} else {
req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
}
if err != nil {
return EventHubsClientListAuthorizationRulesResponse{}, err
}
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return EventHubsClientListAuthorizationRulesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return EventHubsClientListAuthorizationRulesResponse{}, runtime.NewResponseError(resp)
}
return client.listAuthorizationRulesHandleResponse(resp)
},
})
}
|
[
"func (client *EventHubsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *AuthorizationPoliciesClient) NewListByHubPager(resourceGroupName string, hubName string, options *AuthorizationPoliciesClientListByHubOptions) *runtime.Pager[AuthorizationPoliciesClientListByHubResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[AuthorizationPoliciesClientListByHubResponse]{\n\t\tMore: func(page AuthorizationPoliciesClientListByHubResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *AuthorizationPoliciesClientListByHubResponse) (AuthorizationPoliciesClientListByHubResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByHubCreateRequest(ctx, resourceGroupName, hubName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn AuthorizationPoliciesClientListByHubResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn AuthorizationPoliciesClientListByHubResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn AuthorizationPoliciesClientListByHubResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByHubHandleResponse(resp)\n\t\t},\n\t})\n}",
"func NewEventHubAuthorizationRule(ctx *pulumi.Context,\n\tname string, args *EventHubAuthorizationRuleArgs, opts ...pulumi.ResourceOption) (*EventHubAuthorizationRule, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.EventHubName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'EventHubName'\")\n\t}\n\tif args.NamespaceName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'NamespaceName'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.Rights == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Rights'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:eventhub/v20210601preview:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:eventhub:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:eventhub:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:eventhub/v20140901:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:eventhub/v20140901:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:eventhub/v20150801:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:eventhub/v20150801:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:eventhub/v20170401:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:eventhub/v20170401:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:eventhub/v20180101preview:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:eventhub/v20180101preview:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:eventhub/v20210101preview:EventHubAuthorizationRule\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:eventhub/v20210101preview:EventHubAuthorizationRule\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource EventHubAuthorizationRule\n\terr := ctx.RegisterResource(\"azure-native:eventhub/v20210601preview:EventHubAuthorizationRule\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (client *AdminRulesClient) NewListPager(resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, options *AdminRulesClientListOptions) *runtime.Pager[AdminRulesClientListResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[AdminRulesClientListResponse]{\n\t\tMore: func(page AdminRulesClientListResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *AdminRulesClientListResponse) (AdminRulesClientListResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn AdminRulesClientListResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn AdminRulesClientListResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn AdminRulesClientListResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listHandleResponse(resp)\n\t\t},\n\t})\n}",
"func NewAutomationRulesListPage(cur AutomationRulesList, getNextPage func(context.Context, AutomationRulesList) (AutomationRulesList, error)) AutomationRulesListPage {\n\treturn AutomationRulesListPage{\n\t\tfn: getNextPage,\n\t\tarl: cur,\n\t}\n}",
"func (client *AlertRulesClient) NewListByResourceGroupPager(resourceGroupName string, options *AlertRulesClientListByResourceGroupOptions) *runtime.Pager[AlertRulesClientListByResourceGroupResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[AlertRulesClientListByResourceGroupResponse]{\n\t\tMore: func(page AlertRulesClientListByResourceGroupResponse) bool {\n\t\t\treturn false\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *AlertRulesClientListByResourceGroupResponse) (AlertRulesClientListByResourceGroupResponse, error) {\n\t\t\tctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, \"AlertRulesClient.NewListByResourceGroupPager\")\n\t\t\treq, err := client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)\n\t\t\tif err != nil {\n\t\t\t\treturn AlertRulesClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn AlertRulesClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn AlertRulesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByResourceGroupHandleResponse(resp)\n\t\t},\n\t\tTracer: client.internal.Tracer(),\n\t})\n}",
"func (client *FirewallRulesClient) NewListPager(resourceGroupName string, cacheName string, options *FirewallRulesClientListOptions) *runtime.Pager[FirewallRulesClientListResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[FirewallRulesClientListResponse]{\n\t\tMore: func(page FirewallRulesClientListResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *FirewallRulesClientListResponse) (FirewallRulesClientListResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listCreateRequest(ctx, resourceGroupName, cacheName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn FirewallRulesClientListResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn FirewallRulesClientListResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn FirewallRulesClientListResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (client *QueuesClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, options *QueuesListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *EventHubsClient) NewListByNamespacePager(resourceGroupName string, namespaceName string, options *EventHubsClientListByNamespaceOptions) *runtime.Pager[EventHubsClientListByNamespaceResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[EventHubsClientListByNamespaceResponse]{\n\t\tMore: func(page EventHubsClientListByNamespaceResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *EventHubsClientListByNamespaceResponse) (EventHubsClientListByNamespaceResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByNamespaceCreateRequest(ctx, resourceGroupName, namespaceName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn EventHubsClientListByNamespaceResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn EventHubsClientListByNamespaceResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn EventHubsClientListByNamespaceResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByNamespaceHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (client *PredictionsClient) NewListByHubPager(resourceGroupName string, hubName string, options *PredictionsClientListByHubOptions) *runtime.Pager[PredictionsClientListByHubResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[PredictionsClientListByHubResponse]{\n\t\tMore: func(page PredictionsClientListByHubResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *PredictionsClientListByHubResponse) (PredictionsClientListByHubResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByHubCreateRequest(ctx, resourceGroupName, hubName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn PredictionsClientListByHubResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn PredictionsClientListByHubResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn PredictionsClientListByHubResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByHubHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (client *EventHubsClient) listAuthorizationRulesHandleResponse(resp *http.Response) (EventHubsClientListAuthorizationRulesResponse, error) {\n\tresult := EventHubsClientListAuthorizationRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult); err != nil {\n\t\treturn EventHubsClientListAuthorizationRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (a *Adapter) CreateRules(rules []domain.Rule) (result domain.AdapterResult) {\n\tinput := ec2.AuthorizeSecurityGroupIngressInput{\n\t\tIpPermissions: a.createIPPermissions(rules),\n\t\tGroupId: aws.String(a.securityGroupID),\n\t}\n\n\t_, err := a.client.AuthorizeSecurityGroupIngress(context.Background(), &input)\n\tresult.Error = err\n\n\treturn\n}",
"func (client *EventHubsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *AlertRulesClient) NewListBySubscriptionPager(options *AlertRulesClientListBySubscriptionOptions) *runtime.Pager[AlertRulesClientListBySubscriptionResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[AlertRulesClientListBySubscriptionResponse]{\n\t\tMore: func(page AlertRulesClientListBySubscriptionResponse) bool {\n\t\t\treturn false\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *AlertRulesClientListBySubscriptionResponse) (AlertRulesClientListBySubscriptionResponse, error) {\n\t\t\tctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, \"AlertRulesClient.NewListBySubscriptionPager\")\n\t\t\treq, err := client.listBySubscriptionCreateRequest(ctx, options)\n\t\t\tif err != nil {\n\t\t\t\treturn AlertRulesClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn AlertRulesClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn AlertRulesClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listBySubscriptionHandleResponse(resp)\n\t\t},\n\t\tTracer: client.internal.Tracer(),\n\t})\n}",
"func (p *NotificationHubsListAuthorizationRulesPager) PageResponse() NotificationHubsListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func ListRules(c *gin.Context) {\n\tvar (\n\t\thttpRule Rule\n\t\tres []Rule\n\t\tcount int64\n\t\torder = c.Query(\"order\")\n\t\tpageSize = 10\n\t)\n\n\tif c.Query(\"pageSize\") != \"\" {\n\t\tif n, err := strconv.Atoi(c.Query(\"pageSize\")); err == nil {\n\t\t\tif n > 0 && n < 100 {\n\t\t\t\tpageSize = n\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := c.ShouldBind(&httpRule); err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"status\": \"failed\",\n\t\t\t\"error\": err.Error(),\n\t\t\t\"result\": nil,\n\t\t})\n\t\treturn\n\t}\n\n\tdb := database.DB.Model(&httpRule)\n\tif httpRule.Name != \"\" {\n\t\tdb.Where(\"name = ?\", httpRule.Name)\n\t}\n\tdb.Count(&count)\n\n\tpage, err := strconv.Atoi(c.Query(\"page\"))\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"status\": \"failed\",\n\t\t\t\"error\": err.Error(),\n\t\t\t\"result\": nil,\n\t\t})\n\t\treturn\n\t}\n\n\tif order != \"asc\" {\n\t\torder = \"desc\"\n\t}\n\n\tif err := db.Order(\"rank desc\").Order(\"id\" + \" \" + order).Count(&count).Offset((page - 1) * pageSize).Limit(pageSize).Find(&res).Error; err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"status\": \"failed\",\n\t\t\t\"error\": err.Error(),\n\t\t\t\"data\": nil,\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(200, gin.H{\n\t\t\"status\": \"succeed\",\n\t\t\"error\": nil,\n\t\t\"result\": gin.H{\"count\": count, \"data\": res},\n\t})\n}",
"func ListRules(sc *gophercloud.ServiceClient, opts RulesListOptsBuilder, policyId string) pagination.Pager {\n\n\turl := rulesrootURL(sc, policyId)\n\n\tif opts != nil {\n\t\tqueryString, err := opts.ToPolicyRulesListMap()\n\t\tif err != nil {\n\n\t\t\treturn pagination.Pager{Err: err}\n\t\t}\n\n\t\turl += queryString\n\n\t}\n\n\treturn pagination.NewPager(sc, url, func(r pagination.PageResult) pagination.Page {\n\t\treturn PolicyRulesPage{pagination.LinkedPageBase{PageResult: r}}\n\t})\n\n}",
"func (client *EncryptionScopesClient) NewListPager(resourceGroupName string, accountName string, options *EncryptionScopesClientListOptions) *runtime.Pager[EncryptionScopesClientListResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[EncryptionScopesClientListResponse]{\n\t\tMore: func(page EncryptionScopesClientListResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *EncryptionScopesClientListResponse) (EncryptionScopesClientListResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listCreateRequest(ctx, resourceGroupName, accountName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn EncryptionScopesClientListResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.pl.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn EncryptionScopesClientListResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn EncryptionScopesClientListResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (client *ApplicationGroupClient) NewListByNamespacePager(resourceGroupName string, namespaceName string, options *ApplicationGroupClientListByNamespaceOptions) *runtime.Pager[ApplicationGroupClientListByNamespaceResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ApplicationGroupClientListByNamespaceResponse]{\n\t\tMore: func(page ApplicationGroupClientListByNamespaceResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ApplicationGroupClientListByNamespaceResponse) (ApplicationGroupClientListByNamespaceResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByNamespaceCreateRequest(ctx, resourceGroupName, namespaceName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ApplicationGroupClientListByNamespaceResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ApplicationGroupClientListByNamespaceResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ApplicationGroupClientListByNamespaceResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByNamespaceHandleResponse(resp)\n\t\t},\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
listAuthorizationRulesCreateRequest creates the ListAuthorizationRules request.
|
// listAuthorizationRulesCreateRequest creates the ListAuthorizationRules request.
// It validates and substitutes every path parameter, then applies the api-version
// query parameter and the JSON Accept header.
func (client *EventHubsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) (*policy.Request, error) {
	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules"
	// Path parameters are checked and substituted in declaration order so the
	// first empty one reported matches the original generated behavior.
	pathParams := []struct {
		name        string // identifier used in the error message
		placeholder string // token to replace in urlPath
		value       string
	}{
		{"resourceGroupName", "{resourceGroupName}", resourceGroupName},
		{"namespaceName", "{namespaceName}", namespaceName},
		{"eventHubName", "{eventHubName}", eventHubName},
		{"client.subscriptionID", "{subscriptionId}", client.subscriptionID},
	}
	for _, p := range pathParams {
		if p.value == "" {
			return nil, errors.New("parameter " + p.name + " cannot be empty")
		}
		urlPath = strings.ReplaceAll(urlPath, p.placeholder, url.PathEscape(p.value))
	}
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2017-04-01")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Accept"] = []string{"application/json"}
	return req, nil
}
|
[
"func (client *QueuesClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, options *QueuesListAuthorizationRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func CreateListDispatchRuleRequest() (request *ListDispatchRuleRequest) {\n\trequest = &ListDispatchRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ARMS\", \"2019-08-08\", \"ListDispatchRule\", \"arms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *ManagementClient) listActiveSecurityAdminRulesCreateRequest(ctx context.Context, resourceGroupName string, networkManagerName string, parameters ActiveConfigurationParameter, options *ManagementClientListActiveSecurityAdminRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/listActiveSecurityAdminRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkManagerName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkManagerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkManagerName}\", url.PathEscape(networkManagerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}",
"func CreateListAllPrivacyRuleRequest() (request *ListAllPrivacyRuleRequest) {\n\trequest = &ListAllPrivacyRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"lto\", \"2021-07-07\", \"ListAllPrivacyRule\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *DataCollectionRuleAssociationsClient) listByRuleCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionRuleName string, options *DataCollectionRuleAssociationsClientListByRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}/associations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionRuleName}\", url.PathEscape(dataCollectionRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *RouteFilterRulesClient) listByRouteFilterCreateRequest(ctx context.Context, resourceGroupName string, routeFilterName string, options *RouteFilterRulesListByRouteFilterOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif routeFilterName == \"\" {\n\t\treturn nil, errors.New(\"parameter routeFilterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{routeFilterName}\", url.PathEscape(routeFilterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *EventHubsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif eventHubName == \"\" {\n\t\treturn nil, errors.New(\"parameter eventHubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{eventHubName}\", url.PathEscape(eventHubName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *QueuesClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, options *QueuesGetAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *ManagementClient) listNetworkManagerEffectiveSecurityAdminRulesCreateRequest(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters QueryRequestOptions, options *ManagementClientListNetworkManagerEffectiveSecurityAdminRulesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/listNetworkManagerEffectiveSecurityAdminRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualNetworkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkName}\", url.PathEscape(virtualNetworkName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}",
"func (client *ApplicationGatewaysClient) listAvailableWafRuleSetsCreateRequest(ctx context.Context, options *ApplicationGatewaysListAvailableWafRuleSetsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DataCollectionRuleAssociationsClient) listByResourceCreateRequest(ctx context.Context, resourceURI string, options *DataCollectionRuleAssociationsClientListByResourceOptions) (*policy.Request, error) {\n\turlPath := \"/{resourceUri}/providers/Microsoft.Insights/dataCollectionRuleAssociations\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceUri}\", resourceURI)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (m *ListRulesRequest) Validate() error {\n\treturn m.validate(false)\n}",
"func (client *QueuesClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule, options *QueuesCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif queueName == \"\" {\n\t\treturn nil, errors.New(\"parameter queueName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{queueName}\", url.PathEscape(queueName))\n\tif authorizationRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter authorizationRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{authorizationRuleName}\", url.PathEscape(authorizationRuleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (client *SetDefinitionsClient) listBuiltInCreateRequest(ctx context.Context, options *SetDefinitionsClientListBuiltInOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Authorization/policySetDefinitions\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateListRepoSyncRuleRequest() (request *ListRepoSyncRuleRequest) {\n\trequest = &ListRepoSyncRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cr\", \"2018-12-01\", \"ListRepoSyncRule\", \"acr\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (s *RulesService) List(ctx context.Context, resourceID int) ([]*Rule, *http.Response, error) {\n\treq, err := s.client.NewRequest(ctx,\n\t\thttp.MethodGet,\n\t\tfmt.Sprintf(rulesURL, resourceID), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\trules := make([]*Rule, 0)\n\n\tresp, err := s.client.Do(req, &rules)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn rules, resp, nil\n}",
"func (mr *MockProjectsClientMockRecorder) ListRules(ctx, in interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, in}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListRules\", reflect.TypeOf((*MockProjectsClient)(nil).ListRules), varargs...)\n}",
"func (client *EventHubsClient) NewListAuthorizationRulesPager(resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) *runtime.Pager[EventHubsClientListAuthorizationRulesResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[EventHubsClientListAuthorizationRulesResponse]{\n\t\tMore: func(page EventHubsClientListAuthorizationRulesResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *EventHubsClientListAuthorizationRulesResponse) (EventHubsClientListAuthorizationRulesResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listAuthorizationRulesCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn EventHubsClientListAuthorizationRulesResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn EventHubsClientListAuthorizationRulesResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn EventHubsClientListAuthorizationRulesResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listAuthorizationRulesHandleResponse(resp)\n\t\t},\n\t})\n}",
"func ListRules(c *gin.Context) {\n\tvar (\n\t\thttpRule Rule\n\t\tres []Rule\n\t\tcount int64\n\t\torder = c.Query(\"order\")\n\t\tpageSize = 10\n\t)\n\n\tif c.Query(\"pageSize\") != \"\" {\n\t\tif n, err := strconv.Atoi(c.Query(\"pageSize\")); err == nil {\n\t\t\tif n > 0 && n < 100 {\n\t\t\t\tpageSize = n\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := c.ShouldBind(&httpRule); err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"status\": \"failed\",\n\t\t\t\"error\": err.Error(),\n\t\t\t\"result\": nil,\n\t\t})\n\t\treturn\n\t}\n\n\tdb := database.DB.Model(&httpRule)\n\tif httpRule.Name != \"\" {\n\t\tdb.Where(\"name = ?\", httpRule.Name)\n\t}\n\tdb.Count(&count)\n\n\tpage, err := strconv.Atoi(c.Query(\"page\"))\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"status\": \"failed\",\n\t\t\t\"error\": err.Error(),\n\t\t\t\"result\": nil,\n\t\t})\n\t\treturn\n\t}\n\n\tif order != \"asc\" {\n\t\torder = \"desc\"\n\t}\n\n\tif err := db.Order(\"rank desc\").Order(\"id\" + \" \" + order).Count(&count).Offset((page - 1) * pageSize).Limit(pageSize).Find(&res).Error; err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"status\": \"failed\",\n\t\t\t\"error\": err.Error(),\n\t\t\t\"data\": nil,\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(200, gin.H{\n\t\t\"status\": \"succeed\",\n\t\t\"error\": nil,\n\t\t\"result\": gin.H{\"count\": count, \"data\": res},\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
listAuthorizationRulesHandleResponse handles the ListAuthorizationRules response.
|
// listAuthorizationRulesHandleResponse handles the ListAuthorizationRules response
// by decoding the JSON body into the result's AuthorizationRuleListResult field.
func (client *EventHubsClient) listAuthorizationRulesHandleResponse(resp *http.Response) (EventHubsClientListAuthorizationRulesResponse, error) {
	var result EventHubsClientListAuthorizationRulesResponse
	err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult)
	if err != nil {
		// On decode failure return the zero-value response, never a partial one.
		return EventHubsClientListAuthorizationRulesResponse{}, err
	}
	return result, nil
}
|
[
"func (client *QueuesClient) listAuthorizationRulesHandleResponse(resp *http.Response) (QueuesListAuthorizationRulesResponse, error) {\n\tresult := QueuesListAuthorizationRulesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRuleListResult); err != nil {\n\t\treturn QueuesListAuthorizationRulesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) listActiveSecurityAdminRulesHandleResponse(resp *http.Response) (ManagementClientListActiveSecurityAdminRulesResponse, error) {\n\tresult := ManagementClientListActiveSecurityAdminRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActiveSecurityAdminRulesListResult); err != nil {\n\t\treturn ManagementClientListActiveSecurityAdminRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *RouteFilterRulesClient) listByRouteFilterHandleResponse(resp *http.Response) (RouteFilterRulesListByRouteFilterResponse, error) {\n\tresult := RouteFilterRulesListByRouteFilterResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteFilterRuleListResult); err != nil {\n\t\treturn RouteFilterRulesListByRouteFilterResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *QueuesClient) getAuthorizationRuleHandleResponse(resp *http.Response) (QueuesGetAuthorizationRuleResponse, error) {\n\tresult := QueuesGetAuthorizationRuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRule); err != nil {\n\t\treturn QueuesGetAuthorizationRuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *ManagementClient) listNetworkManagerEffectiveSecurityAdminRulesHandleResponse(resp *http.Response) (ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse, error) {\n\tresult := ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagerEffectiveSecurityAdminRulesListResult); err != nil {\n\t\treturn ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *EventHubsClient) getAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientGetAuthorizationRuleResponse, error) {\n\tresult := EventHubsClientGetAuthorizationRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {\n\t\treturn EventHubsClientGetAuthorizationRuleResponse{}, err\n\t}\n\treturn result, nil\n}",
"func UnmarshalListZoneAccessRulesResp(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ListZoneAccessRulesResp)\n\terr = core.UnmarshalPrimitive(m, \"success\", &obj.Success)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"errors\", &obj.Errors)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"messages\", &obj.Messages)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result\", &obj.Result, UnmarshalZoneAccessRuleObject)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result_info\", &obj.ResultInfo, UnmarshalListZoneAccessRulesRespResultInfo)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (client *AuthorizationPoliciesClient) listByHubHandleResponse(resp *http.Response) (AuthorizationPoliciesClientListByHubResponse, error) {\n\tresult := AuthorizationPoliciesClientListByHubResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationPolicyListResult); err != nil {\n\t\treturn AuthorizationPoliciesClientListByHubResponse{}, err\n\t}\n\treturn result, nil\n}",
"func DisplayRules() ([]Rule, error) {\n\n\texecRequest := ExecuteRequest{}\n\texecRequest.Init()\n\n\tgetFolder := []byte{0x3f, AuthSession.LogonID, 0x00, 0x01, 0x40}\n\tgetFolder = append(getFolder, []byte{0x12, AuthSession.LogonID, 0x01, 0x00, 0x02, 0x00, 0x14, 0x00, 0x74, 0x66, 0x1f, 0x00, 0x82, 0x66}...)\n\tgetFolder = append(getFolder, []byte{0x15, AuthSession.LogonID, 0x01, 0x00, 0x01, 0x32, 0x00}...)\n\texecRequest.RopBuffer.ROP.RopsList = getFolder\n\n\texecRequest.RopBuffer.ROP.ServerObjectHandleTable = []byte{0x01, 0x00, 0x00, AuthSession.LogonID, 0xFF, 0xFF, 0xFF, 0xFF}\n\n\texecRequest.CalcSizes()\n\n\t//fetch folder\n\tif AuthSession.Transport == HTTP { // HTTP\n\t\tresp, rbody := mapiRequestHTTP(AuthSession.URL.String(), \"Execute\", BodyToBytes(execRequest))\n\t\tresponseBody, err := readResponse(resp.Header, rbody)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[x] A HTTP server side error occurred.\\n %s\", err)\n\t\t}\n\t\texecResponse := ExecuteResponse{}\n\t\texecResponse.Unmarshal(responseBody)\n\n\t\trules, _ := DecodeRulesResponse(execResponse.RopBuffer)\n\t\tif rules == nil {\n\t\t\treturn nil, fmt.Errorf(\"[x] Error retrieving rules\")\n\t\t}\n\t\treturn rules, nil\n\n\t}\n\treturn nil, fmt.Errorf(\"[x] An Unspecified error occurred\")\n}",
"func (client ACGClient) GetRuleListResponder(resp *http.Response) (result AccessControlGroupRuleListResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client *GraphQLAPIResolverPolicyClient) listByResolverHandleResponse(resp *http.Response) (GraphQLAPIResolverPolicyClientListByResolverResponse, error) {\n\tresult := GraphQLAPIResolverPolicyClientListByResolverResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyCollection); err != nil {\n\t\treturn GraphQLAPIResolverPolicyClientListByResolverResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (m *AuthorizationServerResource) ListAuthorizationServerPolicyRules(ctx context.Context, authServerId string, policyId string) ([]*AuthorizationServerPolicyRule, *Response, error) {\n\turl := fmt.Sprintf(\"/api/v1/authorizationServers/%v/policies/%v/rules\", authServerId, policyId)\n\n\trq := m.client.CloneRequestExecutor()\n\n\treq, err := rq.WithAccept(\"application/json\").WithContentType(\"application/json\").NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar authorizationServerPolicyRule []*AuthorizationServerPolicyRule\n\n\tresp, err := rq.Do(ctx, req, &authorizationServerPolicyRule)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn authorizationServerPolicyRule, resp, nil\n}",
"func (client *ApplicationGatewaysClient) listAvailableWafRuleSetsHandleResponse(resp *http.Response) (ApplicationGatewaysListAvailableWafRuleSetsResponse, error) {\n\tresult := ApplicationGatewaysListAvailableWafRuleSetsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationGatewayAvailableWafRuleSetsResult); err != nil {\n\t\treturn ApplicationGatewaysListAvailableWafRuleSetsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *DeploymentOperationsClient) listAtManagementGroupScopeHandleResponse(resp *azcore.Response) (DeploymentOperationsListResultResponse, error) {\n\tvar val *DeploymentOperationsListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DeploymentOperationsListResultResponse{}, err\n\t}\n\treturn DeploymentOperationsListResultResponse{RawResponse: resp.Response, DeploymentOperationsListResult: val}, nil\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeHandleResponse(resp *azcore.Response) (DeploymentOperationsListResultResponse, error) {\n\tvar val *DeploymentOperationsListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DeploymentOperationsListResultResponse{}, err\n\t}\n\treturn DeploymentOperationsListResultResponse{RawResponse: resp.Response, DeploymentOperationsListResult: val}, nil\n}",
"func (client *AlertRuleIncidentsClient) listByAlertRuleHandleResponse(resp *azcore.Response) (IncidentListResultResponse, error) {\n\tvar val *IncidentListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn IncidentListResultResponse{}, err\n\t}\n\treturn IncidentListResultResponse{RawResponse: resp.Response, IncidentListResult: val}, nil\n}",
"func (client *QueuesClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (QueuesCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := QueuesCreateOrUpdateAuthorizationRuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SBAuthorizationRule); err != nil {\n\t\treturn QueuesCreateOrUpdateAuthorizationRuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (s *RulesService) List(ctx context.Context, resourceID int) ([]*Rule, *http.Response, error) {\n\treq, err := s.client.NewRequest(ctx,\n\t\thttp.MethodGet,\n\t\tfmt.Sprintf(rulesURL, resourceID), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\trules := make([]*Rule, 0)\n\n\tresp, err := s.client.Do(req, &rules)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn rules, resp, nil\n}",
"func (client *RoleAssignmentScheduleRequestsClient) listForScopeHandleResponse(resp *azcore.Response) (RoleAssignmentScheduleRequestListResultResponse, error) {\n\tvar val *RoleAssignmentScheduleRequestListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RoleAssignmentScheduleRequestListResultResponse{}, err\n\t}\n\treturn RoleAssignmentScheduleRequestListResultResponse{RawResponse: resp.Response, RoleAssignmentScheduleRequestListResult: val}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewListByNamespacePager gets all the Event Hubs in a Namespace. Generated from API version 2017-04-01. resourceGroupName - Name of the resource group within the Azure subscription. namespaceName - The Namespace name. options - EventHubsClientListByNamespaceOptions contains the optional parameters for the EventHubsClient.NewListByNamespacePager method.
|
func (client *EventHubsClient) NewListByNamespacePager(resourceGroupName string, namespaceName string, options *EventHubsClientListByNamespaceOptions) *runtime.Pager[EventHubsClientListByNamespaceResponse] {
return runtime.NewPager(runtime.PagingHandler[EventHubsClientListByNamespaceResponse]{
More: func(page EventHubsClientListByNamespaceResponse) bool {
return page.NextLink != nil && len(*page.NextLink) > 0
},
Fetcher: func(ctx context.Context, page *EventHubsClientListByNamespaceResponse) (EventHubsClientListByNamespaceResponse, error) {
var req *policy.Request
var err error
if page == nil {
req, err = client.listByNamespaceCreateRequest(ctx, resourceGroupName, namespaceName, options)
} else {
req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
}
if err != nil {
return EventHubsClientListByNamespaceResponse{}, err
}
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return EventHubsClientListByNamespaceResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return EventHubsClientListByNamespaceResponse{}, runtime.NewResponseError(resp)
}
return client.listByNamespaceHandleResponse(resp)
},
})
}
|
[
"func (client *ApplicationGroupClient) NewListByNamespacePager(resourceGroupName string, namespaceName string, options *ApplicationGroupClientListByNamespaceOptions) *runtime.Pager[ApplicationGroupClientListByNamespaceResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ApplicationGroupClientListByNamespaceResponse]{\n\t\tMore: func(page ApplicationGroupClientListByNamespaceResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ApplicationGroupClientListByNamespaceResponse) (ApplicationGroupClientListByNamespaceResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByNamespaceCreateRequest(ctx, resourceGroupName, namespaceName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ApplicationGroupClientListByNamespaceResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ApplicationGroupClientListByNamespaceResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ApplicationGroupClientListByNamespaceResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByNamespaceHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (client *EventHubsClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *EventHubsClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01\")\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func ExampleNew_limitToNamespaces() {\n\tcfg, err := config.GetConfig()\n\tif err != nil {\n\t\tlog.Error(err, \"unable to get kubeconfig\")\n\t\tos.Exit(1)\n\t}\n\n\tmgr, err := manager.New(cfg, manager.Options{\n\t\tNewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {\n\t\t\topts.Namespaces = []string{\"namespace1\", \"namespace2\"}\n\t\t\treturn cache.New(config, opts)\n\t\t}},\n\t)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to set up manager\")\n\t\tos.Exit(1)\n\t}\n\tlog.Info(\"created manager\", \"manager\", mgr)\n}",
"func (client *ApplicationGroupClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *ApplicationGroupClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/applicationGroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *ClustersClient) listNamespacesCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientListNamespacesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/namespaces\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/functions/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/containers/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func ListNamespace(ns string) ListOption {\n\treturn func(o *ListOptions) {\n\t\to.Namespace = SerializeResourceName(ns)\n\t}\n}",
"func NewNamespace(uriToPrefix map[string]string) *Namespace {\n\tns := &Namespace{\n\t\turiToPrefix: make(map[string]string),\n\t}\n\tfor uri, prefix := range uriToPrefix {\n\t\tns.Register(uri, prefix)\n\t}\n\treturn ns\n}",
"func NewNamespace(opts ...NamespaceOption) (*Namespace, error) {\n\tns := &Namespace{}\n\tns.newClientFn = ns.newClientImpl\n\n\tfor _, opt := range opts {\n\t\terr := opt(ns)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn ns, nil\n}",
"func (o *GetEventsGameTelemetryV1AdminNamespacesNamespaceEventsGetParams) SetNamespace(namespace string) {\n\to.Namespace = namespace\n}",
"func NewNamespaceLister(client kubernetes.Interface) v1.NamespaceLister {\n\treturn NewFilteredNamespaceLister(client, nil)\n}",
"func (s eventNamespaceLister) List(selector labels.Selector) (ret []*eventsv1beta1.Event, err error) {\n\tlistopt := v1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t}\n\tif s.tweakListOptions != nil {\n\t\ts.tweakListOptions(&listopt)\n\t}\n\tlist, err := s.client.EventsV1beta1().Events(s.namespace).List(listopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range list.Items {\n\t\tret = append(ret, &list.Items[i])\n\t}\n\treturn ret, nil\n}",
"func (s awsVpcEndpointNamespaceLister) List(selector labels.Selector) (ret []*v1.AwsVpcEndpoint, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.AwsVpcEndpoint))\n\t})\n\treturn ret, err\n}",
"func NewModelsNamespace() *ModelsNamespace {\n\tthis := ModelsNamespace{}\n\treturn &this\n}",
"func NewEventHubNamespace(ctx *pulumi.Context,\n\tname string, args *EventHubNamespaceArgs, opts ...pulumi.ResourceOption) (*EventHubNamespace, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.Sku == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Sku'\")\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"defaultPrimaryConnectionString\",\n\t\t\"defaultPrimaryConnectionStringAlias\",\n\t\t\"defaultPrimaryKey\",\n\t\t\"defaultSecondaryConnectionString\",\n\t\t\"defaultSecondaryConnectionStringAlias\",\n\t\t\"defaultSecondaryKey\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource EventHubNamespace\n\terr := ctx.RegisterResource(\"azure:eventhub/eventHubNamespace:EventHubNamespace\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func CreateNamespacesEvent(namespaces []string) octant.Event {\n\treturn octant.Event{\n\t\tType: octant.EventTypeNamespaces,\n\t\tData: map[string]interface{}{\n\t\t\t\"namespaces\": namespaces,\n\t\t},\n\t}\n}",
"func (s awsInspectorResourceGroupNamespaceLister) List(selector labels.Selector) (ret []*v1.AwsInspectorResourceGroup, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.AwsInspectorResourceGroup))\n\t})\n\treturn ret, err\n}",
"func NewNamespaces(key string, opts Options) Namespaces {\n\tinstrumentOpts := opts.InstrumentOptions()\n\tn := &namespaces{\n\t\tkey: key,\n\t\tstore: opts.KVStore(),\n\t\topts: opts,\n\t\tnowFn: opts.ClockOptions().NowFn(),\n\t\tlog: instrumentOpts.Logger(),\n\t\truleSetKeyFn: opts.RuleSetKeyFn(),\n\t\tmatchRangePast: opts.MatchRangePast(),\n\t\tonNamespaceAddedFn: opts.OnNamespaceAddedFn(),\n\t\tonNamespaceRemovedFn: opts.OnNamespaceRemovedFn(),\n\t\tproto: &rulepb.Namespaces{},\n\t\trules: newNamespaceRuleSetsMap(namespaceRuleSetsMapOptions{}),\n\t\tmetrics: newNamespacesMetrics(instrumentOpts.MetricsScope()),\n\t\trequireNamespaceWatchOnInit: opts.RequireNamespaceWatchOnInit(),\n\t\tnsResolver: opts.NamespaceResolver(),\n\t}\n\tvalueOpts := runtime.NewOptions().\n\t\tSetInstrumentOptions(instrumentOpts).\n\t\tSetInitWatchTimeout(opts.InitWatchTimeout()).\n\t\tSetKVStore(n.store).\n\t\tSetUnmarshalFn(n.toNamespaces).\n\t\tSetProcessFn(n.process).\n\t\tSetInterruptedCh(opts.InterruptedCh())\n\tn.Value = runtime.NewValue(key, valueOpts)\n\treturn n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
listByNamespaceCreateRequest creates the ListByNamespace request.
|
func (client *EventHubsClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *EventHubsClientListByNamespaceOptions) (*policy.Request, error) {
urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs"
if resourceGroupName == "" {
return nil, errors.New("parameter resourceGroupName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
if namespaceName == "" {
return nil, errors.New("parameter namespaceName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName))
if client.subscriptionID == "" {
return nil, errors.New("parameter client.subscriptionID cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2017-04-01")
if options != nil && options.Skip != nil {
reqQP.Set("$skip", strconv.FormatInt(int64(*options.Skip), 10))
}
if options != nil && options.Top != nil {
reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
}
|
[
"func (client *ApplicationGroupClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *ApplicationGroupClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/applicationGroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (client *ClustersClient) listNamespacesCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientListNamespacesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/namespaces\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func NewNamespaceListRequest(server string) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/namespaces\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}",
"func (c *Client) NewListNamedspacesRequest(ctx context.Context, path string, pageLimit *int, pageOffset *string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\tvalues := u.Query()\n\tif pageLimit != nil {\n\t\ttmp134 := strconv.Itoa(*pageLimit)\n\t\tvalues.Set(\"page[limit]\", tmp134)\n\t}\n\tif pageOffset != nil {\n\t\tvalues.Set(\"page[offset]\", *pageOffset)\n\t}\n\tu.RawQuery = values.Encode()\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}",
"func NewServiceListRequest(server string, namespace string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"namespace\", namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/namespaces/%s/services\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}",
"func NewListNamespaceContext(ctx context.Context, r *http.Request, service *goa.Service) (*ListNamespaceContext, error) {\n\tvar err error\n\tresp := goa.ContextResponse(ctx)\n\tresp.Service = service\n\treq := goa.ContextRequest(ctx)\n\treq.Request = r\n\trctx := ListNamespaceContext{Context: ctx, ResponseData: resp, RequestData: req}\n\tparamProjectid := req.Params[\"projectid\"]\n\tif len(paramProjectid) > 0 {\n\t\trawProjectid := paramProjectid[0]\n\t\trctx.Projectid = rawProjectid\n\t}\n\treturn &rctx, err\n}",
"func (s userRegistrationRequestNamespaceLister) List(selector labels.Selector) (ret []*v1alpha.UserRegistrationRequest, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha.UserRegistrationRequest))\n\t})\n\treturn ret, err\n}",
"func (client *TagsClient) listCreateRequest(ctx context.Context, options *TagsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/tagNames\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func CreateListPackageRequest() (request *ListPackageRequest) {\n\trequest = &ListPackageRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"foas\", \"2018-11-11\", \"ListPackage\", \"/api/v2/projects/[projectName]/packages\", \"foas\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func (s certRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.CertRequest, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.CertRequest))\n\t})\n\treturn ret, err\n}",
"func ListNamespace(ns string) ListOption {\n\treturn func(o *ListOptions) {\n\t\to.Namespace = SerializeResourceName(ns)\n\t}\n}",
"func (s gCPAccessKeyRequestNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.GCPAccessKeyRequest, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.GCPAccessKeyRequest))\n\t})\n\treturn ret, err\n}",
"func NewAppListRequest(server string, namespace string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"namespace\", namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/namespaces/%s/apps\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}",
"func (client *CassandraResourcesClient) listCassandraKeyspacesCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *CassandraResourcesListCassandraKeyspacesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *LinkedServicesClient) listByWorkspaceCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *LinkedServicesListByWorkspaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/linkedServices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func CreateListProductQuotasRequest() (request *ListProductQuotasRequest) {\n\trequest = &ListProductQuotasRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"quotas\", \"2020-05-10\", \"ListProductQuotas\", \"quotas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeCreateRequest(ctx context.Context, deploymentName string, options *DeploymentOperationsListAtSubscriptionScopeOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations\"\n\tif deploymentName == \"\" {\n\t\treturn nil, errors.New(\"parameter deploymentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeCreateRequest(ctx context.Context, deploymentName string, options *DeploymentOperationsListAtSubscriptionScopeOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deploymentName}\", url.PathEscape(deploymentName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\tquery.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/functions/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
listByNamespaceHandleResponse handles the ListByNamespace response.
|
// listByNamespaceHandleResponse handles the ListByNamespace response by
// decoding the JSON body into the list result; a decode failure yields the
// zero response and the error.
func (client *EventHubsClient) listByNamespaceHandleResponse(resp *http.Response) (EventHubsClientListByNamespaceResponse, error) {
	var out EventHubsClientListByNamespaceResponse
	if err := runtime.UnmarshalAsJSON(resp, &out.ListResult); err != nil {
		return EventHubsClientListByNamespaceResponse{}, err
	}
	return out, nil
}
|
[
"func (client *ApplicationGroupClient) listByNamespaceHandleResponse(resp *http.Response) (ApplicationGroupClientListByNamespaceResponse, error) {\n\tresult := ApplicationGroupClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationGroupListResult); err != nil {\n\t\treturn ApplicationGroupClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *ClustersClient) listNamespacesHandleResponse(resp *http.Response) (ClustersClientListNamespacesResponse, error) {\n\tresult := ClustersClientListNamespacesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.EHNamespaceIDListResult); err != nil {\n\t\treturn ClustersClientListNamespacesResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (w *ServerInterfaceWrapper) NamespaceList(ctx echo.Context) error {\n\tvar err error\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.NamespaceList(ctx)\n\treturn err\n}",
"func (s apiGatewayMethodResponseNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ApiGatewayMethodResponse, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.ApiGatewayMethodResponse))\n\t})\n\treturn ret, err\n}",
"func (s awsApiGatewayMethodResponseNamespaceLister) List(selector labels.Selector) (ret []*v1.AwsApiGatewayMethodResponse, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.AwsApiGatewayMethodResponse))\n\t})\n\treturn ret, err\n}",
"func (ag *fakeAgent) ListNamespace() []*netproto.Namespace {\n\treturn nil\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeHandleResponse(resp *azcore.Response) (DeploymentOperationsListResultResponse, error) {\n\tvar val *DeploymentOperationsListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DeploymentOperationsListResultResponse{}, err\n\t}\n\treturn DeploymentOperationsListResultResponse{RawResponse: resp.Response, DeploymentOperationsListResult: val}, nil\n}",
"func (client *DeploymentOperationsClient) listAtScopeHandleResponse(resp *azcore.Response) (DeploymentOperationsListResultResponse, error) {\n\tvar val *DeploymentOperationsListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DeploymentOperationsListResultResponse{}, err\n\t}\n\treturn DeploymentOperationsListResultResponse{RawResponse: resp.Response, DeploymentOperationsListResult: val}, nil\n}",
"func ParseNamespaceListResponse(rsp *http.Response) (*namespaceListResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &namespaceListResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest []Namespace\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 500:\n\t\tvar dest struct {\n\n\t\t\t// HTTP status code\n\t\t\tCode *int32 `json:\"code,omitempty\"`\n\t\t\tMessage *string `json:\"message,omitempty\"`\n\t\t}\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON500 = &dest\n\n\t}\n\n\treturn response, nil\n}",
"func ListNamespace(ns string) ListOption {\n\treturn func(o *ListOptions) {\n\t\to.Namespace = SerializeResourceName(ns)\n\t}\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/functions/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (s servicebusNamespaceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ServicebusNamespace, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.ServicebusNamespace))\n\t})\n\treturn ret, err\n}",
"func (s *serviceController) NamespaceList(ns string, selector labels.Selector) (ret []*commtypes.BcsService, err error) {\n\terr = ListAllByNamespace(s.indexer, ns, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*commtypes.BcsService))\n\t})\n\treturn ret, err\n}",
"func (client *DeploymentOperationsClient) listAtSubscriptionScopeHandleResponse(resp *http.Response) (DeploymentOperationsListAtSubscriptionScopeResponse, error) {\n\tresult := DeploymentOperationsListAtSubscriptionScopeResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeploymentOperationsListResult); err != nil {\n\t\treturn DeploymentOperationsListAtSubscriptionScopeResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (ms *NameSpaceStore) List() ([]*domain.Namespace, error) {\n\tnamespaces := []*domain.Namespace{}\n\tfor _, ns := range ms.namespaces {\n\t\tif ns != nil {\n\t\t\tnamespaces = append(namespaces, ns)\n\t\t}\n\t}\n\n\treturn namespaces, nil\n}",
"func (client *DeploymentOperationsClient) listAtScopeHandleResponse(resp *http.Response) (DeploymentOperationsListAtScopeResponse, error) {\n\tresult := DeploymentOperationsListAtScopeResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeploymentOperationsListResult); err != nil {\n\t\treturn DeploymentOperationsListAtScopeResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (c *Client) NamespaceList() ([]*NamespaceStruct, error) {\n\tvar err error\n\tvar result []*NamespaceStruct\n\tobj, err := c.executeListAndReturnResults(\"/namespace\")\n\tfor _, i := range obj {\n\t\tresult = append(result, i.(*NamespaceStruct))\n\t}\n\treturn result, err\n}",
"func (client *DeploymentOperationsClient) listAtManagementGroupScopeHandleResponse(resp *azcore.Response) (DeploymentOperationsListResultResponse, error) {\n\tvar val *DeploymentOperationsListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DeploymentOperationsListResultResponse{}, err\n\t}\n\treturn DeploymentOperationsListResultResponse{RawResponse: resp.Response, DeploymentOperationsListResult: val}, nil\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/containers/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
regenerateKeysHandleResponse handles the RegenerateKeys response.
|
// regenerateKeysHandleResponse handles the RegenerateKeys response by
// decoding the JSON body into the AccessKeys field; a decode failure yields
// the zero response and the error.
func (client *EventHubsClient) regenerateKeysHandleResponse(resp *http.Response) (EventHubsClientRegenerateKeysResponse, error) {
	var out EventHubsClientRegenerateKeysResponse
	if err := runtime.UnmarshalAsJSON(resp, &out.AccessKeys); err != nil {
		return EventHubsClientRegenerateKeysResponse{}, err
	}
	return out, nil
}
|
[
"func (client *QueuesClient) regenerateKeysHandleResponse(resp *http.Response) (QueuesRegenerateKeysResponse, error) {\n\tresult := QueuesRegenerateKeysResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil {\n\t\treturn QueuesRegenerateKeysResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *AccountsClient) regenerateKeyHandleResponse(resp *http.Response) (AccountsClientRegenerateKeyResponse, error) {\n\tresult := AccountsClientRegenerateKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListKeysResult); err != nil {\n\t\treturn AccountsClientRegenerateKeyResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *AuthorizationPoliciesClient) regeneratePrimaryKeyHandleResponse(resp *http.Response) (AuthorizationPoliciesClientRegeneratePrimaryKeyResponse, error) {\n\tresult := AuthorizationPoliciesClientRegeneratePrimaryKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationPolicy); err != nil {\n\t\treturn AuthorizationPoliciesClientRegeneratePrimaryKeyResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *SSHPublicKeysClient) generateKeyPairHandleResponse(resp *http.Response) (SSHPublicKeysClientGenerateKeyPairResponse, error) {\n\tresult := SSHPublicKeysClientGenerateKeyPairResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeyGenerateKeyPairResult); err != nil {\n\t\treturn SSHPublicKeysClientGenerateKeyPairResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *SSHPublicKeysClient) generateKeyPairHandleResponse(resp *http.Response) (SSHPublicKeysGenerateKeyPairResponse, error) {\n\tresult := SSHPublicKeysGenerateKeyPairResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeyGenerateKeyPairResult); err != nil {\n\t\treturn SSHPublicKeysGenerateKeyPairResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client *GatewayClient) listKeysHandleResponse(resp *http.Response) (GatewayClientListKeysResponse, error) {\n\tresult := GatewayClientListKeysResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GatewayKeysContract); err != nil {\n\t\treturn GatewayClientListKeysResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *AuthorizationPoliciesClient) regenerateSecondaryKeyHandleResponse(resp *http.Response) (AuthorizationPoliciesClientRegenerateSecondaryKeyResponse, error) {\n\tresult := AuthorizationPoliciesClientRegenerateSecondaryKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationPolicy); err != nil {\n\t\treturn AuthorizationPoliciesClientRegenerateSecondaryKeyResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *LocalUsersClient) regeneratePasswordHandleResponse(resp *http.Response) (LocalUsersClientRegeneratePasswordResponse, error) {\n\tresult := LocalUsersClientRegeneratePasswordResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalUserRegeneratePasswordResult); err != nil {\n\t\treturn LocalUsersClientRegeneratePasswordResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client SyncAgentsClient) GenerateKeyResponder(resp *http.Response) (result SyncAgentKeyProperties, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client *AccountsClient) listKeysHandleResponse(resp *http.Response) (AccountsClientListKeysResponse, error) {\n\tresult := AccountsClientListKeysResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListKeysResult); err != nil {\n\t\treturn AccountsClientListKeysResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client *GatewayClient) generateTokenHandleResponse(resp *http.Response) (GatewayClientGenerateTokenResponse, error) {\n\tresult := GatewayClientGenerateTokenResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GatewayTokenContract); err != nil {\n\t\treturn GatewayClientGenerateTokenResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (r *Registry) handleResponse(image string, res response) {\n\tr.inFlightLock.Lock()\n\tdefer r.inFlightLock.Unlock()\n\n\tfor _, recv := range r.inFlight[image] {\n\t\trecv <- response{\n\t\t\t// DeepCopy to ensure clients can work concurrently on the returned files map.\n\t\t\tFiles: res.Files.DeepCopy(),\n\t\t\tErr: res.Err,\n\t\t}\n\t}\n\n\tdelete(r.inFlight, image)\n}",
"func (client *VirtualNetworkGatewaysClient) generatevpnclientpackageHandleResponse(resp *azcore.Response) (StringResponse, error) {\n\tvar val *string\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn StringResponse{}, err\n\t}\n\treturn StringResponse{RawResponse: resp.Response, Value: val}, nil\n}",
"func KeypairGenerateHandler(w http.ResponseWriter, r *http.Request) {\n\n\tkeypair, ok := verifyKeypair(w, r)\n\tif !ok {\n\t\treturn\n\t}\n\n\tgo datastore.GenerateKeypair(keypair.AuthorityID, \"\", keypair.KeyName)\n\n\t// Return the URL to watch for the response\n\tstatusURL := fmt.Sprintf(\"/v1/keypairs/status/%s/%s\", keypair.AuthorityID, keypair.KeyName)\n\tw.WriteHeader(http.StatusAccepted)\n\tw.Header().Set(\"Location\", statusURL)\n\tformatBooleanResponse(true, \"\", \"\", statusURL, w)\n}",
"func GenerateResponse(d *deps.Dependencies, message string, sig crypt.BinarySignature, pubKey crypt.PEMEncoded) error {\n\tresponse := SignedMessage{\n\t\tMessage: message,\n\t\tSignature: sig.Base64(),\n\t\tPubkey: pubKey.String(),\n\t}\n\tbuff, err := json.MarshalIndent(&response, \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := d.Os.Stdout.Write(buff)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n < len(buff) {\n\t\treturn errors.New(\"failed to write all of response. Should have produced an error to explain failure\")\n\t}\n\treturn nil\n}",
"func (p *ManagedInstanceKeysCreateOrUpdatePoller) FinalResponse(ctx context.Context) (ManagedInstanceKeysCreateOrUpdateResponse, error) {\n\trespType := ManagedInstanceKeysCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.ManagedInstanceKey)\n\tif err != nil {\n\t\treturn ManagedInstanceKeysCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (sk *PrivateKey) GenerateReKey(publicKey *PublicKey) *ReEncryptionKey {\n rkk := &ReEncryptionKey{}\n rkk.cm = sk.cm\n rkk.pointer = C.cryptomagic_get_re_encryption_key(sk.cm.pointer, sk.pointer, publicKey.pointer)\n return rkk\n}",
"func (client Client) ListKeysResponder(resp *http.Response) (result Keys, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (h *OIDCRouter) HandlePublicKeys(w http.ResponseWriter, r *http.Request) {\n\tresp := h.server.NewResponse()\n\tdefer resp.Close()\n\n\tresp.Output[\"keys\"] = h.pubKeys\n\n\terr := osin.OutputJSON(resp, w, r)\n\tif err != nil {\n\t\th.logger.Error().Printf(\"%s: %v\", KeysEndpoint, err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewWriter wraps a Go Writer, coupling it with an output function
|
// NewWriter wraps a Go Writer, coupling it with an output function.
// When w also implements io.Closer, the returned Writer additionally
// retains the closer so callers can close the underlying sink.
func NewWriter(w io.Writer, o OutputFunc) Writer {
	base := &wrappedWriter{
		writer: bufio.NewWriter(w),
		output: o,
	}
	c, closable := w.(io.Closer)
	if !closable {
		return base
	}
	return &wrappedClosingWriter{
		wrappedWriter: base,
		closer:        c,
	}
}
|
[
"func New(w io.Writer) OutputWriter {\n\treturn OutputWriter{\n\t\tw: w,\n\t}\n}",
"func NewWriter(w io.Writer, order Order, litWidth int) io.WriteCloser {\n\treturn newWriter(w, order, litWidth)\n}",
"func newWriter(w io.Writer) *writer {\n\treturn &writer{\n\t\tbufio.NewWriter(w),\n\t\tmake([]byte, 0),\n\t}\n}",
"func NewWriter(w io.Writer, f *Format) (io.WriteCloser, error) {\n\ttw := &writer{\n\t\tw: w,\n\t\tbuf: make([]byte, bufLen(f.LineLength)),\n\t\tcrc: crc24.New(),\n\t\tdata: templateData{\n\t\t\tInput: f.input,\n\t\t\tLines: make(chan line),\n\t\t},\n\t\tt: template.New(\"text\"),\n\t}\n\n\ttw.t.Funcs(sprig.TxtFuncMap())\n\n\tif err := tw.parse(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo tw.render()\n\n\treturn tw, nil\n}",
"func NewWriter(w io.Writer, s Cipher) io.Writer {\n\treturn &writer{\n\t\tWriter: w,\n\t\tCipher: s,\n\t}\n}",
"func NewWriter(w io.Writer, protocol *Protocol) *Writer {\n\treturn &Writer{\n\t\tprotocol: protocol,\n\t\tbw: bufio.NewWriter(w),\n\t}\n}",
"func NewWriter(out io.Writer) io.WriteCloser {\n\tcmd := exec.Command(\"/usr/bin/bzip2\")\n\tcmd.Stdout = out\n\twc, _ := cmd.StdinPipe()\n\n\tcmd.Start()\n\treturn &writer{cmd, wc}\n}",
"func NewWriter(w io.Writer) *Writer {\n\treturn &Writer{*zip.NewWriter(w), \"\"}\n}",
"func (c *CompressionCodec) NewWriter(w io.Writer) io.WriteCloser {\n\tx := writerPool.Get().(*xerialWriter)\n\tx.Reset(w)\n\tx.framed = c.framing == Framed\n\treturn &writer{x}\n}",
"func WrapWriter(w io.Writer) FullWriter {\n\tif fw, ok := w.(FullWriter); ok {\n\t\treturn fw\n\t}\n\n\tfw := &wrapper{Writer: w}\n\n\t// If the writer is a byte writer call the function.\n\tif bw, ok := w.(io.ByteWriter); ok {\n\t\tfw.bw = bw\n\t} else {\n\t\tfw.byteBuf = make([]byte, 1)\n\t}\n\n\t// If the writer is a string writer call the function directly.\n\tif sw, ok := w.(io.StringWriter); ok {\n\t\tfw.sw = sw\n\t} else {\n\t\tfw.stringBuf = make([]byte, 0, 32)\n\t}\n\n\treturn fw\n}",
"func NewWriter(w io.Writer) *Writer {\n\ttgzw := &Writer{WrappedWriter: w}\n\t// gzip writer\n\ttgzw.GzipWriter = gzip.NewWriter(w)\n\t// tar writer\n\ttgzw.Writer = tar.NewWriter(tgzw.GzipWriter)\n\treturn tgzw\n}",
"func newIoWriter(w io.Writer) (*ioWriter, error) {\n\treturn &ioWriter{w}, nil\n}",
"func NewWriter(w io.Writer, width int, header bool) *Writer {\n\tgw := &Writer{\n\t\tw: w,\n\t\tWidth: width,\n\t\tTimeFormat: Astronomical,\n\t\tPrecision: -1,\n\t}\n\n\tif header {\n\t\tgw.WriteMetaData(Version)\n\t}\n\n\treturn gw\n}",
"func NewWriter(w io.Writer, h *sam.Header, wc int) (*Writer, error) {\n\treturn NewWriterLevel(w, h, gzip.DefaultCompression, wc)\n}",
"func wrapWriter(w http.ResponseWriter) writerProxy {\n\treturn &basicWriter{ResponseWriter: w}\n}",
"func NewWriter(w io.Writer, header string) *Writer {\n\tfmt.Fprintf(w, \"{\\\"stream\\\":\\\"%v\\\",\\\"entries\\\":[\\n\", header) //nolint:errcheck\n\treturn &Writer{\n\t\theader: header,\n\t\toutput: w,\n\t}\n}",
"func WrapWriter(w io.WriteCloser, alg string, dict []byte, bufferSize int) (io.WriteCloser, error) {\n\tif bufferSize < 0 {\n\t\treturn nil, fmt.Errorf(\"error wrapping writer: invalid buffer size of %d\", bufferSize)\n\t}\n\tswitch alg {\n\tcase pkgalg.AlgorithmBzip2:\n\t\treturn nil, &ErrWriterNotImplemented{Algorithm: alg}\n\tcase pkgalg.AlgorithmFlate:\n\t\tif len(dict) > 0 {\n\t\t\tfw, err := flate.NewWriterDict(bufio.NewWriter(w), flate.DefaultCompression, dict)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error wrapping writer using compression %q with dictionary %q: %w\", alg, string(dict), err)\n\t\t\t}\n\t\t\treturn fw, nil\n\t\t}\n\t\tfw, err := flate.NewWriter(bufio.NewWriter(w))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error wrapping writer using compression %q: %w\", alg, err)\n\t\t}\n\t\treturn fw, nil\n\tcase pkgalg.AlgorithmGzip:\n\t\treturn gzip.NewWriter(bufio.NewWriter(w)), nil\n\tcase pkgalg.AlgorithmSnappy:\n\t\treturn snappy.NewBufferedWriter(bufio.NewWriter(w)), nil\n\tcase pkgalg.AlgorithmZip:\n\t\treturn nil, &ErrWriterNotImplemented{Algorithm: alg}\n\tcase pkgalg.AlgorithmZlib:\n\t\tif len(dict) > 0 {\n\t\t\tzw, err := zlib.NewWriterDict(bufio.NewWriter(w), dict)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error wrapping writer using compression %q with dictionary %q: %w\", alg, string(dict), err)\n\t\t\t}\n\t\t\treturn zw, nil\n\t\t}\n\t\treturn zlib.NewWriter(bufio.NewWriter(w)), nil\n\tcase pkgalg.AlgorithmNone:\n\t\tif bufferSize > 0 {\n\t\t\treturn bufio.NewWriter(w), nil\n\t\t}\n\t\treturn w, nil\n\t}\n\treturn nil, &pkgalg.ErrUnknownAlgorithm{Algorithm: alg}\n}",
"func NewWriter(w io.Writer, blockSize int) *Writer {\n\treturn &Writer{new(bytes.Buffer), w, 0, 0, blockSize}\n}",
"func NewWriter(w io.Writer, prec, width int, header bool) *Writer {\n\tgw := gff.NewWriter(w, width, header)\n\tgw.Precision = prec\n\treturn &Writer{\n\t\tw: gw,\n\t\tt: &gff.Feature{Source: \"pals\", Feature: \"hit\"},\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
StrOutput is the standard string-based output function
|
// StrOutput is the standard string-based output function: it renders v via
// its String method and writes the resulting bytes to w, deliberately
// discarding the write error.
func StrOutput(w *bufio.Writer, v data.Value) {
	rendered := v.String()
	_, _ = w.Write(stringToBytes(rendered))
}
|
[
"func (s Script) StandardOutput() string {\n\treturn fmt.Sprintf(\"%s\", s.StandardOut)\n}",
"func (t *Test) OutputString() string {\n\tvar output string\n\tfor _, conf := range strings.Split(t.ConfString, \"\\n\") {\n\t\tconf = strings.TrimSpace(conf)\n\t\toutput += fmt.Sprintf(\"conf: %s\\n\", conf)\n\t}\n\tfor i, command := range t.Commands {\n\t\tfor j, outputLine := range command.Output {\n\t\t\tif i == 0 || j != 0 {\n\t\t\t\toutput += fmt.Sprintf(\"%.6f\\t%s\\n\", outputLine.SimTime, outputLine.Line)\n\t\t\t} else {\n\t\t\t\toutput += fmt.Sprintf(\"%s\\n\", outputLine.Line)\n\t\t\t}\n\t\t}\n\t}\n\tif len(output) > 0 && string(output[len(output)-1]) != \"\\n\" {\n\t\toutput += \"\\n\"\n\t}\n\tif len(t.Status) > 0 {\n\t\tstatus := t.Status[len(t.Status)-1]\n\t\toutput += fmt.Sprintf(\"%.6f\\t%s\", t.SimTime, status.Status)\n\t\tif status.Message != \"\" {\n\t\t\toutput += fmt.Sprintf(\": %s\", status.Message)\n\t\t}\n\t\toutput += \"\\n\"\n\t}\n\treturn output\n}",
"func (f *FakeOut) GetOutput() string {\n\treturn string(f.content)\n}",
"func (obj *scriptValue) Output() string {\n\treturn obj.output\n}",
"func (auth *AuthService) Stdout() string {\n\tauth.mu.Lock()\n\tdefer auth.mu.Unlock()\n\treturn auth.stdout.String()\n}",
"func (dfo *DefaultFormatOutput) String() string {\n\treturn GetFormattedOutput(dfo)\n}",
"func RunOutput(cmdStr string) ([]byte, error) {\n\tcmd := buildCommand(cmdStr)\n\treturn cmd.Output()\n}",
"func CombineOutputStr(ctx context.Context, t *testing.T, out KfTestOutput) []string {\n\tlines := CombineOutput(ctx, t, out)\n\n\tvar result []string\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn result\n\t\tcase <-out.Done:\n\t\t\treturn result\n\t\tcase line, ok := <-lines:\n\t\t\tif ok {\n\t\t\t\tresult = append(result, line)\n\t\t\t}\n\t\t}\n\t}\n}",
"func CmdRunOut(s string) string {\n\tfmt.Println(\"Running command: \" + s)\n\tout, err := cmd(s)\n\tif err != nil {\n\t\tlog.Println(\"Command: \" + s + \": Failed :: \" + err.Error())\n\t}\n\tfmt.Println(string(out))\n\treturn string(out)\n}",
"func (tsc *TSC) ToString() string {\n\treturn tsc.output\n}",
"func (c Command) Output(command string, args ...string) ([]byte, error) {\n\treturn exec.Command(command, args...).Output()\n}",
"func printOut(cmd string, out string) string {\n\tret := \"\"\n\tout = strings.TrimSpace(out)\n\tfor _, line := range strings.Split(out, \"\\n\") {\n\t\tret = ret + cmd + \": \" + line + \"\\n\"\n\t}\n\tif ret == \"\" {\n\t\tret = cmd + \": empty output\"\n\t}\n\treturn ret\n}",
"func (c *WSClient) StdOut() io.Writer {\n\treturn c.stdOut\n}",
"func (b Basic) Output(msg string) {\n\tif !b.Verbose {\n\t\treturn\n\t}\n\t_, _ = b.Writer.Write([]byte(msg + \"\\n\"))\n}",
"func (w *Worker) Out(id string) (string, error) {\n\tbuf, err := w.log.getOutputBuffer(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout := buf.String()\n\n\tif len(out) > 0 && out[len(out)-1:] != \"\\n\" {\n\t\tout = out + \"\\n\"\n\t}\n\n\treturn out, nil\n}",
"func cmdOut(cmd string, args ...string) (string, error) {\n\tif *verbose {\n\t\tlog.Printf(\"running %s %q\", cmd, args)\n\t}\n\n\tout, err := exec.Command(cmd, args...).CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes.TrimSpace(out)), nil\n}",
"func (sh *Shell) Out(script string) string {\n\tcmd := sh.Cmd(script)\n\tcmd.Stderr = sh.Stderr\n\tout, err := cmd.Output()\n\tsh.onError(err)\n\treturn sh.trim(out)\n}",
"func (n *NagiosOutputStruct) NagiosOutput() string {\n\t// \"OK - db: 23 424mb | \",\n\treturn fmt.Sprintf(\"%s %s - %s | %s \\n %s\",\n\t\tn.ServiceName, NagiosStatus(n.Status), n.Serviceoutput, n.Serviceperfdata, n.Longserviceoutput,\n\t)\n}",
"func cmdOutput(command string, args ...string) string {\n\treturn cmdOutputDir(\".\", command, args...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewPostHyperflexUcsmConfigPoliciesCreated creates a PostHyperflexUcsmConfigPoliciesCreated with default headers values
|
func NewPostHyperflexUcsmConfigPoliciesCreated() *PostHyperflexUcsmConfigPoliciesCreated {
return &PostHyperflexUcsmConfigPoliciesCreated{}
}
|
[
"func NewPostHyperflexUcsmConfigPoliciesDefault(code int) *PostHyperflexUcsmConfigPoliciesDefault {\n\treturn &PostHyperflexUcsmConfigPoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewPostHyperflexClusterNetworkPoliciesCreated() *PostHyperflexClusterNetworkPoliciesCreated {\n\treturn &PostHyperflexClusterNetworkPoliciesCreated{}\n}",
"func (a *Client) PostHyperflexExtIscsiStoragePolicies(params *PostHyperflexExtIscsiStoragePoliciesParams) (*PostHyperflexExtIscsiStoragePoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostHyperflexExtIscsiStoragePoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostHyperflexExtIscsiStoragePolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/hyperflex/ExtIscsiStoragePolicies\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostHyperflexExtIscsiStoragePoliciesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostHyperflexExtIscsiStoragePoliciesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostHyperflexExtIscsiStoragePoliciesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func (a *Client) PostSnmpPolicies(params *PostSnmpPoliciesParams) (*PostSnmpPoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostSnmpPoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostSnmpPolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/snmp/Policies\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostSnmpPoliciesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostSnmpPoliciesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostSnmpPoliciesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func NewPostHyperflexClusterNetworkPoliciesDefault(code int) *PostHyperflexClusterNetworkPoliciesDefault {\n\treturn &PostHyperflexClusterNetworkPoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (a *Client) PostVnicEthQosPolicies(params *PostVnicEthQosPoliciesParams) (*PostVnicEthQosPoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostVnicEthQosPoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostVnicEthQosPolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/vnic/EthQosPolicies\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostVnicEthQosPoliciesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostVnicEthQosPoliciesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostVnicEthQosPoliciesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func AddDefaultPolicies(config *state.RepoConfig) {\n\tissueRefPath := plumbing.MakeIssueReferencePath()\n\tmergeReqRefPath := plumbing.MakeMergeRequestReferencePath()\n\tconfig.Policies = append(\n\t\tconfig.Policies,\n\n\t\t// Everyone can create issues or merge request\n\t\t&state.Policy{Subject: \"all\", Object: issueRefPath, Action: PolicyActionWrite},\n\t\t&state.Policy{Subject: \"all\", Object: mergeReqRefPath, Action: PolicyActionWrite},\n\n\t\t// Contributors default branch policies\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/heads\", Action: PolicyActionWrite}, // can create branches\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/heads/master\", Action: PolicyActionDenyDelete}, // cannot delete master branch\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/heads\", Action: PolicyActionDelete}, // can delete any branches\n\n\t\t// Contributor default tag policies\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/tags\", Action: PolicyActionWrite}, // can create tags\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/tags\", Action: PolicyActionDelete}, // can delete any tags\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/notes\", Action: PolicyActionWrite}, // can create notes\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/notes\", Action: PolicyActionDelete}, // can delete any notes\n\n\t\t// Contributor default issue policies\n\t\t&state.Policy{Subject: \"contrib\", Object: issueRefPath, Action: PolicyActionDelete}, // can delete issues\n\t\t&state.Policy{Subject: \"contrib\", Object: issueRefPath, Action: PolicyActionUpdate}, // can update issue admin fields.\n\n\t\t// Creator default issue policies\n\t\t&state.Policy{Subject: \"creator\", Object: issueRefPath, Action: PolicyActionDelete}, // can delete own issue\n\t\t&state.Policy{Subject: \"creator\", Object: issueRefPath, Action: PolicyActionUpdate}, // can update own issue admin fields\n\n\t\t// Creator default merge request 
policies\n\t\t&state.Policy{Subject: \"creator\", Object: mergeReqRefPath, Action: PolicyActionDelete}, // can delete merge request\n\t\t&state.Policy{Subject: \"creator\", Object: mergeReqRefPath, Action: PolicyActionUpdate}, // can update own merge request admin fields\n\n\t\t// Contributor default merge request policies\n\t\t&state.Policy{Subject: \"contrib\", Object: mergeReqRefPath, Action: PolicyActionUpdate}, // can update any merge requests\n\t\t&state.Policy{Subject: \"contrib\", Object: mergeReqRefPath, Action: PolicyActionDelete}, // can delete any merge requests\n\t)\n}",
"func CreateDescribeBackupPoliciesRequest() (request *DescribeBackupPoliciesRequest) {\n\trequest = &DescribeBackupPoliciesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeBackupPolicies\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func NewHyperflexVcenterConfigPolicyAllOf(classId string, objectType string) *HyperflexVcenterConfigPolicyAllOf {\n\tthis := HyperflexVcenterConfigPolicyAllOf{}\n\tthis.ClassId = classId\n\tthis.ObjectType = objectType\n\treturn &this\n}",
"func newMigrationPolicies(c *MigrationsV1alpha1Client) *migrationPolicies {\n\treturn &migrationPolicies{\n\t\tclient: c.RESTClient(),\n\t}\n}",
"func (b *netPol) Create() ([]*netv1.NetworkPolicy, error) { // nolint:golint,unparam\n\tif !b.settings.NetworkPoliciesEnabled {\n\t\treturn []*netv1.NetworkPolicy{}, nil\n\t}\n\n\tconst ingressLabelName = \"app.kubernetes.io/name\"\n\tconst ingressLabelValue = \"ingress-nginx\"\n\n\tresult := []*netv1.NetworkPolicy{\n\t\t{\n\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: akashDeploymentPolicyName,\n\t\t\t\tLabels: b.labels(),\n\t\t\t\tNamespace: LidNS(b.lid),\n\t\t\t},\n\t\t\tSpec: netv1.NetworkPolicySpec{\n\t\t\t\tPodSelector: metav1.LabelSelector{},\n\t\t\t\tPolicyTypes: []netv1.PolicyType{\n\t\t\t\t\tnetv1.PolicyTypeIngress,\n\t\t\t\t\tnetv1.PolicyTypeEgress,\n\t\t\t\t},\n\t\t\t\tIngress: []netv1.NetworkPolicyIngressRule{\n\t\t\t\t\t{ // Allow Network Connections from same Namespace\n\t\t\t\t\t\tFrom: []netv1.NetworkPolicyPeer{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNamespaceSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\takashNetworkNamespace: LidNS(b.lid),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{ // Allow Network Connections from NGINX ingress controller\n\t\t\t\t\t\tFrom: []netv1.NetworkPolicyPeer{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNamespaceSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\tingressLabelName: ingressLabelValue,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tPodSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\tingressLabelName: ingressLabelValue,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tEgress: []netv1.NetworkPolicyEgressRule{\n\t\t\t\t\t{ // Allow Network Connections to same Namespace\n\t\t\t\t\t\tTo: []netv1.NetworkPolicyPeer{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNamespaceSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\tMatchLabels: 
map[string]string{\n\t\t\t\t\t\t\t\t\t\takashNetworkNamespace: LidNS(b.lid),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{ // Allow DNS to internal server\n\t\t\t\t\t\tPorts: []netv1.NetworkPolicyPort{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tProtocol: &udpProtocol,\n\t\t\t\t\t\t\t\tPort: &dnsPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tProtocol: &tcpProtocol,\n\t\t\t\t\t\t\t\tPort: &dnsPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTo: []netv1.NetworkPolicyPeer{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPodSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\"kubernetes.io/metadata.name\": \"kube-system\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tNamespaceSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\"k8s-app\": \"kube-dns\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{ // Allow access to IPV4 Public addresses only\n\t\t\t\t\t\tTo: []netv1.NetworkPolicyPeer{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPodSelector: nil,\n\t\t\t\t\t\t\t\tNamespaceSelector: nil,\n\t\t\t\t\t\t\t\tIPBlock: &netv1.IPBlock{\n\t\t\t\t\t\t\t\t\tCIDR: \"0.0.0.0/0\",\n\t\t\t\t\t\t\t\t\tExcept: []string{\n\t\t\t\t\t\t\t\t\t\t\"10.0.0.0/8\",\n\t\t\t\t\t\t\t\t\t\t\"192.168.0.0/16\",\n\t\t\t\t\t\t\t\t\t\t\"172.16.0.0/12\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, service := range b.group.Services {\n\t\t// find all the ports that are exposed directly\n\t\tports := make([]netv1.NetworkPolicyPort, 0)\n\t\tfor _, expose := range service.Expose {\n\t\t\tif !expose.Global || util.ShouldBeIngress(expose) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tportToOpen := util.ExposeExternalPort(expose)\n\t\t\tportAsIntStr := intstr.FromInt(int(portToOpen))\n\n\t\t\tvar exposeProto 
corev1.Protocol\n\t\t\tswitch expose.Proto {\n\t\t\tcase manitypes.TCP:\n\t\t\t\texposeProto = corev1.ProtocolTCP\n\t\t\tcase manitypes.UDP:\n\t\t\t\texposeProto = corev1.ProtocolUDP\n\n\t\t\t}\n\t\t\tentry := netv1.NetworkPolicyPort{\n\t\t\t\tPort: &portAsIntStr,\n\t\t\t\tProtocol: &exposeProto,\n\t\t\t}\n\t\t\tports = append(ports, entry)\n\t\t}\n\n\t\t// If no ports are found, skip this service\n\t\tif len(ports) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Make a network policy just to open these ports to incoming traffic\n\t\tserviceName := service.Name\n\t\tpolicyName := fmt.Sprintf(\"akash-%s-np\", serviceName)\n\t\tpolicy := netv1.NetworkPolicy{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tLabels: b.labels(),\n\t\t\t\tName: policyName,\n\t\t\t\tNamespace: LidNS(b.lid),\n\t\t\t},\n\t\t\tSpec: netv1.NetworkPolicySpec{\n\n\t\t\t\tIngress: []netv1.NetworkPolicyIngressRule{\n\t\t\t\t\t{ // Allow Network Connections to same Namespace\n\t\t\t\t\t\tPorts: ports,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPodSelector: metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\tAkashManifestServiceLabelName: serviceName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPolicyTypes: []netv1.PolicyType{\n\t\t\t\t\tnetv1.PolicyTypeIngress,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tresult = append(result, &policy)\n\t}\n\n\treturn result, nil\n}",
"func NewWindowsUpdatesUpdatePoliciesItemAudienceMicrosoftGraphWindowsUpdatesUpdateAudienceUpdateAudiencePostRequestBody()(*WindowsUpdatesUpdatePoliciesItemAudienceMicrosoftGraphWindowsUpdatesUpdateAudienceUpdateAudiencePostRequestBody) {\n m := &WindowsUpdatesUpdatePoliciesItemAudienceMicrosoftGraphWindowsUpdatesUpdateAudienceUpdateAudiencePostRequestBody{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func (a *Client) PostHyperflexExtIscsiStoragePoliciesMoid(params *PostHyperflexExtIscsiStoragePoliciesMoidParams) (*PostHyperflexExtIscsiStoragePoliciesMoidCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostHyperflexExtIscsiStoragePoliciesMoidParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostHyperflexExtIscsiStoragePoliciesMoid\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/hyperflex/ExtIscsiStoragePolicies/{moid}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostHyperflexExtIscsiStoragePoliciesMoidReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostHyperflexExtIscsiStoragePoliciesMoidCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostHyperflexExtIscsiStoragePoliciesMoidDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func newPolicies(config *viper.Viper) (PoliciesConnecter, error) {\n\tif config == nil {\n\t\treturn nil, errors.New(\"Must provide config to mf2c.newPolicies()\")\n\t}\n\n\tif config.GetString(isLeaderProp) != \"\" {\n\t\treturn NewPoliciesMock(config.GetBool(isLeaderProp)), nil\n\t}\n\treturn NewPolicies(config.GetString(policiesURLProp))\n}",
"func newVaultPolicies(c *VaultPolicyV1Client) *vaultpolicies {\n\treturn &vaultpolicies{\n\t\tclient: c.RESTClient(),\n\t}\n}",
"func CreateListSystemSecurityPoliciesRequest() (request *ListSystemSecurityPoliciesRequest) {\n\trequest = &ListSystemSecurityPoliciesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Alb\", \"2020-06-16\", \"ListSystemSecurityPolicies\", \"alb\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func NewPostRoutingpoliciesCreated() *PostRoutingpoliciesCreated {\n\treturn &PostRoutingpoliciesCreated{}\n}",
"func (a *Client) PostReplicationPolicies(params *PostReplicationPoliciesParams, authInfo runtime.ClientAuthInfoWriter) (*PostReplicationPoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostReplicationPoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostReplicationPolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/replication/policies\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PostReplicationPoliciesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostReplicationPoliciesCreated), nil\n\n}",
"func NewGetHyperflexNodeConfigPoliciesMoidDefault(code int) *GetHyperflexNodeConfigPoliciesMoidDefault {\n\treturn &GetHyperflexNodeConfigPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewPostHyperflexUcsmConfigPoliciesDefault creates a PostHyperflexUcsmConfigPoliciesDefault with default headers values
|
func NewPostHyperflexUcsmConfigPoliciesDefault(code int) *PostHyperflexUcsmConfigPoliciesDefault {
return &PostHyperflexUcsmConfigPoliciesDefault{
_statusCode: code,
}
}
|
[
"func NewPostHyperflexClusterNetworkPoliciesDefault(code int) *PostHyperflexClusterNetworkPoliciesDefault {\n\treturn &PostHyperflexClusterNetworkPoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewPostHyperflexUcsmConfigPoliciesCreated() *PostHyperflexUcsmConfigPoliciesCreated {\n\treturn &PostHyperflexUcsmConfigPoliciesCreated{}\n}",
"func NewGetHyperflexNodeConfigPoliciesMoidDefault(code int) *GetHyperflexNodeConfigPoliciesMoidDefault {\n\treturn &GetHyperflexNodeConfigPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func AddDefaultPolicies(config *state.RepoConfig) {\n\tissueRefPath := plumbing.MakeIssueReferencePath()\n\tmergeReqRefPath := plumbing.MakeMergeRequestReferencePath()\n\tconfig.Policies = append(\n\t\tconfig.Policies,\n\n\t\t// Everyone can create issues or merge request\n\t\t&state.Policy{Subject: \"all\", Object: issueRefPath, Action: PolicyActionWrite},\n\t\t&state.Policy{Subject: \"all\", Object: mergeReqRefPath, Action: PolicyActionWrite},\n\n\t\t// Contributors default branch policies\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/heads\", Action: PolicyActionWrite}, // can create branches\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/heads/master\", Action: PolicyActionDenyDelete}, // cannot delete master branch\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/heads\", Action: PolicyActionDelete}, // can delete any branches\n\n\t\t// Contributor default tag policies\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/tags\", Action: PolicyActionWrite}, // can create tags\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/tags\", Action: PolicyActionDelete}, // can delete any tags\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/notes\", Action: PolicyActionWrite}, // can create notes\n\t\t&state.Policy{Subject: \"contrib\", Object: \"refs/notes\", Action: PolicyActionDelete}, // can delete any notes\n\n\t\t// Contributor default issue policies\n\t\t&state.Policy{Subject: \"contrib\", Object: issueRefPath, Action: PolicyActionDelete}, // can delete issues\n\t\t&state.Policy{Subject: \"contrib\", Object: issueRefPath, Action: PolicyActionUpdate}, // can update issue admin fields.\n\n\t\t// Creator default issue policies\n\t\t&state.Policy{Subject: \"creator\", Object: issueRefPath, Action: PolicyActionDelete}, // can delete own issue\n\t\t&state.Policy{Subject: \"creator\", Object: issueRefPath, Action: PolicyActionUpdate}, // can update own issue admin fields\n\n\t\t// Creator default merge request 
policies\n\t\t&state.Policy{Subject: \"creator\", Object: mergeReqRefPath, Action: PolicyActionDelete}, // can delete merge request\n\t\t&state.Policy{Subject: \"creator\", Object: mergeReqRefPath, Action: PolicyActionUpdate}, // can update own merge request admin fields\n\n\t\t// Contributor default merge request policies\n\t\t&state.Policy{Subject: \"contrib\", Object: mergeReqRefPath, Action: PolicyActionUpdate}, // can update any merge requests\n\t\t&state.Policy{Subject: \"contrib\", Object: mergeReqRefPath, Action: PolicyActionDelete}, // can delete any merge requests\n\t)\n}",
"func NewDeleteHyperflexProxySettingPoliciesMoidDefault(code int) *DeleteHyperflexProxySettingPoliciesMoidDefault {\n\treturn &DeleteHyperflexProxySettingPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewGetHyperflexSoftwareVersionPoliciesMoidDefault(code int) *GetHyperflexSoftwareVersionPoliciesMoidDefault {\n\treturn &GetHyperflexSoftwareVersionPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func DefaultPostSignerConfig() SignerConfig {\n\treturn SignerConfig{\n\t\tAlgorithms: []httpsig.Algorithm{httpsig.ED25519},\n\t\tDigestAlgorithm: httpsig.DigestSha256,\n\t\tHeaders: []string{\"(request-target)\", \"Date\", \"Digest\"},\n\t}\n}",
"func (a *Client) PostSnmpPolicies(params *PostSnmpPoliciesParams) (*PostSnmpPoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostSnmpPoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostSnmpPolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/snmp/Policies\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostSnmpPoliciesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostSnmpPoliciesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostSnmpPoliciesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func NewPatchKvmPoliciesMoidDefault(code int) *PatchKvmPoliciesMoidDefault {\n\treturn &PatchKvmPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewHyperflexVcenterConfigPolicyAllOfWithDefaults() *HyperflexVcenterConfigPolicyAllOf {\n\tthis := HyperflexVcenterConfigPolicyAllOf{}\n\tvar classId string = \"hyperflex.VcenterConfigPolicy\"\n\tthis.ClassId = classId\n\tvar objectType string = \"hyperflex.VcenterConfigPolicy\"\n\tthis.ObjectType = objectType\n\treturn &this\n}",
"func NewPostHyperflexClusterNetworkPoliciesCreated() *PostHyperflexClusterNetworkPoliciesCreated {\n\treturn &PostHyperflexClusterNetworkPoliciesCreated{}\n}",
"func MakeDefaultConfig(log *logging.MasterLogger, sk *cipher.SecKey, usrEnv bool, pkgEnv bool, testEnv bool, dmsgHTTP bool, hypervisor bool, confPath, hypervisorPKs string, services *Services) (*V1, error) {\n\tif usrEnv && pkgEnv {\n\t\tlog.Fatal(\"usrEnv and pkgEnv are mutually exclusive\")\n\t}\n\tcc, err := NewCommon(log, confPath, sk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar dmsgHTTPServersList *DmsgHTTPServers\n\n\tdnsServer := utilenv.DNSServer\n\tif services != nil {\n\t\tif services.DNSServer != \"\" {\n\t\t\tdnsServer = services.DNSServer\n\t\t}\n\t}\n\n\tif dmsgHTTP {\n\t\tdmsgHTTPPath := DMSGHTTPName\n\t\tif pkgEnv {\n\t\t\tdmsgHTTPPath = SkywirePath + \"/\" + DMSGHTTPName\n\t\t}\n\t\tserversListJSON, err := os.ReadFile(filepath.Clean(dmsgHTTPPath))\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Failed to read dmsghttp-config.json file.\")\n\t\t}\n\t\terr = json.Unmarshal(serversListJSON, &dmsgHTTPServersList)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Error during parsing servers list\")\n\t\t}\n\t}\n\t// Actual config generation.\n\tconf := MakeBaseConfig(cc, testEnv, dmsgHTTP, services, dmsgHTTPServersList)\n\n\tconf.Launcher.Apps = makeDefaultLauncherAppsConfig(dnsServer)\n\n\tconf.Hypervisors = make([]cipher.PubKey, 0)\n\n\t// Manipulate Hypervisor PKs\n\tif hypervisorPKs != \"\" {\n\t\tkeys := strings.Split(hypervisorPKs, \",\")\n\t\tfor _, key := range keys {\n\t\t\tif key != \"\" {\n\t\t\t\tkeyParsed, err := coinCipher.PubKeyFromHex(strings.TrimSpace(key))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithError(err).Fatalf(\"Failed to parse hypervisor public key: %s.\", key)\n\t\t\t\t}\n\t\t\t\tconf.Hypervisors = append(conf.Hypervisors, cipher.PubKey(keyParsed))\n\n\t\t\t\t// Compare key value and visor PK, if same, then this visor should be hypervisor\n\t\t\t\tif key == conf.PK.Hex() {\n\t\t\t\t\thypervisor = true\n\t\t\t\t\tconf.Hypervisors = []cipher.PubKey{}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif 
hypervisor {\n\t\tconfig := GenerateWorkDirConfig(false)\n\t\tconf.Hypervisor = &config\n\t}\n\tif pkgEnv {\n\t\tpkgConfig := PackageConfig()\n\t\tconf.LocalPath = pkgConfig.LocalPath\n\t\tconf.DmsgHTTPServerPath = pkgConfig.LocalPath + \"/\" + Custom\n\t\tconf.Launcher.BinPath = pkgConfig.LauncherBinPath\n\t\tconf.Transport.LogStore.Location = pkgConfig.LocalPath + \"/\" + TpLogStore\n\t\tif conf.Hypervisor != nil {\n\t\t\tconf.Hypervisor.EnableAuth = pkgConfig.Hypervisor.EnableAuth\n\t\t\tconf.Hypervisor.DBPath = pkgConfig.Hypervisor.DbPath\n\t\t}\n\t}\n\tif usrEnv {\n\t\tusrConfig := UserConfig()\n\t\tconf.LocalPath = usrConfig.LocalPath\n\t\tconf.DmsgHTTPServerPath = usrConfig.LocalPath + \"/\" + Custom\n\t\tconf.Launcher.BinPath = usrConfig.LauncherBinPath\n\t\tconf.Transport.LogStore.Location = usrConfig.LocalPath + \"/\" + TpLogStore\n\t\tif conf.Hypervisor != nil {\n\t\t\tconf.Hypervisor.EnableAuth = usrConfig.Hypervisor.EnableAuth\n\t\t\tconf.Hypervisor.DBPath = usrConfig.Hypervisor.DbPath\n\t\t}\n\t}\n\treturn conf, nil\n}",
"func (a *Client) PostHyperflexExtIscsiStoragePolicies(params *PostHyperflexExtIscsiStoragePoliciesParams) (*PostHyperflexExtIscsiStoragePoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostHyperflexExtIscsiStoragePoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostHyperflexExtIscsiStoragePolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/hyperflex/ExtIscsiStoragePolicies\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostHyperflexExtIscsiStoragePoliciesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostHyperflexExtIscsiStoragePoliciesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostHyperflexExtIscsiStoragePoliciesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func NewUpdateCSPMPolicySettingsDefault(code int) *UpdateCSPMPolicySettingsDefault {\n\treturn &UpdateCSPMPolicySettingsDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewGetHyperflexLocalCredentialPoliciesDefault(code int) *GetHyperflexLocalCredentialPoliciesDefault {\n\treturn &GetHyperflexLocalCredentialPoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (a *Client) PostVnicEthQosPolicies(params *PostVnicEthQosPoliciesParams) (*PostVnicEthQosPoliciesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostVnicEthQosPoliciesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostVnicEthQosPolicies\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/vnic/EthQosPolicies\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostVnicEthQosPoliciesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostVnicEthQosPoliciesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PostVnicEthQosPoliciesDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}",
"func NewPatchVnicFcQosPoliciesMoidDefault(code int) *PatchVnicFcQosPoliciesMoidDefault {\n\treturn &PatchVnicFcQosPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewDeleteSMTPPoliciesMoidDefault(code int) *DeleteSMTPPoliciesMoidDefault {\n\treturn &DeleteSMTPPoliciesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewHyperflexVcenterConfigPolicyAllOf(classId string, objectType string) *HyperflexVcenterConfigPolicyAllOf {\n\tthis := HyperflexVcenterConfigPolicyAllOf{}\n\tthis.ClassId = classId\n\tthis.ObjectType = objectType\n\treturn &this\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Code gets the status code for the post hyperflex ucsm config policies default response
|
func (o *PostHyperflexUcsmConfigPoliciesDefault) Code() int {
return o._statusCode
}
|
[
"func (o *PostHyperflexClusterNetworkPoliciesDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostAppsAppIDConfigurationsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostPartSupportConfigurationDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *GetHyperflexNodeConfigPoliciesMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostSecurityUnitsMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *DeleteHyperflexProxySettingPoliciesMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateCSPMPolicySettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *MultiAdminVerifyConfigModifyDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostTamSecurityAdvisoriesDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *ConsentsPostConsentsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostIamLdapProvidersDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *ExportPolicyCreateDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *GetRTResponsePoliciesDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostClustersUUIDCertificatesCaCertsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *GetHyperflexSoftwareVersionPoliciesMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateHTTPSettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostPortSubGroupsMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PostIPIPDelegationDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *SnapmirrorPolicyModifyDefault) Code() int {\n\treturn o._statusCode\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validate validates this party product event party relationship
|
// Validate checks this party product event party relationship against the
// registered formats, collecting every field error before reporting.
func (m *PartyProductEventPartyRelationship) Validate(formats strfmt.Registry) error {
	var errs []error

	if err := m.validateData(formats); err != nil {
		errs = append(errs, err)
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
|
[
"func (m *PartyProductEventPartyRelationshipDataItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *Product) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif err := m._validateUuid(m.GetEditorID()); err != nil {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"EditorID\",\n\t\t\treason: \"value must be a valid UUID\",\n\t\t\tcause: err,\n\t\t}\n\t}\n\n\tif len(m.GetName()) > 200 {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"Name\",\n\t\t\treason: \"value length must be at most 200 bytes\",\n\t\t}\n\t}\n\n\tfor idx, item := range m.GetMetrics() {\n\t\t_, _ = idx, item\n\n\t\tif len(item) > 200 {\n\t\t\treturn ProductValidationError{\n\t\t\t\tfield: fmt.Sprintf(\"Metrics[%v]\", idx),\n\t\t\t\treason: \"value length must be at most 200 bytes\",\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(m.GetGenearlInformation()) > 1000 {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"GenearlInformation\",\n\t\t\treason: \"value length must be at most 1000 bytes\",\n\t\t}\n\t}\n\n\tif len(m.GetContracttTips()) > 200 {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"ContracttTips\",\n\t\t\treason: \"value length must be at most 200 bytes\",\n\t\t}\n\t}\n\n\tif _, ok := _Product_LocationType_InLookup[m.GetLocationType()]; !ok {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"LocationType\",\n\t\t\treason: \"value must be in list [NONE SAAS On Premise]\",\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetOpenSource()).(interface {\n\t\tValidate() error\n\t}); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ProductValidationError{\n\t\t\t\tfield: \"OpenSource\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := _Product_Licensing_InLookup[m.GetLicensing()]; !ok {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"Licensing\",\n\t\t\treason: \"value must be in list [NONE CLOSEDSOURCE OPENSOURCE]\",\n\t\t}\n\t}\n\n\tfor idx, item := range m.GetVersion() {\n\t\t_, _ = idx, item\n\n\t\tif v, ok := interface{}(item).(interface 
{\n\t\t\tValidate() error\n\t\t}); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn ProductValidationError{\n\t\t\t\t\tfield: fmt.Sprintf(\"Version[%v]\", idx),\n\t\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\t\tcause: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif _, ok := _Product_Recommendation_InLookup[m.GetRecommendation()]; !ok {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"Recommendation\",\n\t\t\treason: \"value must be in list [NONE AUTHORIZED BLACKLISTED RECOMMENDED]\",\n\t\t}\n\t}\n\n\tfor idx, item := range m.GetSupportVendors() {\n\t\t_, _ = idx, item\n\n\t\tif len(item) > 200 {\n\t\t\treturn ProductValidationError{\n\t\t\t\tfield: fmt.Sprintf(\"SupportVendors[%v]\", idx),\n\t\t\t\treason: \"value length must be at most 200 bytes\",\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif v, ok := interface{}(m.GetCreatedOn()).(interface {\n\t\tValidate() error\n\t}); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ProductValidationError{\n\t\t\t\tfield: \"CreatedOn\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedOn()).(interface {\n\t\tValidate() error\n\t}); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ProductValidationError{\n\t\t\t\tfield: \"UpdatedOn\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(m.GetSwidtagProduct()) > 200 {\n\t\treturn ProductValidationError{\n\t\t\tfield: \"SwidtagProduct\",\n\t\t\treason: \"value length must be at most 200 bytes\",\n\t\t}\n\t}\n\n\t// no validation rules for EditorName\n\n\treturn nil\n}",
"func (p *product) Validate() (err error) {\n\treturn\n}",
"func (e OrderSide) Validate() error {\n\tswitch e {\n\tcase OrderSideBuy:\n\tcase OrderSideSell:\n\tdefault:\n\t\treturn fmt.Errorf(\"OrderSide, %q, is not known\", e)\n\t}\n\treturn nil\n}",
"func (m *LolLobbyJoinPartyAnalytics) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *PaymentAttributesBeneficiaryParty) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func RelationshipValidator(model coal.Model, models []coal.Model, exclude ...string) *Callback {\n\t// build index\n\tindex := make(map[string]coal.Model, len(models))\n\tfor _, model := range models {\n\t\tindex[coal.GetMeta(model).PluralName] = model\n\t}\n\n\t// prepare lists\n\tresources := make(map[coal.Model]string)\n\treferences := make(map[string]coal.Model)\n\n\t// iterate through all fields\n\tfor _, field := range coal.GetMeta(model).Relationships {\n\t\t// continue if relationship is excluded\n\t\tif stick.Contains(exclude, field.Name) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// handle has-one and has-many relationships\n\t\tif field.HasOne || field.HasMany {\n\t\t\t// get related model\n\t\t\trelatedModel := index[field.RelType]\n\t\t\tif relatedModel == nil {\n\t\t\t\tpanic(fmt.Sprintf(`fire: missing model: \"%s\"`, field.RelType))\n\t\t\t}\n\n\t\t\t// get related field\n\t\t\trelatedField := \"\"\n\t\t\tfor _, relationship := range coal.GetMeta(relatedModel).Relationships {\n\t\t\t\tif relationship.RelName == field.RelInverse {\n\t\t\t\t\trelatedField = relationship.Name\n\t\t\t\t}\n\t\t\t}\n\t\t\tif relatedField == \"\" {\n\t\t\t\tpanic(fmt.Sprintf(`fire: missing field for inverse relationship: \"%s\"`, field.RelInverse))\n\t\t\t}\n\n\t\t\t// add resource\n\t\t\tresources[relatedModel] = relatedField\n\t\t}\n\n\t\t// handle to-one and to-many relationships\n\t\tif field.ToOne || field.ToMany {\n\t\t\t// get related model\n\t\t\trelatedModel := index[field.RelType]\n\t\t\tif relatedModel == nil {\n\t\t\t\tpanic(fmt.Sprintf(`fire: missing model in catalog: \"%s\"`, field.RelType))\n\t\t\t}\n\n\t\t\t// add reference\n\t\t\treferences[field.Name] = relatedModel\n\t\t}\n\t}\n\n\t// create callbacks\n\tdrv := DependentResourcesValidator(resources)\n\trrv := ReferencedResourcesValidator(references)\n\n\t// combine callbacks\n\tcb := Combine(\"fire/RelationshipValidator\", Validator, drv, rrv)\n\n\treturn cb\n}",
"func (m *PaymentAttributesDebtorParty) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (e Enrollment) Validate() error {\n\terrs := validation.Errors{\n\t\t\"adminContact\": validation.Validate(e.AdminContact, validation.Required),\n\t\t\"certificateType\": validation.Validate(e.CertificateType, validation.Required),\n\t\t\"csr\": validation.Validate(e.CSR, validation.Required),\n\t\t\"networkConfiguration\": validation.Validate(e.NetworkConfiguration, validation.Required),\n\t\t\"org\": validation.Validate(e.Org, validation.Required),\n\t\t\"ra\": validation.Validate(e.RA, validation.Required),\n\t\t\"techContact\": validation.Validate(e.TechContact, validation.Required),\n\t\t\"validationType\": validation.Validate(e.ValidationType, validation.Required),\n\t\t\"thirdParty\": validation.Validate(e.ThirdParty),\n\t}\n\n\tif e.CSR != nil {\n\t\terrs[\"csr.preferredTrustChain\"] = validation.Validate(e.CSR.PreferredTrustChain,\n\t\t\tvalidation.When(e.ValidationType != \"dv\", validation.Empty.Error(\"must be blank when 'validationType' is not 'dv'\")))\n\t}\n\n\treturn errs.Filter()\n}",
"func (m *EventstoreEvent) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateOriginator(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *SepaSctAssociationRelationshipsSponsor) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateProductView(result *ProductView) (err error) {\n\n\treturn\n}",
"func validatePublishEvent(publishBuilder *PublishBuilder, strictValidation bool) error {\n\tpublishEvent := struct {\n\t\tTopic []string `valid:\"required\"`\n\t\tEventName string `valid:\"alphanum,stringlength(1|256),required\"`\n\t\tNamespace string `valid:\"alphanum,stringlength(1|256),required\"`\n\t\tClientID string\n\t\tUserID string\n\t\tSessionID string\n\t\tTraceID string\n\t}{\n\t\tTopic: publishBuilder.topic,\n\t\tEventName: publishBuilder.eventName,\n\t\tNamespace: publishBuilder.namespace,\n\t\tClientID: publishBuilder.clientID,\n\t\tTraceID: publishBuilder.traceID,\n\t\tUserID: publishBuilder.userID,\n\t\tSessionID: publishBuilder.sessionID,\n\t}\n\n\tvalid, err := validator.ValidateStruct(publishEvent)\n\tif err != nil {\n\t\tlogrus.Errorf(\"unable to validate publish event. error : %v\", err)\n\t\treturn errInvalidPubStruct\n\t}\n\n\tif !valid {\n\t\treturn errInvalidPubStruct\n\t}\n\n\t// only additional validation that included to strictValidation\n\tif strictValidation {\n\t\tif publishEvent.UserID != \"\" && !validator.IsUUID4WithoutHyphens(publishEvent.UserID) {\n\t\t\treturn errInvalidUserID\n\t\t}\n\n\t\tif publishEvent.ClientID != \"\" && !validator.IsUUID4WithoutHyphens(publishEvent.ClientID) {\n\t\t\treturn errInvalidClientID\n\t\t}\n\n\t\tif publishEvent.SessionID != \"\" && !validator.IsUUID4WithoutHyphens(publishEvent.SessionID) {\n\t\t\treturn errInvalidSessionID\n\t\t}\n\n\t\tif publishEvent.TraceID != \"\" && !validator.IsUUID4WithoutHyphens(publishEvent.TraceID) {\n\t\t\treturn errInvalidTraceID\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *ProductOption) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ProvenanceLinkDTO) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (c *Client) validTwoPartyProposal(\n\tproposal ChannelProposal,\n\tourIdx int,\n\tpeerAddr wallet.Address,\n) error {\n\tbase := proposal.Base()\n\tif err := base.Valid(); err != nil {\n\t\treturn err\n\t}\n\n\tpeers := c.proposalPeers(proposal)\n\tif proposal.Base().NumPeers() != len(peers) {\n\t\treturn errors.Errorf(\"participants (%d) and peers (%d) dimension mismatch\",\n\t\t\tproposal.Base().NumPeers(), len(peers))\n\t}\n\tif len(peers) != 2 {\n\t\treturn errors.Errorf(\"expected 2 peers, got %d\", len(peers))\n\t}\n\n\tpeerIdx := ourIdx ^ 1\n\t// In the 2PCPP, the proposer is expected to have index 0\n\tif !peers[peerIdx].Equals(peerAddr) {\n\t\treturn errors.Errorf(\"remote peer doesn't have peer index %d\", peerIdx)\n\t}\n\n\t// In the 2PCPP, the receiver is expected to have index 1\n\tif !peers[ourIdx].Equals(c.address) {\n\t\treturn errors.Errorf(\"we don't have peer index %d\", ourIdx)\n\t}\n\n\tif proposal.Type() == wire.SubChannelProposal {\n\t\tif err := c.validSubChannelProposal(proposal.(*SubChannelProposal)); err != nil {\n\t\t\treturn errors.WithMessage(err, \"validate subchannel proposal\")\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (p *Public) Validate() error {\n\tif p.ID == \"\" {\n\t\treturn errors.New(\"party.Public: ID cannot be empty\")\n\t}\n\n\tif p.preKeygen() {\n\t\treturn nil\n\t}\n\n\t// nil checks\n\tif p.ECDSA == nil {\n\t\treturn errors.New(\"party.Public: ECDSA public share cannot be nil\")\n\t}\n\tif p.Paillier == nil {\n\t\treturn errors.New(\"party.Public: Paillier public key cannot be nil\")\n\t}\n\tif p.Pedersen == nil {\n\t\treturn errors.New(\"party.Public: Pedersen parameters cannot be nil\")\n\t}\n\n\t// ECDSA is not identity\n\tif p.ECDSA.IsIdentity() {\n\t\treturn errors.New(\"party.Public: ECDSA public key is identity\")\n\t}\n\n\t// Paillier check\n\tif err := p.Paillier.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"party.Public: %w\", err)\n\t}\n\n\t// Pedersen check\n\tif err := p.Pedersen.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"party.Public: %w\", err)\n\t}\n\n\t// Both N's are the same\n\tif p.Paillier.N.Cmp(p.Pedersen.N) != 0 {\n\t\treturn errors.New(\"party.Public: Pedersen and Paillier should share the same N\")\n\t}\n\n\treturn nil\n}",
"func (o *GetProductsGroupsProductGroupFidProductsOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with models.Envelope\n\tif err := o.Envelope.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (p *Products) MiddlewareProductValidation(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tprod := &data.Product{}\n\t\terr := data.FromJSON(prod, r.Body)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error reading product\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\terr = prod.Validate()\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Error validationg product: %s\", err), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tctx := context.WithValue(r.Context(), KeyProduct{}, prod)\n\t\tr = r.WithContext(ctx)\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validate validates this party product event party relationship data items0
|
// Validate checks this party product event party relationship data items0
// against the registered formats, accumulating every field error.
func (m *PartyProductEventPartyRelationshipDataItems0) Validate(formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(strfmt.Registry) error{
		m.validateID,
		m.validateType,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
|
[
"func (m *PartyProductEventPartyRelationship) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *ThinRelationshipDataItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *DataItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *RefundItemsItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateItemType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *DataItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with models.RoleParams\n\tif err := o.RoleParams.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateDefault(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *RefundPaymentsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AppcompsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UpdateCartLineItemOKBodyLineItemsItems0PhysicalItemsItems0DiscountsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *WishlistsPostCreatedBodyDataItemsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *PostV1FunctionalitiesExternalResourcesItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateRemoteID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *InventoryOKBodyItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UpdateCartLineItemParamsBodyCustomItemsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetAllOrderProductsOKBodyItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateAppliedDiscounts(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEventDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateProductOptions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateUpc(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *LifecycleEventsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UpdateCartLineItemOKBodyLineItemsItems0PhysicalItemsItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCoupons(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateDiscounts(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateImageURL(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateOptions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateProductID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateQuantity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateURL(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateVariantID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateGiftWrapping(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *NotificationsItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetPamAssetsOKBodyGetPamAssetsOKBodyAO1EmbeddedItemsItems0VariationFilesItems0Link) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDownload(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateSelf(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetPamAssetsOKBodyGetPamAssetsOKBodyAO1EmbeddedItemsItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateLinks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateCode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateReferenceFiles(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateVariationFiles(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateCartLineItemOKBodyLineItemsItems0DigitalItemsItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCoupons(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateDiscounts(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateImageURL(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateOptions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateProductID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateQuantity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateURL(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateVariantID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ Table management Consider an ID u1/u2/u3/t1/t2/t3, in any order that preserves the relative ordering of u1,u2,u3 and t1,t2,t3. To compute the ciphertext for this ID, we compute the group element for each component, and multiply them together. The group element for each component depends both on the element itself (i.e., whether it is a URI component or a time component, and its ordering relative to other components of the same type) and its position in the final ID. The index of the correct group element can be found with the following functions. URIComponentIndex computes the index of a certain component in the table. M is the maximum URI depth, N is the maximum expiry depth, C is the index of the URI component, and P is the position of the component in the ID. Given M and N, this function returns the index in the table of the group element for URI component C at position P in an ID. The formula is (N+1)C + (P−C), which simplifies to NC + P
|
// URIComponentIndex returns the index in the ciphertext table of the group
// element for URI component c when it occupies position p of the assembled ID.
// n is the maximum expiry (time) depth; m, the maximum URI depth, does not
// appear in the formula but is kept so the signature mirrors
// TimeComponentIndex. Derivation: (n+1)*c + (p-c) simplifies to n*c + p.
func URIComponentIndex(m int, n int, c int, p int) int {
	base := c * n
	return base + p
}
|
[
"func EncryptDecomposed(message *bn256.GT, params *hibe.Params, uriPath ID, timePath ID) *DecomposedCiphertext {\n\tif params.Pairing == nil {\n\t\tpanic(\"Pairing must be Precached before calling EncryptDecomposed()\")\n\t}\n\n\t// Randomly choose s in Zp\n\ts, err := rand.Int(rand.Reader, bn256.Order)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tciphertext := new(DecomposedCiphertext)\n\n\tciphertext.A = new(bn256.GT)\n\tciphertext.A.ScalarMult(params.Pairing, s)\n\tciphertext.A.Add(ciphertext.A, message)\n\n\tciphertext.B = new(bn256.G2).ScalarMult(params.G, s)\n\n\tciphertext.D = new(bn256.G1).ScalarMult(params.G3, s)\n\n\tm := len(uriPath)\n\tn := len(timePath)\n\thCacheSize := m + n\n\ttableSize := m + n + ((m * n) << 1)\n\n\turiHashed := uriPath.HashToZp()\n\ttimeHashed := timePath.HashToZp()\n\n\tciphertext.NumURIComponents = uint8(m)\n\tciphertext.NumTimeComponents = uint8(n)\n\n\tciphertext.Table = make([]*bn256.G1, tableSize, tableSize)\n\n\thCache := make([]*bn256.G1, m+n)\n\tfor i := 0; i != hCacheSize; i++ {\n\t\thCache[i] = new(bn256.G1).ScalarMult(params.H[i], s)\n\t}\n\n\tfor j, uriComponentHash := range uriHashed {\n\t\t// pos varies over all positions in the final ID at which this URI\n\t\t// component could be.\n\t\tfor pos := j; pos != j+n+1; pos++ {\n\t\t\tindex := URIComponentIndex(m, n, j, pos)\n\t\t\tciphertext.Table[index] = new(bn256.G1)\n\t\t\tciphertext.Table[index].ScalarMult(hCache[pos], uriComponentHash)\n\t\t}\n\t}\n\n\tfor j, timeComponentHash := range timeHashed {\n\t\t// pos varies over all positions in the final ID at which this time\n\t\t// component could be.\n\t\tfor pos := j; pos != j+m+1; pos++ {\n\t\t\tindex := TimeComponentIndex(m, n, j, pos)\n\t\t\tciphertext.Table[index] = new(bn256.G1)\n\t\t\tciphertext.Table[index].ScalarMult(hCache[pos], timeComponentHash)\n\t\t}\n\t}\n\n\treturn ciphertext\n}",
"func AssembleCiphertext(ciphertext *DecomposedCiphertext, id ID) *hibe.Ciphertext {\n\tthirdElement := new(bn256.G1).ScalarMult(ciphertext.D, big.NewInt(1))\n\n\tvar uriRelPos URIComponentPosition = 0\n\tvar timeRelPos TimeComponentPosition = 0\n\tfor pos, idComponent := range id {\n\t\tvar elem *bn256.G1\n\t\tif idComponent.Type() == URIComponentType {\n\t\t\telem = ciphertext.URIComponentElement(uriRelPos, pos)\n\t\t\turiRelPos++\n\t\t} else {\n\t\t\telem = ciphertext.TimeComponentElement(timeRelPos, pos)\n\t\t\ttimeRelPos++\n\t\t}\n\t\tthirdElement.Add(thirdElement, elem)\n\t}\n\n\treturn &hibe.Ciphertext{\n\t\tA: ciphertext.A,\n\t\tB: ciphertext.B,\n\t\tC: thirdElement,\n\t}\n}",
"func (dct *DecomposedCiphertext) TimeComponentElement(c TimeComponentPosition, p int) *bn256.G1 {\n\ti := TimeComponentIndex(int(dct.NumURIComponents), int(dct.NumTimeComponents), int(c), p)\n\treturn dct.Table[i]\n}",
"func (u *UF) Components() map[int]int {\n\tgrps := make(map[int]int) // component id :-> component size\n\tgid := 0 // group id\n\n\tfor i := 0; i < len(u.id); i++ {\n\t\tgid = u.Root(i)\n\t\tgrps[gid] = u.size[gid]\n\t}\n\treturn grps\n}",
"func indexBlockEnc(block int64, prefix int64, id int, blockId int) {\n\tvar i int64\n\n\tvar tagPos []int = make([]int, tPLength) // the current available space of each tag's list\n\tfor i = 0; i < tPLength; i++ {\n\t\ttagPos[i] = 0\n\t}\n\n\tvar cipherPos = 0 // the current available space of ciphertext list\n\tgamma, _ := rand.Int(rand.Reader, bn256.Order) // the nonce\n\tindex[id].PubKey[blockId] = new(bn256.G1).ScalarMult(g1s, gamma).Marshal() // calculate the public key of one block in one index item\n\n\tfor i = 0; i < tPLength; i++ { // initialize the tag list with 100 (one value out of range)\n\t\tfor j := 0; j < int(tPLength); j++ {\n\t\t\tindex[id].Tag[blockId][i][j] = 100\n\t\t}\n\t}\n\n\t// iStr includes the operator > or <\n\tfor i = 0; i < blockPossValue; i++ {\n\t\tif i == block { // do not encrypt the equal block\n\t\t\tcontinue\n\t\t} else if i < block { // the current variable is smaller than the current block\n\t\t\tiStr := strconv.FormatInt(i, 10) + \"<\" // add the inequality operator into the string to be hashed\n\t\t\texp := getHashedValue(iStr, prefix, blockId) // calculate the hash value in tag and ciphertext\n\t\t\t// calculate the tag\n\t\t\ttag, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(int64(tPLength))).String()) // the tag\n\t\t\tindex[id].Tag[blockId][tag][tagPos[tag]] = uint8(cipherPos) // store the list number of the ciphertext in the current available space of corresponding tag\n\t\t\ttagPos[tag]++\n\n\t\t\t// generate the ciphertext\n\t\t\tt := new(bn256.G1).ScalarBaseMult(exp)\n\t\t\tindex[id].Ciphertext[blockId][cipherPos] = new(bn256.G1).ScalarMult(t, gamma).Marshal()\n\t\t\tcipherPos++\n\t\t} else if i > block { // the current variable is larger than the current block (the process procedure is similar)\n\t\t\tiStr := strconv.FormatInt(i, 10) + \">\"\n\t\t\texp := getHashedValue(iStr, prefix, blockId)\n\t\t\t// calculate the tag\n\t\t\ttag, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(int64(tPLength))).String()) 
// the tag\n\t\t\tindex[id].Tag[blockId][tag][tagPos[tag]] = uint8(cipherPos)\n\t\t\ttagPos[tag]++\n\n\t\t\t// generate the ciphertext\n\t\t\tt := new(bn256.G1).ScalarBaseMult(exp)\n\t\t\tindex[id].Ciphertext[blockId][cipherPos] = new(bn256.G1).ScalarMult(t, gamma).Marshal()\n\t\t\tcipherPos++\n\t\t}\n\t}\n}",
"func UA_NodeId_hash(n []UA_NodeId) (c4goDefaultReturn UA_UInt32) {\n\tswitch uint32(int((n[0].identifierType))) {\n\tcase uint32(int((UA_NODEIDTYPE_NUMERIC))):\n\t\tfallthrough\n\tdefault:\n\t\t{\n\t\t\treturn UA_UInt32((uint32_t((uint32((uint32((uint32((uint32_t((UA_UInt32((u32(((uint32(uint16((uint16((uint16_t((UA_UInt16(n[0].namespaceIndex)))))))) + uint32((uint32((uint32_t((UA_UInt32((*n[0].identifier.numeric()))))))))*uint32((uint64((uint64_t((UA_UInt64((u64(2654435761)))))))))>>uint64(32)) & uint32(4294967295)))))))))))))))))\n\t\t}\n\tcase uint32(int((UA_NODEIDTYPE_STRING))):\n\t\tfallthrough\n\tcase uint32(int((UA_NODEIDTYPE_BYTESTRING))):\n\t\t{\n\t\t\t// shift knuth multiplication to use highest 32 bits and after addition make sure we don't have an integer overflow\n\t\t\t// Knuth's multiplicative hashing\n\t\t\treturn UA_UInt32((uint32_t((uint32((uint32((uint32((uint32_t((UA_UInt32((fnv32(u32(UA_UInt16(n[0].namespaceIndex)), (*n[0].identifier.string()).data, uint((*n[0].identifier.string()).length))))))))))))))))\n\t\t}\n\tcase uint32(int((UA_NODEIDTYPE_GUID))):\n\t\t{\n\t\t\treturn UA_UInt32((uint32_t((uint32((uint32((uint32((uint32_t((UA_UInt32((fnv32(u32(UA_UInt16(n[0].namespaceIndex)), (*[100000000]UA_Guid)(unsafe.Pointer(&(*n[0].identifier.guid())))[:], uint((16)))))))))))))))))\n\t\t}\n\t}\n\treturn\n}",
"func deriveGroupElementPair(privElement big.Int, blindingF scrypto.Hash256, curve ec.Curve) (*ecdsa.PublicKey, big.Int) {\n\tvar pointBlindingF big.Int\n\tpointBlindingF.SetBytes(blindingF[:])\n\tprivElement.Mul(&privElement, &pointBlindingF)\n\tprivElement.Mod(&privElement, curve.Params().N)\n\n\tx, y := curve.Params().ScalarBaseMult(privElement.Bytes())\n\tpubkey := ecdsa.PublicKey{Curve: curve, X: x, Y: y}\n\n\treturn &pubkey, privElement\n}",
"func deriveIndex(slice uint16, hash1, hash2 uint64) (uint64, uint64) {\n\ts := uint64(slice)\n\tz := (s*s + s) / 2\n\tderivedIdx := hash1 + s*hash2 + (z*(s-1))/3\n\treturn derivedIdx, hash2 + z\n}",
"func TimeComponentIndex(m int, n int, c int, p int) int {\n\treturn m*(n+1+c) + p\n}",
"func GeScalarMult(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement) {\n\tvar e [64]int8\n\tvar carry, carry2, i int\n\tvar Ai [8]CachedGroupElement\n\tvar t CompletedGroupElement\n\tvar u ExtendedGroupElement\n\n\tcarry = 0\n\tfor i = 0; i < 31; i++ {\n\t\tcarry += (int)(a[i])\n\t\tcarry2 = (carry + 8) >> 4\n\t\te[2*i] = (int8)(carry - (carry2 << 4))\n\t\tcarry = (carry2 + 8) >> 4\n\t\te[2*i+1] = (int8)(carry2 - (carry << 4))\n\t}\n\tcarry += (int)(a[31])\n\tcarry2 = (carry + 8) >> 4\n\te[62] = (int8)(carry - (carry2 << 4))\n\te[63] = (int8)(carry2)\n\n\tA.ToCached(&Ai[0])\n\tfor i = 0; i < 7; i++ {\n\t\tGeAdd(&t, A, &Ai[i])\n\t\tt.ToExtended(&u)\n\t\tu.ToCached(&Ai[i+1])\n\t}\n\n\tr.Zero()\n\tfor i = 63; i >= 0; i-- {\n\t\tb := e[i]\n\t\tbnegative := int8(negative8(b))\n\t\tbabs := b - (((-bnegative) & b) << 1)\n\t\tvar cur, minuscur CachedGroupElement\n\t\tr.Double(&t)\n\t\tt.ToProjective(r)\n\t\tr.Double(&t)\n\t\tt.ToProjective(r)\n\t\tr.Double(&t)\n\t\tt.ToProjective(r)\n\t\tr.Double(&t)\n\t\tt.ToExtended(&u)\n\t\tcur.Zero()\n\t\tfor n := 0; n < 8; n++ {\n\t\t\tCachedGroupElementCMove(&cur, &Ai[n], equal(int32(babs), int32(n+1)))\n\t\t}\n\t\tFeCopy(&minuscur.yPlusX, &cur.yMinusX)\n\t\tFeCopy(&minuscur.yMinusX, &cur.yPlusX)\n\t\tFeCopy(&minuscur.Z, &cur.Z)\n\t\tFeNeg(&minuscur.T2d, &cur.T2d)\n\t\tCachedGroupElementCMove(&cur, &minuscur, int32(bnegative))\n\t\tGeAdd(&t, &u, &cur)\n\t\tt.ToProjective(r)\n\t}\n}",
"func GetChainId(parts [] string) ([]byte, error) {\n if len(parts)<2 {\n return nil, fmt.Errorf(\"No Chain Specification provided\")\n }\n sum := sha256.New()\n\n for i, str := range parts {\n if i > 0 {\n x := sha256.Sum256([]byte(str))\n sum.Write(x[:])\n }\n }\n \n return sum.Sum(nil), nil\n}",
"func TestEncodeSpaceID(t *testing.T) {\n\tre := require.New(t)\n\tre.Equal(\"keyspaces/meta/00000000\", endpoint.KeyspaceMetaPath(0))\n\tre.Equal(\"keyspaces/meta/16777215\", endpoint.KeyspaceMetaPath(1<<24-1))\n\tre.Equal(\"keyspaces/meta/00000100\", endpoint.KeyspaceMetaPath(100))\n\tre.Equal(\"keyspaces/meta/00000011\", endpoint.KeyspaceMetaPath(11))\n\tre.Equal(\"keyspaces/meta/00000010\", endpoint.KeyspaceMetaPath(10))\n}",
"func encodeKeyspaceGroupID(groupID uint32) string {\n\treturn fmt.Sprintf(\"%05d\", groupID)\n}",
"func CdlTakuri(inOpen []float64, inHigh []float64, inLow []float64, inClose []float64) []int {\n var outBegIdx int\n var outNBElement int\n n := len(inOpen)\n outInteger := make([]int, n)\n ta_CdlTakuri(0, n - 1, (*float64)(&inOpen[0]), (*float64)(&inHigh[0]), (*float64)(&inLow[0]), (*float64)(&inClose[0]), &outBegIdx, &outNBElement, (*int)(&outInteger[0]))\n outInteger = append(make([]int, outBegIdx), outInteger[:outNBElement]...)\n return outInteger\n}",
"func group(mbi []*models.Alr, keyValue []utils.KvPair, lastUpdated time.Time) *r4Models.Group {\n\tgroup := &r4Models.Group{}\n\tgroup.Id = &r4Datatypes.Id{Value: \"example-id-group\"}\n\tmember := []*r4Models.Group_Member{}\n\textension := []*r4Datatypes.Extension{}\n\treasonCodes := &r4Datatypes.Extension{\n\t\tUrl: &r4Datatypes.Uri{\n\t\t\tValue: \"http://alr.cms.gov/ig/StructureDefinition/ext-changeReason\",\n\t\t}}\n\tgroup.Meta = &r4Datatypes.Meta{\n\t\tLastUpdated: &r4Datatypes.Instant{\n\t\t\tPrecision: r4Datatypes.Instant_SECOND,\n\t\t\tValueUs: lastUpdated.UnixNano() / int64(time.Microsecond),\n\t\t},\n\t\tProfile: []*r4Datatypes.Canonical{\n\t\t\t{Value: \"http://alr.cms.gov/ig/StructureDefinition/alr-Group\"},\n\t\t}}\n\n\tfor _, kv := range keyValue {\n\t\tswitch {\n\t\tcase changeTypeP.MatchString(kv.Key):\n\t\t\t// ext - changeType\n\t\t\tvar val = \"nochange\"\n\t\t\t// Mapping to DaVinci ATR\n\t\t\tif kv.Value == \"1\" {\n\t\t\t\tval = \"dropped\"\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://hl7.org/fhir/us/davinci-atr/STU1/StructureDefinition-ext-changeType.html\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Code{\n\t\t\t\t\tCode: &r4Datatypes.Code{Value: val},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\textension = append(extension, ext)\n\t\tcase changeReasonP.MatchString(kv.Key):\n\t\t\t// ext - changeReason\n\n\t\t\t// Data with a value of 0 should not be included in the FHIR resource\n\t\t\tif kv.Value != \"0\" {\n\t\t\t\t// get the variable name from the map set in mapping.go\n\t\t\t\tdisplay := utils.GroupPatternDescriptions[kv.Key]\n\t\t\t\tsubExt := extensionMaker(\"reasonCode\",\n\t\t\t\t\t\"\", kv.Key, \"https://bluebutton.cms.gov/resources/variables/alr/changeReason/\", display)\n\n\t\t\t\treasonCodes.Extension = append(reasonCodes.Extension, subExt)\n\t\t\t}\n\t\tcase claimsBasedAssignmentFlagP.MatchString(kv.Key):\n\t\t\t// ext - 
claimsBasedAssignmentFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-claimsBasedAssignmentFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\textension = append(extension, ext)\n\t\tcase claimsBasedAssignmentStepP.MatchString(kv.Key):\n\t\t\t// ext - claimsBasedAssignmentStep\n\n\t\t\tval, err := strconv.ParseInt(kv.Value, 10, 32)\n\t\t\tif err != nil {\n\t\t\t\tlog.API.Warnf(\"Could convert string to int for {}: {}\", mbi, err)\n\t\t\t}\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-claimsBasedAssignmentStep\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Integer{\n\t\t\t\t\tInteger: &r4Datatypes.Integer{Value: int32(val)},\n\t\t\t\t},\n\t\t\t}\n\t\tcase newlyAssignedBeneficiaryFlagP.MatchString(kv.Key):\n\t\t\t// ext - newlyAssignedBeneficiaryFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-newlyAssignedBeneficiaryFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\tcase pervAssignedBeneficiaryFlagP.MatchString(kv.Key):\n\t\t\t// ext - pervAssignedBeneficiaryFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-prevAssignedBeneficiaryFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: 
&r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\tcase voluntaryAlignmentFlagP.MatchString(kv.Key):\n\t\t\t// ext - voluntaryAlignmentFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-newlyAssignedBeneficiaryFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\tcase vaSelectionOnlyP.MatchString(kv.Key):\n\t\t\t// ext - vaSelectionOnlyFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-vaSelectionOnlyFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\n\t}\n\textension = append(extension, reasonCodes)\n\n\t// NOTE: there is only one element in Member slice\n\tfor i := range mbi {\n\n\t\tm := &r4Models.Group_Member{}\n\n\t\tm.Extension = extension\n\t\tm.Entity = &r4Datatypes.Reference{Reference: &r4Datatypes.Reference_PatientId{\n\t\t\tPatientId: &r4Datatypes.ReferenceId{Value: mbi[i].BeneMBI},\n\t\t}}\n\n\t\tmember = append(member, m)\n\t}\n\n\tgroup.Member = member\n\n\treturn group\n}",
"func getURIKey(m *descriptor.Method, ver string, req bool) (URIKey, error) {\n\tvar output []KeyComponent\n\tvar out URIKey\n\n\tparams, err := getMethodParams(m)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\tmsg := m.RequestType\n\tif params.Oper == \"ListOper\" || params.Oper == \"WatchOper\" {\n\t\tmsgtype := \"\"\n\t\tif params.Oper == \"ListOper\" {\n\t\t\tmsgtype, err = getListType(m.ResponseType, true)\n\t\t\tif err != nil {\n\t\t\t\treturn out, err\n\t\t\t}\n\t\t\tmsgtype = \".\" + msgtype\n\t\t}\n\t\tif params.Oper == \"WatchOper\" {\n\t\t\tmsgtype, err = getWatchType(m.ResponseType, true)\n\t\t\tif err != nil {\n\t\t\t\treturn out, err\n\t\t\t}\n\t\t\tmsgtype = \".\" + msgtype\n\t\t}\n\t\tmsg, err = m.Service.File.Reg.LookupMsg(\"\", msgtype)\n\t\tif err != nil {\n\t\t\treturn out, err\n\t\t}\n\t}\n\tsvcParams, err := common.GetSvcParams(m.Service)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\tif req {\n\t\tr, err := reg.GetExtension(\"google.api.http\", m)\n\t\trule := r.(*googapi.HttpRule)\n\t\tpattern := \"\"\n\t\tswitch params.Oper {\n\t\tcase \"CreateOper\", \"LabelOper\":\n\t\t\tpattern = rule.GetPost()\n\t\tcase \"GetOper\", \"ListOper\", \"WatchOper\":\n\t\t\tpattern = rule.GetGet()\n\t\tcase \"DeleteOper\":\n\t\t\tpattern = rule.GetDelete()\n\t\tcase \"UpdateOper\":\n\t\t\tpattern = rule.GetPut()\n\t\t}\n\t\tif output, err = findComponentsHelper(msg, pattern); err != nil {\n\t\t\treturn out, err\n\t\t}\n\t} else {\n\t\tif params.Oper == \"LabelOper\" {\n\t\t\t// Response type has the URI, request type is api.Label\n\t\t\tmsg = m.ResponseType\n\t\t}\n\n\t\tif output, err = getMsgURI(msg, ver, svcParams.Prefix); err != nil {\n\t\t\treturn out, err\n\t\t}\n\t}\n\n\tout.Str = \"\"\n\tout.Ref = false\n\tsep := \"\"\n\tfor _, v := range output {\n\t\tif v.Type == \"prefix\" {\n\t\t\tout.Str = fmt.Sprintf(\"%s%s\\\"%s\\\"\", out.Str, sep, v.Val)\n\t\t} else if v.Type == \"field\" {\n\t\t\tout.Ref = true\n\t\t\tout.Str = fmt.Sprintf(\"%s%sin.%s\", 
out.Str, sep, v.Val)\n\t\t}\n\t\tsep = \", \"\n\t}\n\treturn out, nil\n}",
"func ConstructCompositeKey(ns string, key string, blocknum uint64, trannum uint64) string {\n\t// TODO - We will likely want sortable varint encoding, rather then a simple number, in order to support sorted key scans\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(ns)\n\tbuffer.WriteByte(0)\n\tbuffer.WriteString(key)\n\tbuffer.WriteByte(0)\n\tbuffer.WriteString(strconv.Itoa(int(blocknum)))\n\tbuffer.WriteByte(0)\n\tbuffer.WriteString(strconv.Itoa(int(trannum)))\n\n\treturn buffer.String()\n}",
"func (d *Dht) getHighestAllowableBucketIndex(otherId []byte) int {\n\tsameBytes := 0\n\tfor i := 0; i < keysize; i++ {\n\t\tif d.Node.Id[i] != otherId[i] {\n\t\t\tsameBits := 0\n\t\t\t// get the first differing bit\n\t\t\tfor j := 7; j >= 0; j-- {\n\t\t\t\tif d.Node.Id[i]&(1<<j) != otherId[i]&(1<<j) {\n\t\t\t\t\treturn numBuckets - 8*sameBytes - sameBits - 1\n\t\t\t\t} else {\n\t\t\t\t\tsameBits++\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tsameBytes++\n\t\t}\n\t}\n\n\treturn 0\n}",
"func (l *GroupLookup) lookupGroup(key query.GroupKey) int {\n if l.lastIndex >= 0 {\n kg := l.groups[l.lastIndex]\n if !key.Less(kg.First()) {\n // If the next group doesn't exist or has a first value that is\n // greater than this key, then we can return the last index and\n // avoid performing a binary search.\n if l.lastIndex == len(l.groups)-1 || key.Less(l.groups[l.lastIndex+1].First()) {\n return l.lastIndex\n }\n }\n }\n\n // Find the last group where the first key is less than or equal\n // than the key we are looking for. This means we need to search for\n // the first group where the first key is greater than the key we are setting\n // and use the group before that one.\n index := sort.Search(len(l.groups), func(i int) bool {\n return key.Less(l.groups[i].First())\n }) - 1\n if index >= 0 {\n l.lastIndex = index\n }\n return index\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TimeComponentIndex computes the index of a certain component in the table. M is the maximum URI depth, N is the maximum expiry depth, C is the index of the Time component, and P is the position of the component in the ID. Given M and N, this function returns the index in the table of the group element for Time component C at position P in an ID. The formula is (N+1)M + (M+1)C + (PC), which simplifies M(N+1+C) + P
|
func TimeComponentIndex(m int, n int, c int, p int) int {
return m*(n+1+c) + p
}
|
[
"func (dct *DecomposedCiphertext) TimeComponentElement(c TimeComponentPosition, p int) *bn256.G1 {\n\ti := TimeComponentIndex(int(dct.NumURIComponents), int(dct.NumTimeComponents), int(c), p)\n\treturn dct.Table[i]\n}",
"func URIComponentIndex(m int, n int, c int, p int) int {\n\treturn n*c + p\n}",
"func (c *cpuMetric) getIndex() int {\n\treturn c.idx\n}",
"func (tr Timerange) IndexOfTime(point time.Time) int {\n\tif tr.Resolution() == 0 {\n\t\treturn 0\n\t}\n\treturn int(point.Sub(tr.Start()) / tr.Resolution())\n}",
"func getCPUTimeForComponent(client influxdb.Client, start, end, execUUID, component string) (float64, error) {\n\tresult, err := client.Select(fmt.Sprintf(`from(bucket:\"%s\")\n\t\t\t|> range(start: %s, stop: %s)\n\t\t\t|> filter(fn:(r) => r._measurement == \"process_cpu_seconds_total\" and r.exec_uuid == \"%s\" and r.component == \"%s\")\n\t\t\t|> max()`,\n\t\tclient.Config.Database, start, end, execUUID, component))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ttime := 0.0\n\tfor _, value := range result {\n\t\ttime += value[\"_value\"].(float64)\n\t}\n\treturn time, nil\n}",
"func (us *Update) IndexByTime(prefix string, typ indexType, t time.Time) (index string) {\n\tyear := t.Format(\"2006\")\n\tmonth := t.Format(\"01\")\n\tswitch typ {\n\tcase IndexTypeYear:\n\t\tindex = strings.Join([]string{prefix, year}, \"_\")\n\tcase IndexTypeMonth:\n\t\tindex = strings.Join([]string{prefix, year, month}, \"_\")\n\tcase IndexTypeDay:\n\t\tday := t.Format(\"02\")\n\t\tindex = strings.Join([]string{prefix, year, month, day}, \"_\")\n\tcase IndexTypeWeek:\n\t\tindex = strings.Join([]string{prefix, year, month, weeks[t.Day()/8]}, \"_\")\n\t}\n\treturn\n}",
"func (v *Vector3) Component(index int) float32 {\n\n\tswitch index {\n\tcase 0:\n\t\treturn v.X\n\tcase 1:\n\t\treturn v.Y\n\tcase 2:\n\t\treturn v.Z\n\tdefault:\n\t\tpanic(\"index is out of range\")\n\t}\n}",
"func (k *KvChannelIndex) getBlockIndex(sequence uint64) uint16 {\n\treturn uint16(sequence / byteIndexBlockCapacity)\n}",
"func (v *Vector2) Component(index int) float32 {\n\n\tswitch index {\n\tcase 0:\n\t\treturn v.X\n\tcase 1:\n\t\treturn v.Y\n\tdefault:\n\t\tpanic(\"index is out of range\")\n\t}\n}",
"func (group *Group) Index() (index int) {\n return group.index\n}",
"func (c ColumnID) index() int {\n\treturn int(c - 1)\n}",
"func GetTableIndex(input int64, tableCount int) int {\n\treturn int(Crc32(input)) % tableCount\n}",
"func (q *Qchan) Idx() uint32 {\n\tif q == nil {\n\t\treturn 0\n\t}\n\treturn q.KeyGen.Step[4] & 0x7fffffff\n}",
"func (c *Client) WoWGuildCrestComponentsIndex() (*wowgd.GuildCrestComponentsIndex, []byte, error) {\n\tvar (\n\t\tdat wowgd.GuildCrestComponentsIndex\n\t\tb []byte\n\t\terr error\n\t)\n\n\tb, err = c.getURLBody(c.apiURL+fmt.Sprintf(\"/data/wow/guild-crest/index?locale=%s\", c.locale), c.staticNamespace)\n\tif err != nil {\n\t\treturn &dat, b, err\n\t}\n\n\terr = json.Unmarshal(b, &dat)\n\tif err != nil {\n\t\treturn &dat, b, err\n\t}\n\n\treturn &dat, b, nil\n}",
"func DurationMinuteComponent(d time.Duration) int {\n\tmin := d / time.Minute\n\thour := d / time.Hour\n\treturn int(min - hour*60)\n}",
"func DurationMillisecondComponent(d time.Duration) int {\n\tms := d / time.Millisecond\n\tsec := d / time.Second\n\treturn int(ms - sec*1e3)\n}",
"func (s *solver) cellIndex(p position) int {\n\treturn p.y*s.width + p.x\n}",
"func TimeToIndex(t time.Time, tf time.Duration) int64 {\n\ttLocal := ToSystemTimezone(t)\n\t// special 1D case (maximum supported on-disk size)\n\tif tf == utils.Day {\n\t\treturn int64(tLocal.YearDay() - 1)\n\t}\n\treturn 1 + tLocal.Sub(\n\t\ttime.Date(\n\t\t\ttLocal.Year(),\n\t\t\ttime.January,\n\t\t\t1, 0, 0, 0, 0,\n\t\t\ttLocal.Location())).Nanoseconds()/tf.Nanoseconds()\n}",
"func (u *UF) Components() map[int]int {\n\tgrps := make(map[int]int) // component id :-> component size\n\tgid := 0 // group id\n\n\tfor i := 0; i < len(u.id); i++ {\n\t\tgid = u.Root(i)\n\t\tgrps[gid] = u.size[gid]\n\t}\n\treturn grps\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TimeComponentElement returns the group element for the particular URI component index and ID index.
|
func (dct *DecomposedCiphertext) TimeComponentElement(c TimeComponentPosition, p int) *bn256.G1 {
i := TimeComponentIndex(int(dct.NumURIComponents), int(dct.NumTimeComponents), int(c), p)
return dct.Table[i]
}
|
[
"func (*TimeGroups) GetPath() string { return \"/api/objects/time/group/\" }",
"func TimeComponentIndex(m int, n int, c int, p int) int {\n\treturn m*(n+1+c) + p\n}",
"func (e Element) Group() int {\n\treturn e.getInfo().group\n}",
"func (t *TimeGroup) GetPath() string { return fmt.Sprintf(\"/api/objects/time/group/%s\", t.Reference) }",
"func GetComponent(uri string) string {\n\tparts := strings.Split(uri, \":\")\n\tif len(parts) <= 1 {\n\t\treturn \"\"\n\t}\n\treturn parts[0]\n}",
"func tmplByTimerGroup(m map[timer.Group]*data.Timer, g string) *data.Timer {\n\treturn m[timer.Group(g)]\n}",
"func macroTimeGroup(_ *Query, args []string) (string, error) {\n\tif len(args) != 2 {\n\t\treturn \"\", fmt.Errorf(\"%w: expected 1 argument, received %d\", ErrorBadArgumentCount, len(args))\n\t}\n\n\tres := \"\"\n\tswitch args[1] {\n\tcase \"minute\":\n\t\tres += fmt.Sprintf(\"datepart(minute, %s),\", args[0])\n\t\tfallthrough\n\tcase \"hour\":\n\t\tres += fmt.Sprintf(\"datepart(hour, %s),\", args[0])\n\t\tfallthrough\n\tcase \"day\":\n\t\tres += fmt.Sprintf(\"datepart(day, %s),\", args[0])\n\t\tfallthrough\n\tcase \"month\":\n\t\tres += fmt.Sprintf(\"datepart(month, %s),\", args[0])\n\t\tfallthrough\n\tcase \"year\":\n\t\tres += fmt.Sprintf(\"datepart(year, %s)\", args[0])\n\t}\n\n\treturn res, nil\n}",
"func (Lutetium) GetGroup() string {\n\tvar g groupType = b3\n\treturn g.get()\n}",
"func (key CompositeKey) Group() string {\n\treturn key.parts[2]\n}",
"func (*TimeGroup) PostPath() string {\n\treturn \"/api/objects/time/group/\"\n}",
"func (group *Group) Index() (index int) {\n return group.index\n}",
"func (_this *MediaDeviceInfo) GroupId() string {\n\tvar ret string\n\tvalue := _this.Value_JS.Get(\"groupId\")\n\tret = (value).String()\n\treturn ret\n}",
"func (o GetContainerGroupsGroupOutput) FailedTime() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetContainerGroupsGroup) string { return v.FailedTime }).(pulumi.StringOutput)\n}",
"func (m *Measurement) GroupId() int {\n return m.data.GroupId\n}",
"func groupName(apiVersion string) string {\n\treturn strings.Split(apiVersion, \"/\")[0]\n}",
"func group(mbi []*models.Alr, keyValue []utils.KvPair, lastUpdated time.Time) *r4Models.Group {\n\tgroup := &r4Models.Group{}\n\tgroup.Id = &r4Datatypes.Id{Value: \"example-id-group\"}\n\tmember := []*r4Models.Group_Member{}\n\textension := []*r4Datatypes.Extension{}\n\treasonCodes := &r4Datatypes.Extension{\n\t\tUrl: &r4Datatypes.Uri{\n\t\t\tValue: \"http://alr.cms.gov/ig/StructureDefinition/ext-changeReason\",\n\t\t}}\n\tgroup.Meta = &r4Datatypes.Meta{\n\t\tLastUpdated: &r4Datatypes.Instant{\n\t\t\tPrecision: r4Datatypes.Instant_SECOND,\n\t\t\tValueUs: lastUpdated.UnixNano() / int64(time.Microsecond),\n\t\t},\n\t\tProfile: []*r4Datatypes.Canonical{\n\t\t\t{Value: \"http://alr.cms.gov/ig/StructureDefinition/alr-Group\"},\n\t\t}}\n\n\tfor _, kv := range keyValue {\n\t\tswitch {\n\t\tcase changeTypeP.MatchString(kv.Key):\n\t\t\t// ext - changeType\n\t\t\tvar val = \"nochange\"\n\t\t\t// Mapping to DaVinci ATR\n\t\t\tif kv.Value == \"1\" {\n\t\t\t\tval = \"dropped\"\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://hl7.org/fhir/us/davinci-atr/STU1/StructureDefinition-ext-changeType.html\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Code{\n\t\t\t\t\tCode: &r4Datatypes.Code{Value: val},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\textension = append(extension, ext)\n\t\tcase changeReasonP.MatchString(kv.Key):\n\t\t\t// ext - changeReason\n\n\t\t\t// Data with a value of 0 should not be included in the FHIR resource\n\t\t\tif kv.Value != \"0\" {\n\t\t\t\t// get the variable name from the map set in mapping.go\n\t\t\t\tdisplay := utils.GroupPatternDescriptions[kv.Key]\n\t\t\t\tsubExt := extensionMaker(\"reasonCode\",\n\t\t\t\t\t\"\", kv.Key, \"https://bluebutton.cms.gov/resources/variables/alr/changeReason/\", display)\n\n\t\t\t\treasonCodes.Extension = append(reasonCodes.Extension, subExt)\n\t\t\t}\n\t\tcase claimsBasedAssignmentFlagP.MatchString(kv.Key):\n\t\t\t// ext - 
claimsBasedAssignmentFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-claimsBasedAssignmentFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\textension = append(extension, ext)\n\t\tcase claimsBasedAssignmentStepP.MatchString(kv.Key):\n\t\t\t// ext - claimsBasedAssignmentStep\n\n\t\t\tval, err := strconv.ParseInt(kv.Value, 10, 32)\n\t\t\tif err != nil {\n\t\t\t\tlog.API.Warnf(\"Could convert string to int for {}: {}\", mbi, err)\n\t\t\t}\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-claimsBasedAssignmentStep\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Integer{\n\t\t\t\t\tInteger: &r4Datatypes.Integer{Value: int32(val)},\n\t\t\t\t},\n\t\t\t}\n\t\tcase newlyAssignedBeneficiaryFlagP.MatchString(kv.Key):\n\t\t\t// ext - newlyAssignedBeneficiaryFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-newlyAssignedBeneficiaryFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\tcase pervAssignedBeneficiaryFlagP.MatchString(kv.Key):\n\t\t\t// ext - pervAssignedBeneficiaryFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-prevAssignedBeneficiaryFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: 
&r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\tcase voluntaryAlignmentFlagP.MatchString(kv.Key):\n\t\t\t// ext - voluntaryAlignmentFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-newlyAssignedBeneficiaryFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\tcase vaSelectionOnlyP.MatchString(kv.Key):\n\t\t\t// ext - vaSelectionOnlyFlag\n\t\t\tvar val = true\n\t\t\tif kv.Value == \"0\" {\n\t\t\t\tval = false\n\t\t\t}\n\n\t\t\text := extensionMaker(\"http://alr.cms.gov/ig/StructureDefinition/ext-vaSelectionOnlyFlag\",\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t\text.Value = &r4Datatypes.Extension_ValueX{\n\t\t\t\tChoice: &r4Datatypes.Extension_ValueX_Boolean{\n\t\t\t\t\tBoolean: &r4Datatypes.Boolean{Value: val},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\n\t}\n\textension = append(extension, reasonCodes)\n\n\t// NOTE: there is only one element in Member slice\n\tfor i := range mbi {\n\n\t\tm := &r4Models.Group_Member{}\n\n\t\tm.Extension = extension\n\t\tm.Entity = &r4Datatypes.Reference{Reference: &r4Datatypes.Reference_PatientId{\n\t\t\tPatientId: &r4Datatypes.ReferenceId{Value: mbi[i].BeneMBI},\n\t\t}}\n\n\t\tmember = append(member, m)\n\t}\n\n\tgroup.Member = member\n\n\treturn group\n}",
"func (Seaborgium) GetGroup() string {\n\tvar g groupType = b6\n\treturn g.get()\n}",
"func (*WebStore) GetComponentGVK() schema.GroupVersionKind {\n\treturn schema.GroupVersionKind{\n\t\tGroup: GroupVersion.Group,\n\t\tVersion: GroupVersion.Version,\n\t\tKind: \"WebStore\",\n\t}\n}",
"func groupName(pattok string) string {\n\tif strings.HasPrefix(pattok, GroupStart) && strings.HasSuffix(pattok, GroupEnd) {\n\t\treturn pattok[len(GroupStart) : len(pattok)-len(GroupEnd)]\n\t}\n\treturn \"\"\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
EncryptDecomposed encrypts an element of GT using HIBE and Ciphertext Decomposition.
|
func EncryptDecomposed(message *bn256.GT, params *hibe.Params, uriPath ID, timePath ID) *DecomposedCiphertext {
if params.Pairing == nil {
panic("Pairing must be Precached before calling EncryptDecomposed()")
}
// Randomly choose s in Zp
s, err := rand.Int(rand.Reader, bn256.Order)
if err != nil {
panic(err)
}
ciphertext := new(DecomposedCiphertext)
ciphertext.A = new(bn256.GT)
ciphertext.A.ScalarMult(params.Pairing, s)
ciphertext.A.Add(ciphertext.A, message)
ciphertext.B = new(bn256.G2).ScalarMult(params.G, s)
ciphertext.D = new(bn256.G1).ScalarMult(params.G3, s)
m := len(uriPath)
n := len(timePath)
hCacheSize := m + n
tableSize := m + n + ((m * n) << 1)
uriHashed := uriPath.HashToZp()
timeHashed := timePath.HashToZp()
ciphertext.NumURIComponents = uint8(m)
ciphertext.NumTimeComponents = uint8(n)
ciphertext.Table = make([]*bn256.G1, tableSize, tableSize)
hCache := make([]*bn256.G1, m+n)
for i := 0; i != hCacheSize; i++ {
hCache[i] = new(bn256.G1).ScalarMult(params.H[i], s)
}
for j, uriComponentHash := range uriHashed {
// pos varies over all positions in the final ID at which this URI
// component could be.
for pos := j; pos != j+n+1; pos++ {
index := URIComponentIndex(m, n, j, pos)
ciphertext.Table[index] = new(bn256.G1)
ciphertext.Table[index].ScalarMult(hCache[pos], uriComponentHash)
}
}
for j, timeComponentHash := range timeHashed {
// pos varies over all positions in the final ID at which this time
// component could be.
for pos := j; pos != j+m+1; pos++ {
index := TimeComponentIndex(m, n, j, pos)
ciphertext.Table[index] = new(bn256.G1)
ciphertext.Table[index].ScalarMult(hCache[pos], timeComponentHash)
}
}
return ciphertext
}
|
[
"func DecryptDecomposed(ciphertext *DecomposedCiphertext, id ID, key *hibe.PrivateKey) *bn256.GT {\n\thibeCiphertext := AssembleCiphertext(ciphertext, id)\n\tif hibeCiphertext == nil {\n\t\treturn nil\n\t}\n\n\treturn hibe.Decrypt(key, hibeCiphertext)\n}",
"func AssembleCiphertext(ciphertext *DecomposedCiphertext, id ID) *hibe.Ciphertext {\n\tthirdElement := new(bn256.G1).ScalarMult(ciphertext.D, big.NewInt(1))\n\n\tvar uriRelPos URIComponentPosition = 0\n\tvar timeRelPos TimeComponentPosition = 0\n\tfor pos, idComponent := range id {\n\t\tvar elem *bn256.G1\n\t\tif idComponent.Type() == URIComponentType {\n\t\t\telem = ciphertext.URIComponentElement(uriRelPos, pos)\n\t\t\turiRelPos++\n\t\t} else {\n\t\t\telem = ciphertext.TimeComponentElement(timeRelPos, pos)\n\t\t\ttimeRelPos++\n\t\t}\n\t\tthirdElement.Add(thirdElement, elem)\n\t}\n\n\treturn &hibe.Ciphertext{\n\t\tA: ciphertext.A,\n\t\tB: ciphertext.B,\n\t\tC: thirdElement,\n\t}\n}",
"func Enc(data *[]byte, ts time.Time, pk string) (*[]byte, error) {\n\thasher := sha512.New()\n\thasher.Write([]byte(ts.Format(time.RFC3339)))\n\tout := hex.EncodeToString(hasher.Sum(nil))\n\tnonce, _ := hex.DecodeString(out[64:(64 + 24)])\n\taData, _ := hex.DecodeString(out)\n\tk, _ := hex.DecodeString(pk)\n\tif len(k) < 32 {\n\t\treturn nil, errors.New(\"key length too short\")\n\t}\n\tk = k[:32]\n\n\tblock, err := aes.NewCipher(k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcipherText := aesgcm.Seal(nil, nonce, *data, aData)\n\treturn &cipherText, nil\n}",
"func (obj *DataCryptoGrapher) Encrypt(rawBuf []byte) ([]byte, tgdb.TGError) {\n\tif logger.IsDebug() {\n\t\tlogger.Debug(fmt.Sprintf(\"Entering DataCryptoGrapher:Encrypt() w/ raw buffer as '%+v'\", rawBuf))\n\t}\n\t// TODO: Uncomment once DataCryptoGrapher is implemented\n\t/**\n\ttry {\n\t\tCipher cipher = Cipher.getInstance(publicKey.getAlgorithm());\n\t\tcipher.init(Cipher.ENCRYPT_MODE, publicKey, algparams);\n\t\treturn cipher.doFinal(data);\n\t}\n\tcatch (Exception e) {\n\t\tthrow new TGException(e);\n\t}\n\n\tblock, err := blowfish.NewCipher(key)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tmode := ecb.NewECBEncrypter(block)\n\tpadder := padding.NewPkcs5Padding()\n\tpt, err = padder.Pad(pt) // padd last block of plaintext if block size less than block cipher size\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tct := make([]byte, len(pt))\n\tmode.CryptBlocks(ct, pt)\n\treturn ct\n\t\n\n\tblock, err := blowfish.NewCipher(obj.remoteCert.RawSubjectPublicKeyInfo)\n\tif err != nil {\n\t\treturn nil, GetErrorByType(TGErrorSecurityException, INTERNAL_SERVER_ERROR, err.Error(), \"\")\n\t}\n\tencryptedBuf := make([]byte, aes.BlockSize+len(rawBuf))\n\tiv := encryptedBuf[:aes.BlockSize]\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(encryptedBuf[aes.BlockSize:], rawBuf)\n\tfmt.Printf(\"%x\\n\", encryptedBuf)\n\t\n\tblock := blowfish.NewCipher(obj.remoteCert.RawSubjectPublicKeyInfo)\n\tencryptedBuf := make([]byte, aes.BlockSize+len(decBuffer))\n\tiv := encryptedBuf[:aes.BlockSize]\n\n\talgo := obj.remoteCert.PublicKeyAlgorithm\n\tswitch algo {\n\tcase x509.RSA:\n\tcase x509.DSA:\n\t\tblock, err := des.NewCipher(rawBuf)\n\t\tif err != nil {\n\t\t\treturn nil, exception.GetErrorByType(types.TGErrorSecurityException, types.INTERNAL_SERVER_ERROR, err.Error(), \"\")\n\t\t}\n\t\tmode := cipher.NewCBCEncrypter(block, iv)\n\t\tmode.CryptBlocks(encryptedBuf[aes.BlockSize:], rawBuf)\n\tcase x509.ECDSA:\n\t}\n\t*/\n\t//logger.Log(fmt.Sprintf(\"Returning 
DataCryptoGrapher:Decrypt() w/ encrypted buffer as '%+v'\", encryptedBuf))\n\treturn nil, nil\n}",
"func indexBlockEnc(block int64, prefix int64, id int, blockId int) {\n\tvar i int64\n\n\tvar tagPos []int = make([]int, tPLength) // the current available space of each tag's list\n\tfor i = 0; i < tPLength; i++ {\n\t\ttagPos[i] = 0\n\t}\n\n\tvar cipherPos = 0 // the current available space of ciphertext list\n\tgamma, _ := rand.Int(rand.Reader, bn256.Order) // the nonce\n\tindex[id].PubKey[blockId] = new(bn256.G1).ScalarMult(g1s, gamma).Marshal() // calculate the public key of one block in one index item\n\n\tfor i = 0; i < tPLength; i++ { // initialize the tag list with 100 (one value out of range)\n\t\tfor j := 0; j < int(tPLength); j++ {\n\t\t\tindex[id].Tag[blockId][i][j] = 100\n\t\t}\n\t}\n\n\t// iStr includes the operator > or <\n\tfor i = 0; i < blockPossValue; i++ {\n\t\tif i == block { // do not encrypt the equal block\n\t\t\tcontinue\n\t\t} else if i < block { // the current variable is smaller than the current block\n\t\t\tiStr := strconv.FormatInt(i, 10) + \"<\" // add the inequality operator into the string to be hashed\n\t\t\texp := getHashedValue(iStr, prefix, blockId) // calculate the hash value in tag and ciphertext\n\t\t\t// calculate the tag\n\t\t\ttag, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(int64(tPLength))).String()) // the tag\n\t\t\tindex[id].Tag[blockId][tag][tagPos[tag]] = uint8(cipherPos) // store the list number of the ciphertext in the current available space of corresponding tag\n\t\t\ttagPos[tag]++\n\n\t\t\t// generate the ciphertext\n\t\t\tt := new(bn256.G1).ScalarBaseMult(exp)\n\t\t\tindex[id].Ciphertext[blockId][cipherPos] = new(bn256.G1).ScalarMult(t, gamma).Marshal()\n\t\t\tcipherPos++\n\t\t} else if i > block { // the current variable is larger than the current block (the process procedure is similar)\n\t\t\tiStr := strconv.FormatInt(i, 10) + \">\"\n\t\t\texp := getHashedValue(iStr, prefix, blockId)\n\t\t\t// calculate the tag\n\t\t\ttag, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(int64(tPLength))).String()) 
// the tag\n\t\t\tindex[id].Tag[blockId][tag][tagPos[tag]] = uint8(cipherPos)\n\t\t\ttagPos[tag]++\n\n\t\t\t// generate the ciphertext\n\t\t\tt := new(bn256.G1).ScalarBaseMult(exp)\n\t\t\tindex[id].Ciphertext[blockId][cipherPos] = new(bn256.G1).ScalarMult(t, gamma).Marshal()\n\t\t\tcipherPos++\n\t\t}\n\t}\n}",
"func (o InterconnectAttachmentOutput) Encryption() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *InterconnectAttachment) pulumi.StringPtrOutput { return v.Encryption }).(pulumi.StringPtrOutput)\n}",
"func Decipher(cText string) (string, string, float64) {\n\tctxt, _ := hex.DecodeString(cText)\n\tplaintext := make([]byte, len(ctxt))\n\ttxt := \"\"\n\thigh := 0.0\n\tkey := \"\"\n\n\t//we loop around all possible ASCII characters as one of these character\n\t//was used as the key.\n\tfor i := 0; i < 256; i++ {\n\n\t\tfor k := 0; k < len(ctxt); k++ {\n\t\t\tplaintext[k] = byte(ctxt[k] ^ byte(i))\n\t\t}\n\n\t\tscore := getScore(plaintext)\n\n\t\t//a low score means that the deciphered plaintext is the closest to our expected(english text)\n\t\tif score > high {\n\t\t\ttxt = string(plaintext)\n\t\t\thigh = score\n\t\t\tkey = string(byte(i))\n\t\t}\n\t}\n\n\treturn txt, key, high\n}",
"func (c *ElGamal) Compose(a, b *ElGamal) *ElGamal {\n\tc.first.Add(a.first, b.first)\n\tc.second.Add(a.second, b.second)\n\treturn c\n}",
"func (pc *plainCipherKey) Encrypt(plaintext []byte) ([]byte, error) { return plaintext[:], nil }",
"func EncryptDecrypt(input, key string) (output string) {\n\tkL := len(key)\n\tfor i := range input {\n\t\toutput += string(input[i] ^ key[i%kL])\n\t}\n\treturn output\n}",
"func (ring DecoderRing) Encrypt(plaintext string) ([]byte, error) {\n\tdataKeyResult, err := ring.kmsService.GenerateDataKey(&kms.GenerateDataKeyInput{\n\t\tKeyId: aws.String(ring.masterKeyID),\n\t\tKeySpec: aws.String(\"AES_128\"),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyPlaintext := dataKeyResult.Plaintext\n\tkeyCiphertext := dataKeyResult.CiphertextBlob\n\n\tmessagePlaintext := []byte(plaintext)\n\tblock, err := aes.NewCipher(keyPlaintext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := make([]byte, gcm.NonceSize())\n\t_, err = io.ReadFull(rand.Reader, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmessageCiphertext := gcm.Seal(nonce, nonce, messagePlaintext, nil)\n\treturn append(keyCiphertext, messageCiphertext...), nil\n}",
"func Encrypt(plaintext string, key []byte, nonce []byte) []byte {\n\taesgcm := aesGcmCipher(key)\n\tciphertext := aesgcm.Seal(nil, nonce, []byte(plaintext), nil)\n\treturn ciphertext\n}",
"func (r *RC4A) Encipher(text string) string {\n\tn := 256\n\ts1 := rc4KSA(r.key, n)\n\ts2 := rc4KSA(r.key, n)\n\ti := 0\n\tj1 := 0\n\tj2 := 0\n\tres := []byte(text)\n\tfor y := 0; y < len(text); y++ {\n\t\ti = (i + 1) % n\n\t\tj1 = (j1 + s1[i]) % n\n\t\ts1[i], s1[j1] = s1[j1], s1[i]\n\t\tres[y] ^= byte(s2[(s1[i]+s1[j1])%n])\n\t\ty++\n\t\tj2 = (j2 + s2[i]) % n\n\t\ts2[i], s2[j2] = s2[j2], s2[i]\n\t\tres[y] ^= byte(s1[(s2[i]+s2[j2])%n])\n\t}\n\treturn string(res)\n}",
"func queryBlockEnc(block int64, prefix int64, blockId int, gamma *big.Int) {\n\tblockStr := strconv.FormatInt(block, 10) + \">\" // add the inequality operator\n\texp := getHashedValue(blockStr, prefix, blockId) // calculate the hash value\n\ttagValue, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(tPLength)).String()) // calculate the tag\n\tquery.Tag[blockId] = uint8(tagValue) // store the tag in the list\n\n\t// calculate the ciphertext of one block\n\tt := new(bn256.G2).ScalarBaseMult(exp)\n\tquery.Ciphertext[blockId] = new(bn256.G2).ScalarMult(t, gamma).Marshal()\n}",
"func (g *GCM) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) {\n\tpayload := raw[recordlayer.HeaderSize:]\n\traw = raw[:recordlayer.HeaderSize]\n\n\tnonce := make([]byte, gcmNonceLength)\n\tcopy(nonce, g.localWriteIV[:4])\n\tif _, err := rand.Read(nonce[4:]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tadditionalData := generateAEADAdditionalData(&pkt.Header, len(payload))\n\tencryptedPayload := g.localGCM.Seal(nil, nonce, payload, additionalData)\n\tr := make([]byte, len(raw)+len(nonce[4:])+len(encryptedPayload))\n\tcopy(r, raw)\n\tcopy(r[len(raw):], nonce[4:])\n\tcopy(r[len(raw)+len(nonce[4:]):], encryptedPayload)\n\n\t// Update recordLayer size to include explicit nonce\n\tbinary.BigEndian.PutUint16(r[recordlayer.HeaderSize-2:], uint16(len(r)-recordlayer.HeaderSize))\n\treturn r, nil\n}",
"func Transpose(plaintext []byte, offset int) []byte {\n\tcipher := Cipher(offset)\n\tlegend := Key(cipher)\n\n\tciphertext := make([]byte, len(plaintext), len(plaintext))\n\n\tfor i, b := range plaintext {\n\t\tc := legend[b]\n\n\t\tif c == 0 {\n\t\t\tc = space\n\t\t}\n\n\t\tciphertext[i] = c\n\t}\n\n\treturn ciphertext\n}",
"func (tsc *TSC) Encrypt() *TSC {\n\treturn tsc.convert(Encryption)\n}",
"func (e *Encrypter) Encrypt(cleartext []byte) ([]byte, string, error) {\n\te.lock.RLock()\n\tdefer e.lock.RUnlock()\n\n\tkeyset, err := e.activeKeySetLocked()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tnonce, err := crypto.Bytes(keyset.cipher.NonceSize())\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to generate key wrapper nonce: %v\", err)\n\t}\n\n\tkeyID := keyset.rootKey.Meta.KeyID\n\tadditional := []byte(keyID) // include the keyID in the signature inputs\n\n\t// we use the nonce as the dst buffer so that the ciphertext is\n\t// appended to that buffer and we always keep the nonce and\n\t// ciphertext together, and so that we're not tempted to reuse\n\t// the cleartext buffer which the caller still owns\n\tciphertext := keyset.cipher.Seal(nonce, nonce, cleartext, additional)\n\treturn ciphertext, keyID, nil\n}",
"func encrypt(data []byte, passphrase string) []byte {\n\tblock, _ := aes.NewCipher([]byte(createHash(passphrase)))\n\tgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\tlog.Panicf(\"Encrypt Cipher: %v\", err)\n\t}\n\tnonce := make([]byte, gcm.NonceSize())\n\tif _, err = io.ReadFull(rand.Reader, nonce); err != nil {\n\t\tlog.Panicf(\"Nonce: %v\", err)\n\t}\n\tciphertext := gcm.Seal(nonce, nonce, data, nil)\n\treturn ciphertext\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AssembleCiphertext computes the ciphertext of the message encrypted under the specified ID.
|
func AssembleCiphertext(ciphertext *DecomposedCiphertext, id ID) *hibe.Ciphertext {
thirdElement := new(bn256.G1).ScalarMult(ciphertext.D, big.NewInt(1))
var uriRelPos URIComponentPosition = 0
var timeRelPos TimeComponentPosition = 0
for pos, idComponent := range id {
var elem *bn256.G1
if idComponent.Type() == URIComponentType {
elem = ciphertext.URIComponentElement(uriRelPos, pos)
uriRelPos++
} else {
elem = ciphertext.TimeComponentElement(timeRelPos, pos)
timeRelPos++
}
thirdElement.Add(thirdElement, elem)
}
return &hibe.Ciphertext{
A: ciphertext.A,
B: ciphertext.B,
C: thirdElement,
}
}
|
[
"func NewCiphertext(params Parameters, degree, level int, scale float64) (ciphertext *Ciphertext) {\n\n\tciphertext = &Ciphertext{newElement(params, degree, level, scale)}\n\tciphertext.Element.Element.IsNTT = true\n\n\treturn ciphertext\n}",
"func EncryptMessage(clearText, aesKey, nxt []byte, sid, rid int) (msg *EncryptedMessage, err error) {\n // Create a random HMAC key\n hmacKey := make([]byte, hmacKeySize)\n if _, err := rand.Read(hmacKey); err != nil {\n return nil, err\n }\n\n // Add PKCS7 padding to clearText\n paddedClearText, err := PKCS7Pad(clearText, aes.BlockSize)\n if err != nil {\n return nil, err\n }\n\n // Add PKCS7 padding to next key\n nxt, err = PKCS7Pad(nxt, aes.BlockSize)\n if err != nil {\n return nil, err\n }\n\n // Create buffers for ciphertexts\n cipherText := make([]byte, len(paddedClearText))\n encryptedKey := make([]byte, len(hmacKey))\n encryptedNxt := make([]byte, len(nxt))\n\n // Create AES block cipher\n aesCipher, err := aes.NewCipher(aesKey)\n if err != nil {\n return nil, err\n }\n\n // Create a random initialization vector for AES encryption\n iv := make([]byte, aes.BlockSize)\n if _, err = rand.Read(iv); err != nil {\n return nil, err\n }\n\n // Encrypt data with CBC block encrypter\n cbc := cipher.NewCBCEncrypter(aesCipher, iv)\n cbc.CryptBlocks(cipherText, paddedClearText)\n\n // Encrypt hmac key with CBC block encrypter\n cbc = cipher.NewCBCEncrypter(aesCipher, iv)\n cbc.CryptBlocks(encryptedKey, hmacKey)\n\n // Encrypt nxt key with CBC block encrypter\n cbc = cipher.NewCBCEncrypter(aesCipher, iv)\n cbc.CryptBlocks(encryptedNxt, nxt)\n\n // Generate MAC tag for data\n mac := hmac.New(secureHash, hmacKey)\n tmp := make([]byte, 0, len(cipherText) + len(encryptedNxt))\n tmp = append(tmp, cipherText...)\n tmp = append(tmp, encryptedNxt...)\n mac.Write(tmp)\n tag := mac.Sum(nil)\n\n msg = &EncryptedMessage{\n Sid: sid,\n Rid: rid,\n Nxt: encryptedNxt,\n IV: iv,\n Msg: cipherText,\n Tag: tag,\n Key: encryptedKey,\n }\n\n return msg, err\n}",
"func AESEncrypt(kmsSvc kmsiface.KMSAPI, keyID, keyName, plaintext string) (string, error) {\n\tres, err := kmsSvc.GenerateDataKey(&kms.GenerateDataKeyInput{\n\t\tKeyId: aws.String(keyID),\n\t\tKeySpec: aws.String(\"AES_256\"),\n\t\tEncryptionContext: map[string]*string{\"keyName\": &keyName},\n\t})\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"kms.GenerateDataKey failed\")\n\t}\n\tdefer clearKey(res.Plaintext)\n\n\tblock, err := aes.NewCipher(res.Plaintext)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"aes.NewCipher failed\")\n\t}\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"cipher.NewGCM failed\")\n\t}\n\tnonce := make([]byte, aesgcm.NonceSize())\n\tif _, err = io.ReadFull(rand.Reader, nonce); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"ReadFull(rend.Reader,nonce) failed\")\n\t}\n\tbuf := &bytes.Buffer{}\n\tbase64w := base64.NewEncoder(base64.StdEncoding, buf)\n\tzwriter, err := zlib.NewWriterLevel(base64w, zlib.BestCompression)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"zlib.NewWriterLevel failed\")\n\t}\n\terr = gob.NewEncoder(zwriter).Encode(\n\t\tdata{\n\t\t\tCryptedKey: res.CiphertextBlob,\n\t\t\tEncrypted: aesgcm.Seal(nonce, nonce, []byte(plaintext), []byte(keyID)),\n\t\t},\n\t)\n\tzwriter.Close() // nolint errcheck\n\tbase64w.Close() // nolint errcheck\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"gob. encode failed\")\n\t}\n\treturn buf.String(), nil\n}",
"func Encrypt(plainText string, key []byte) ([]byte, error) {\n\t// use first 32byte if the key length is longer than 32byte.\n\tif len(key) > KeySize {\n\t\tkey = key[0:KeySize]\n\t}\n\n\taead, err := chacha20poly1305.NewX(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := make([]byte, nonceSizeX)\n\tif _, err := rand.Read(nonce); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcipherText := aead.Seal(nil, nonce, []byte(plainText), nil)\n\tcipherText = append(nonce, cipherText...)\n\treturn cipherText, nil\n}",
"func EcbEncrypt(text []byte, key []byte) []byte {\n\n\tvar aesBlock, err = aes.NewCipher(key)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// pads the message to be a multiple of the block size\n\ttext = padding.PadMsg(text, aesBlock.BlockSize())\n\n\tcipher := make([]byte, 0, len(text))\n\n\t// encryption loop for each block\n\tfor i := 0; i < len(text); i += aesBlock.BlockSize() {\n\t\tcipherBlock := EcbEncryptBlock(text[i:i+aesBlock.BlockSize()], aesBlock)\n\t\tcipher = append(cipher, cipherBlock...)\n\t}\n\n\treturn cipher\n\n}",
"func aes_cbc_enc(text, encKey []byte) ([]byte, []byte) {\n // Get a random IV\n cipherBlock := make([]byte, 16)\n _, err := rand.Read(cipherBlock)\n check(err)\n // `cipherBlock` is a temp value used during calculation. `IV` is used to \n // store the initial seed\n IV := make([]byte, 16)\n copy(IV, cipherBlock)\n\n res := make([]byte, len(text))\n // get the AES cipher\n cipher, err := aes.NewCipher(encKey)\n check(err)\n // block by block calculation\n for i := 0; i < len(text) / 16; i++ {\n for j := 0; j < 16; j++ {\n text[i * 16 + j] ^= cipherBlock[j]\n }\n cipher.Encrypt(cipherBlock, text[i * 16 : i * 16 + 16])\n copy(res[i * 16 : i * 16 + 16], cipherBlock)\n }\n return IV, res\n}",
"func (c *Caesar) Encipher(text string) string {\n\treturn caesarEncipher(text, c.key)\n}",
"func encrypt(args []string) []byte {\n keyStr, inputFile := args[2], args[4]\n data, err := ioutil.ReadFile(inputFile)\n check(err)\n if len(data) % 2 != 0 {\n fmt.Println(\"Invalid plaintext file: octet representation only.\")\n os.Exit(1)\n }\n // read in and decode the hex formatted text file\n plaintext := make([]byte, hex.DecodedLen(len(data)))\n _, err = hex.Decode(plaintext, data)\n check(err)\n key := make([]byte, 32)\n _, err = hex.Decode(key, []byte(keyStr))\n // split key\n encKey, macKey := key[:16], key[16:]\n // calculate HMAC on M with `macKey` to get a tag\n hmacTag := hmac(plaintext, macKey)\n // append the tag to the original plaintext message\n plainTextWithTag := append(plaintext, hmacTag...)\n // do the PS padding\n paddedPlainTextWithTag := psPad(plainTextWithTag)\n // do AES CBC mode encryption to get a ciphertext. Return the IV meanwhile\n IV, cipherText := aes_cbc_enc(paddedPlainTextWithTag, encKey)\n // append the ciphertext with IV, and return\n return append(IV, cipherText...)\n}",
"func encryptCTR(key, plainText []byte) (cipherText []byte, err error){\n\tif len(key) != KEY_SIZE{\n\t\treturn nil, errors.New(\"encryption failed cause key does not match key length\")\n\t}\n\n \t//setup a new cipher with given key\n \tblock, err := aes.NewCipher(key)\n\n \tif err != nil{\n \tpanic(err)\n \t}\n\n \t//make our cipher and init with length of given plain text + aes block size\n \tcipherText = make([]byte,len(plainText)+AES_BLOCKSIZE)\n\n \t//random bytes but must be same as the block size\n \tinitVec := getRandomBytes(AES_BLOCKSIZE)\n \t//new ctr\n \tstream := cipher.NewCTR(block, initVec)\n\n \t//set init vec bytes to ciphetext\n \tfor i := 0; i < AES_BLOCKSIZE; i++{\n \t\tcipherText[i] = initVec[i]\n \t}\n\n \t//perform streaming xor\n \tstream.XORKeyStream(cipherText[AES_BLOCKSIZE:], plainText)\n\n \t//clean buffer\n \tflushBuffer(initVec)\n\n \treturn \n}",
"func (ring DecoderRing) Encrypt(plaintext string) ([]byte, error) {\n\tdataKeyResult, err := ring.kmsService.GenerateDataKey(&kms.GenerateDataKeyInput{\n\t\tKeyId: aws.String(ring.masterKeyID),\n\t\tKeySpec: aws.String(\"AES_128\"),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyPlaintext := dataKeyResult.Plaintext\n\tkeyCiphertext := dataKeyResult.CiphertextBlob\n\n\tmessagePlaintext := []byte(plaintext)\n\tblock, err := aes.NewCipher(keyPlaintext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := make([]byte, gcm.NonceSize())\n\t_, err = io.ReadFull(rand.Reader, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmessageCiphertext := gcm.Seal(nonce, nonce, messagePlaintext, nil)\n\treturn append(keyCiphertext, messageCiphertext...), nil\n}",
"func Encrypt(plaintext string, key []byte, nonce []byte) []byte {\n\taesgcm := aesGcmCipher(key)\n\tciphertext := aesgcm.Seal(nil, nonce, []byte(plaintext), nil)\n\treturn ciphertext\n}",
"func Encrypt(key []byte, message string) ([]byte, error) {\n\tfor len(message)%aes.BlockSize != 0 {\n\t\tmessage = message + padding\n\t}\n\n\tplaintext := []byte(message)\n\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn nil, ErrCipherCreation\n\t}\n\n\tencryptedText := make([]byte, aes.BlockSize+len(plaintext))\n\tiv := encryptedText[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn nil, ErrCipherTextRead\n\t}\n\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(encryptedText[aes.BlockSize:], plaintext)\n\n\treturn encryptedText, nil\n}",
"func aesEcrpt(key, content []byte) ([]byte, error) {\n\tutil.Debug(\"len(key): %d, len(content): %d\", len(key), len(content))\n\tvar encrypted []byte = make([]byte, 0, len(content))\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, merr.InvalidErr(err, \"key: %s\", string(key))\n\t}\n\t/*\n\t func NewCBCDecrypter(b Block, iv []byte) BlockMode\n\t func NewCBCEncrypter(b Block, iv []byte) BlockMode\n\t*/\n\tencrypter := cipher.NewCBCEncrypter(block, key)\n\tutil.Debug(\"block size: %d\", encrypter.BlockSize())\n\tencrypter.CryptBlocks(encrypted, content)\n\tutil.Debug(\"encrypted: %d\\n%s\\ncontent: %d\\n%s\", len(encrypted), encrypted, len(content), content)\n\tencrypter.CryptBlocks(content, content)\n\tutil.Debug(\"content: %d\\n%s\", len(content), content)\n\treturn encrypted, nil\n}",
"func (ca *ChaCha20Poly1305) Encrypt(plaintext []byte, associatedData []byte) ([]byte, error) {\n\tnonce := random.GetRandomBytes(chacha20poly1305.NonceSize)\n\tct, err := ca.chaCha20Poly1305InsecureNonce.Encrypt(nonce, plaintext, associatedData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append(nonce, ct...), nil\n}",
"func (r *RC4A) Encipher(text string) string {\n\tn := 256\n\ts1 := rc4KSA(r.key, n)\n\ts2 := rc4KSA(r.key, n)\n\ti := 0\n\tj1 := 0\n\tj2 := 0\n\tres := []byte(text)\n\tfor y := 0; y < len(text); y++ {\n\t\ti = (i + 1) % n\n\t\tj1 = (j1 + s1[i]) % n\n\t\ts1[i], s1[j1] = s1[j1], s1[i]\n\t\tres[y] ^= byte(s2[(s1[i]+s1[j1])%n])\n\t\ty++\n\t\tj2 = (j2 + s2[i]) % n\n\t\ts2[i], s2[j2] = s2[j2], s2[i]\n\t\tres[y] ^= byte(s1[(s2[i]+s2[j2])%n])\n\t}\n\treturn string(res)\n}",
"func (o GetCaCertificatesCertificateOutput) CaCertificateId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetCaCertificatesCertificate) string { return v.CaCertificateId }).(pulumi.StringOutput)\n}",
"func caesarCipher(s string, k int32) string {\n\n\tvar builder strings.Builder\n\n\tfor _, b := range s {\n\t\terr := builder.WriteByte(cipher(byte(b), int(k)))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn builder.String()\n\n}",
"func decryptCTR(key, cipherText []byte) (plainText []byte, err error){\n\t//check key length\n\tif len(key) != KEY_SIZE{\n\t\treturn nil, errors.New(\"encryption failed cause key does not match key length\")\n\t}\n\n\t//check to see if ciphher text is not too short\n\tif len(cipherText) <= AES_BLOCKSIZE{\n\t\tfmt.Println(\"ciphertext is shorter than AES_BLOCKSIZE\")\n\t\treturn \n\t}\n\n\t//setup a new cipher with given key\n \tblock, err := aes.NewCipher(key)\n\n \tif err != nil {return}\n\n \t//parseout init vec\n \tinitVec := cipherText[:AES_BLOCKSIZE]\n \t//clean hmac out only initvec + message is left\n \tcipherText = cipherText[AES_BLOCKSIZE:]\n\n \t//new ctr\n \tstream := cipher.NewCTR(block, initVec)\n\n \t//make our plaintext with cipher text length\n \tplainText = make([]byte, len(cipherText))\n \tstream.XORKeyStream(plainText, cipherText)\n\n \treturn\n}",
"func (a *API) Wrap(ctx context.Context, id string, plainText []byte, additionalAuthData *[]string) ([]byte, error) {\n\tkeysAction, err := a.wrapIt(ctx, id, plainText, additionalAuthData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ([]byte)(keysAction.CipherText), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DecryptDecomposed decrypts the ciphertext and returns the plaintext element of GT.
|
func DecryptDecomposed(ciphertext *DecomposedCiphertext, id ID, key *hibe.PrivateKey) *bn256.GT {
hibeCiphertext := AssembleCiphertext(ciphertext, id)
if hibeCiphertext == nil {
return nil
}
return hibe.Decrypt(key, hibeCiphertext)
}
|
[
"func (tsc *TSC) Decrypt() *TSC {\n\treturn tsc.convert(Decryption)\n}",
"func (a ADFGX) Decrypt(ciphertext, key string) (string, error) {\n\tinput := runes.Clean(ciphertext)\n\tk1, k2, err := a.parseKeys(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttb := newTranspositionBlock(k2)\n\tunfranctionated := tb.detranspose(input)\n\n\tif len(unfranctionated)%2 != 0 {\n\t\treturn \"\", errors.New(\"invalid ciphertext length\")\n\t}\n\n\t// convert pairs of letters into coordinates in keyblock\n\tkb := newKeyblock(k1)\n\tvar out strings.Builder\n\tfor i := 0; i < len(unfranctionated); i += 2 {\n\t\trow, found := adfgxReverse[unfranctionated[i]]\n\t\tcol, found := adfgxReverse[unfranctionated[i+1]]\n\t\tif !found {\n\t\t\treturn \"\", errors.New(\"invalid ciphertext\")\n\t\t}\n\t\tr, err := kb.getValue(location{row, col})\n\t\tif err != nil {\n\t\t\treturn \"\", errors.New(\"invalid ciphertext\")\n\t\t}\n\t\tout.WriteRune(r)\n\t}\n\n\treturn out.String(), nil\n}",
"func EncryptDecomposed(message *bn256.GT, params *hibe.Params, uriPath ID, timePath ID) *DecomposedCiphertext {\n\tif params.Pairing == nil {\n\t\tpanic(\"Pairing must be Precached before calling EncryptDecomposed()\")\n\t}\n\n\t// Randomly choose s in Zp\n\ts, err := rand.Int(rand.Reader, bn256.Order)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tciphertext := new(DecomposedCiphertext)\n\n\tciphertext.A = new(bn256.GT)\n\tciphertext.A.ScalarMult(params.Pairing, s)\n\tciphertext.A.Add(ciphertext.A, message)\n\n\tciphertext.B = new(bn256.G2).ScalarMult(params.G, s)\n\n\tciphertext.D = new(bn256.G1).ScalarMult(params.G3, s)\n\n\tm := len(uriPath)\n\tn := len(timePath)\n\thCacheSize := m + n\n\ttableSize := m + n + ((m * n) << 1)\n\n\turiHashed := uriPath.HashToZp()\n\ttimeHashed := timePath.HashToZp()\n\n\tciphertext.NumURIComponents = uint8(m)\n\tciphertext.NumTimeComponents = uint8(n)\n\n\tciphertext.Table = make([]*bn256.G1, tableSize, tableSize)\n\n\thCache := make([]*bn256.G1, m+n)\n\tfor i := 0; i != hCacheSize; i++ {\n\t\thCache[i] = new(bn256.G1).ScalarMult(params.H[i], s)\n\t}\n\n\tfor j, uriComponentHash := range uriHashed {\n\t\t// pos varies over all positions in the final ID at which this URI\n\t\t// component could be.\n\t\tfor pos := j; pos != j+n+1; pos++ {\n\t\t\tindex := URIComponentIndex(m, n, j, pos)\n\t\t\tciphertext.Table[index] = new(bn256.G1)\n\t\t\tciphertext.Table[index].ScalarMult(hCache[pos], uriComponentHash)\n\t\t}\n\t}\n\n\tfor j, timeComponentHash := range timeHashed {\n\t\t// pos varies over all positions in the final ID at which this time\n\t\t// component could be.\n\t\tfor pos := j; pos != j+m+1; pos++ {\n\t\t\tindex := TimeComponentIndex(m, n, j, pos)\n\t\t\tciphertext.Table[index] = new(bn256.G1)\n\t\t\tciphertext.Table[index].ScalarMult(hCache[pos], timeComponentHash)\n\t\t}\n\t}\n\n\treturn ciphertext\n}",
"func Decipher(cText string) (string, string, float64) {\n\tctxt, _ := hex.DecodeString(cText)\n\tplaintext := make([]byte, len(ctxt))\n\ttxt := \"\"\n\thigh := 0.0\n\tkey := \"\"\n\n\t//we loop around all possible ASCII characters as one of these character\n\t//was used as the key.\n\tfor i := 0; i < 256; i++ {\n\n\t\tfor k := 0; k < len(ctxt); k++ {\n\t\t\tplaintext[k] = byte(ctxt[k] ^ byte(i))\n\t\t}\n\n\t\tscore := getScore(plaintext)\n\n\t\t//a low score means that the deciphered plaintext is the closest to our expected(english text)\n\t\tif score > high {\n\t\t\ttxt = string(plaintext)\n\t\t\thigh = score\n\t\t\tkey = string(byte(i))\n\t\t}\n\t}\n\n\treturn txt, key, high\n}",
"func Decrypt(priv PrivateKey, K, C twistededwards.PointAffine) (msg big.Int) {\n\n\tvar M, S twistededwards.PointAffine\n\tvar bScalar big.Int\n\tbScalar.SetBytes(priv.scalar[:])\n\n\t// ElGamal-decrypt the ciphertext (K,C) to reproduce the message.\n\tS.ScalarMul(&K, &bScalar)\n\tS.Neg(&S)\n\tM.Add(&C, &S)\n\n\tmsg = MessageMap[M]\n\treturn\n}",
"func Decrypt(data []byte, passphrase string) ([]byte, error) {\n\tif len(data) == 0 || len(passphrase) == 0 {\n\t\treturn data, errors.New(\"Length of data is zero, can't decrpyt!\")\n\t}\n\tkey := []byte(SHA3hash(passphrase)[96:128]) // last 32 characters in hash\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"Error while initializing new cipher\")\n\t}\n\tgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"failed to initialize new gcm block\")\n\t}\n\tnonceSize := gcm.NonceSize()\n\tnonce, ciphertext := data[:nonceSize], data[nonceSize:]\n\tplaintext, err := gcm.Open(nil, nonce, ciphertext, nil)\n\tif err != nil {\n\t\treturn plaintext, errors.Wrap(err, \"Error while opening gcm mode\")\n\t}\n\treturn plaintext, nil\n}",
"func decrypt(data []byte, passphrase string) []byte {\n\tkey := []byte(createHash(passphrase))\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tlog.Panicf(\"Get Cipher: %v\", err)\n\t}\n\tgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\tlog.Panicf(\"Decrypt Cipher: %v\", err)\n\t}\n\tnonceSize := gcm.NonceSize()\n\tnonce, ciphertext := data[:nonceSize], data[nonceSize:]\n\tplaintext, err := gcm.Open(nil, nonce, ciphertext, nil)\n\tif err != nil {\n\t\tlog.Panicf(\"Decrypt Message: %v\", err)\n\t}\n\treturn plaintext\n}",
"func (n *noLockAEAD) Decrypt(ciphertext, additionalData []byte) ([]byte, error) {\n\treturn ciphertext, nil\n}",
"func Decrypt(privateKey *rsa.PrivateKey, cipherText, label []byte) ([]byte, error) {\n\tplainText, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, privateKey, cipherText, label)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn plainText, nil\n}",
"func decrypt(aead tink.AEAD, enc []byte, msg proto.Message, context string) error {\n\tblob, err := aead.Decrypt(enc, []byte(context))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn proto.Unmarshal(blob, msg)\n}",
"func (c *Crypter) Decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) {\n\treq := &kmspb.DecryptRequest{\n\t\tName: c.keyID.String(),\n\t\tCiphertext: ciphertext,\n\t}\n\tresp, err := c.client.Decrypt(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.GetPlaintext(), nil\n}",
"func Decrypt(cipher string, privateKey string) (string, error) {\n\n\tprivateKeyFile, err := writeToTemp(privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer deleteTempFile(privateKeyFile.Name())\n\n\tsamlXmlsecInput, err := writeToTemp(cipher)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer deleteTempFile(samlXmlsecInput.Name())\n\n\tsamlXmlsecOutput, err := ioutil.TempFile(os.TempDir(), \"tmpgs\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer deleteTempFile(samlXmlsecOutput.Name())\n\tsamlXmlsecOutput.Close()\n\n\toutput, err := exec.Command(\"xmlsec1\", \"--decrypt\", \"--privkey-pem\", privateKeyFile.Name(), \"--id-attr:ID\", \"http://www.w3.org/2001/04/xmlenc#EncryptedData\",\n\t\t\"--output\", samlXmlsecOutput.Name(), samlXmlsecInput.Name()).CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", errors.New(err.Error() + \" : \" + string(output))\n\t}\n\n\tdecrypted, err := ioutil.ReadFile(samlXmlsecOutput.Name())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdecryptedXML := strings.Trim(string(decrypted), \"\\n\")\n\treturn decryptedXML, nil\n}",
"func (g *Generator) Decompose(id string) (map[string]uint64, error) {\n\tb, err := g.enc.Decode(id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"indigo: failed to decode, id = %s: %w\", id, err)\n\t}\n\n\treturn sonyflake.Decompose(b), nil\n}",
"func Decrypt(passphrase string, ciphertext []byte) (string, error) {\n\tvar salt []byte\n\tvar iv []byte\n\tvar data []byte\n\n\tversionN, err := models.FileVersion(string(ciphertext))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tversion, ok := models.FileVersions[versionN]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Invalid version number: %d\", versionN)\n\t}\n\n\tfile, err := version.Parse(string(ciphertext))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif file.Encoding == models.Base64EncodingPrefix {\n\t\tvar err error\n\t\tsalt, iv, data, err = decodeBase64(passphrase, file.Ciphertext)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tsalt, iv, data, err = decodeHex(passphrase, file.Ciphertext)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tbefore := time.Now()\n\tkey, _, err := deriveKey(passphrase, salt, file.NumRounds)\n\tafter := time.Now()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tutils.LogDebug(fmt.Sprintf(\"PBKDF2 key derivation used %d rounds and took %d ms\", file.NumRounds, after.Sub(before).Milliseconds()))\n\n\tb, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taesgcm, err := cipher.NewGCM(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err = aesgcm.Open(nil, iv, data, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(data), nil\n}",
"func Decrypt(msg []byte, sk ed25519.PrivateKey) ([]byte, error) {\n\tcurvePriv := PrivateKeyToCurve25519(sk)\n\tcurvePub := PublicKeyToCurve25519(sk.Public().(ed25519.PublicKey))\n\n\tif len(msg) < 48 {\n\t\treturn nil, fmt.Errorf(\"invalid cipher text too short\")\n\t}\n\n\treturn box.Open(msg, &curvePub, &curvePriv)\n}",
"func decrypt(bufs [][]byte, score func([]byte) int) error {\n\tkeystream, err := breakCTR(bufs, score)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, buf := range bufs {\n\t\tn := XORBytes(buf, buf, keystream)\n\t\tfmt.Println(string(buf[:n]))\n\t}\n\treturn nil\n}",
"func Decrypt(secretKey string, encrypted string) (string, error) {\n\tvar decryptNonce [24]byte\n\tvar secretKeyBytes [32]byte\n\tcopy(secretKeyBytes[:], secretKey)\n\tcopy(decryptNonce[:], encrypted[:24])\n\tdecrypted, ok := secretbox.Open(nil, []byte(encrypted[24:]), &decryptNonce, &secretKeyBytes)\n\tif !ok {\n\t\terr := errors.New(\"Unable to decrypt string\")\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\treturn string(decrypted), nil\n}",
"func (cracker *CaesarCracker) Decrypt(options SolveOptions, reply *ReplyData) error {\n\tvar plaintext []byte\n\tvar usedKey string\n\tvar err error\n\tif options.Key == \"\" {\n\t\tplaintext, usedKey, err = cracker.Crack(options.CT)\n\t} else {\n\t\tusedKey = options.Key\n\t\tplaintext, err = cracker.Solve(options.CT, options.Key)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t*reply = ReplyData{\n\t\tKey: usedKey,\n\t\tPlainText: plaintext,\n\t}\n\treturn nil\n}",
"func (t *RPC) Decrypt(offset uint64, path string, key []byte, masterPid int) (string, error) {\n\targuments := &args.CryptArgs{\n\t\tOffset: offset,\n\t\tLoopdev: path,\n\t\tKey: key,\n\t\tMasterPid: masterPid,\n\t}\n\n\tvar reply string\n\terr := t.Client.Call(t.Name+\".Decrypt\", arguments, &reply)\n\n\treturn reply, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test_Connection_useBeforeOpen tests if we can connect to the db, a.k.a., if the underlying driver works.
|
func Test_Connection_useBeforeOpen(t *testing.T) {
t.Parallel()
its := assert.New(t)
conn, err := New()
its.Nil(err)
tx, err := conn.Begin()
its.NotNil(err)
its.True(ex.Is(ErrConnectionClosed, err))
its.Nil(tx)
inv := conn.Invoke()
its.Nil(inv.DB)
its.True(inv.DB == nil)
any, err := conn.Query("select 1").Any()
its.NotNil(err)
its.True(ex.Is(ErrConnectionClosed, err), err.Error())
its.False(any)
}
|
[
"func TestOpenDB(t *testing.T) {\n\t_, err := OpenDB() // Open db\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n}",
"func (t *TestStore) MustOpen() {\n\tif err := t.Open(DATASOURCETEST); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := t.DB.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func TestOpenDB(t *testing.T) {\n\tOpenDB(TestDBName)\n}",
"func ensureConnection(ctx context.Context) error {\n\tc := db.Config{}\n\terr := testutil.ResolveDBConfig(ctx, &c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool, err := db.New(db.OptConfig(c))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = testutil.ValidatePool(ctx, pool, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn pool.Close()\n}",
"func MustConnect(driverName, dataSourceName string) *DB {\n db, err := Connect(driverName, dataSourceName)\n if err != nil {\n panic(err)\n }\n return db\n}",
"func TestConnectionSanityCheck(t *testing.T) {\n\tassert := assert.New(t)\n\n\tconn, err := OpenTestConnection()\n\tassert.Nil(err)\n\tstr := conn.Config.CreateDSN()\n\t_, err = sql.Open(\"pgx\", str)\n\tassert.Nil(err)\n}",
"func OpenTestMysqlDatabaseConnection() (db *gorm.DB) {\n\terrMsgms := []string{}\n\n\tvar username = os.Getenv(\"dbUsername\")\n\tvar password = os.Getenv(\"dbPassword\")\n\tvar dbHost = os.Getenv(\"dbHost\")\n\tvar dbSchema = os.Getenv(\"dbSchema\")\n\tvar dbPort = os.Getenv(\"dbPort\")\n\n\tif len(username) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbUsername not set on env envirotnment\")\n\t}\n\tif len(password) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbPassword not set on env envirotnment\")\n\t}\n\tif len(dbHost) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbHost not set on env envirotnment\")\n\t}\n\tif len(dbSchema) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbSchema not set on env envirotnment\")\n\t}\n\tif len(dbPort) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbPort not set on env envirotnment\")\n\t}\n\tif len(errMsgms) > 0 {\n\t\tpanic(\"Database parameter not found : \\n\" + strings.Join(errMsgms, \"\\n\"))\n\t}\n\tconQuery := username + \":\" + password\n\tconQuery = conQuery + \"@tcp(\" + dbHost + \":\" + dbPort + \")/\" + dbSchema + \"?charset=utf8&parseTime=True&loc=Local\"\n\tdb, err := gorm.Open(\"mysql\", conQuery)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn db\n}",
"func TestConnection() bool {\n\tconnectDB()\n\tdefer disconnectDB()\n\n\tquery := `SELECT * FROM pg_catalog.pg_tables`\n\t_, err := db.Query(query)\n\tlog.Println(err)\n\n\treturn err == nil\n}",
"func (con *Con) checkIsOpen() error {\n\tif !con.IsOpen() {\n\t\treturn driver.ErrBadConn\n\t}\n\treturn nil\n}",
"func openDB(driver, config string) *sql.DB {\n\tdb, err := sql.Open(driver, config)\n\tif err != nil {\n\t\tlog.Fatalln(\"database connection failed:\", err)\n\t}\n\tif driver == \"mysql\" {\n\t\t// per issue https://github.com/go-sql-driver/mysql/issues/257\n\t\tdb.SetMaxIdleConns(0)\n\t}\n\tif err := pingDatabase(db); err != nil {\n\t\tlog.Fatalln(\"database ping attempts failed:\", err)\n\t}\n\treturn db\n}",
"func ConnectLow(cfg *config.AppConfig, nodb bool) *sql.DB {\n\tif cfg.StateConnectURL != \"\" {\n\t\tcs := cfg.StateConnectURL\n\t\t/* url.Parse requires scheme in the beginning of the URL, just prepend\n\t\t* with random scheme if it wasn't in the config file URL */\n\t\tif !strings.Contains(cfg.StateConnectURL, \"://\") {\n\t\t\tcs = \"dsn://\" + cfg.StateConnectURL\n\t\t}\n\t\tu, err := url.Parse(cs)\n\t\tif log.E(err) {\n\t\t\treturn nil\n\t\t}\n\t\tvar host, port string = u.Host, \"\"\n\t\tif strings.Contains(u.Host, \":\") {\n\t\t\thost, port, _ = net.SplitHostPort(u.Host)\n\t\t}\n\t\tif u.User.Username() == \"\" || host == \"\" {\n\t\t\tlog.Errorf(\"Host and username required in DB db URL\")\n\t\t\treturn nil\n\t\t}\n\t\tif port == \"\" {\n\t\t\tport = \"3306\"\n\t\t}\n\t\tuport, err := strconv.ParseUint(port, 10, 16)\n\t\tif log.E(err) {\n\t\t\treturn nil\n\t\t}\n\t\tpwd, _ := u.User.Password()\n\t\tdbAddr = &db.Addr{Host: host, Port: uint16(uport), User: u.User.Username(), Pwd: pwd, Db: types.MyDbName}\n\t} else {\n\t\tdbAddr = db.GetInfo(&db.Loc{Service: types.MySvcName, Name: types.MyDbName}, db.Master)\n\t\tif dbAddr == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif nodb {\n\t\tdbAddr.Db = \"\"\n\t}\n\tcn, err := db.Open(dbAddr)\n\tif err == nil {\n\t\treturn cn\n\t}\n\treturn nil\n}",
"func ConnectToTestDB() *reform.DB {\n\tdriver := strings.TrimSpace(os.Getenv(\"REFORM_TEST_DRIVER\"))\n\tsource := strings.TrimSpace(os.Getenv(\"REFORM_TEST_SOURCE\"))\n\tif driver == \"\" || source == \"\" {\n\t\tlog.Fatal(\"no driver or source, set REFORM_TEST_DRIVER and REFORM_TEST_SOURCE\")\n\t}\n\n\t// register custom function \"sleep\" for context tests\n\tif driver == \"sqlite3\" {\n\t\tdriver = \"sqlite3_with_sleep\"\n\n\t\tsqlite3RegisterOnce.Do(func() {\n\t\t\tsleep := func(nsec int64) (int64, error) {\n\t\t\t\ttime.Sleep(time.Duration(nsec))\n\t\t\t\treturn nsec, nil\n\t\t\t}\n\t\t\tsql.Register(driver, &sqlite3Driver.SQLiteDriver{\n\t\t\t\tConnectHook: func(conn *sqlite3Driver.SQLiteConn) error {\n\t\t\t\t\treturn conn.RegisterFunc(\"sleep\", sleep, false)\n\t\t\t\t},\n\t\t\t})\n\t\t})\n\t}\n\n\tdb, err := sql.Open(driver, source)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Use single connection so various session-related variables work.\n\t// For example: \"PRAGMA foreign_keys\" for SQLite3, \"SET IDENTITY_INSERT\" for MS SQL, etc.\n\tdb.SetMaxIdleConns(1)\n\tdb.SetMaxOpenConns(1)\n\tdb.SetConnMaxLifetime(0)\n\n\tif err = db.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tnow := time.Now()\n\n\t// select dialect for driver\n\tdialect := dialects.ForDriver(driver)\n\tswitch dialect {\n\tcase postgresql.Dialect:\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver = %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tvar version, tz string\n\t\t\tif err = db.QueryRow(\"SHOW server_version\").Scan(&version); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tif err = db.QueryRow(\"SHOW TimeZone\").Scan(&tz); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tlog.Printf(\"PostgreSQL version = %q\", version)\n\t\t\tlog.Printf(\"PostgreSQL TimeZone = %q\", tz)\n\t\t})\n\n\tcase mysql.Dialect:\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver 
= %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tq := \"SELECT @@version, @@sql_mode, @@autocommit, @@time_zone\"\n\t\t\tvar version, mode, autocommit, tz string\n\t\t\tif err = db.QueryRow(q).Scan(&version, &mode, &autocommit, &tz); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tlog.Printf(\"MySQL version = %q\", version)\n\t\t\tlog.Printf(\"MySQL sql_mode = %q\", mode)\n\t\t\tlog.Printf(\"MySQL autocommit = %q\", autocommit)\n\t\t\tlog.Printf(\"MySQL time_zone = %q\", tz)\n\t\t})\n\n\tcase sqlite3.Dialect:\n\t\tif _, err = db.Exec(\"PRAGMA foreign_keys = ON\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver = %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tvar version, sourceID string\n\t\t\tif err = db.QueryRow(\"SELECT sqlite_version(), sqlite_source_id()\").Scan(&version, &sourceID); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tlog.Printf(\"SQLite3 version = %q\", version)\n\t\t\tlog.Printf(\"SQLite3 source = %q\", sourceID)\n\t\t})\n\n\tcase mssql.Dialect: //nolint:staticcheck\n\t\tfallthrough\n\tcase sqlserver.Dialect:\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver = %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tvar version string\n\t\t\tvar options uint16\n\t\t\tif err = db.QueryRow(\"SELECT @@VERSION, @@OPTIONS\").Scan(&version, &options); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\txact := \"ON\"\n\t\t\tif options&0x4000 == 0 {\n\t\t\t\txact = \"OFF\"\n\t\t\t}\n\t\t\tlog.Printf(\"MS SQL VERSION = %s\", version)\n\t\t\tlog.Printf(\"MS SQL OPTIONS = %#4x (XACT_ABORT %s)\", options, xact)\n\t\t})\n\n\tdefault:\n\t\tlog.Fatalf(\"reform: no dialect for driver %s\", 
driver)\n\t}\n\n\treturn reform.NewDB(db, dialect, nil)\n}",
"func Connect(cfg *config.AppConfig) bool {\n\tconn = ConnectLow(cfg, false)\n\tif conn != nil {\n\t\tlog.Debugf(\"Initialized and connected to state DB\")\n\t\treturn true\n\t}\n\treturn false\n}",
"func TestDbconn(t *testing.T) {\n\t_, err := Dbconn()\n\tif err != nil {\n\t\tt.Errorf(\"Failed to connect to DB: %s\", err)\n\t}\n}",
"func openDBConnection(user, password, host string, port int, serviceName string) (db *sql.DB, err error) {\n\n\tdb, err = sql.Open(\"mysql\", fmt.Sprintf(\"%s:%s@tcp(%s:%d)/\", user, password, host, port))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening db %s: %s\", serviceName, err.Error())\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to ping db %s: %s\", serviceName, err.Error())\n\t}\n\n\tdb.SetMaxOpenConns(5)\n\tdb.SetMaxIdleConns(5)\n\tdb.SetConnMaxLifetime(time.Minute * 15)\n\n\treturn\n}",
"func MustOpen() *sql.DB {\n\tdb, err := Open()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db\n}",
"func TestClosesNewConnIfNotUsed(t *testing.T) {\n\tt.Parallel()\n\n\tp := newConnectionPool()\n\tconn := &connection{\n\t\topeningDelay: 1 * time.Second,\n\t}\n\tp.newConnection = func() Connection { return conn }\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tp.getConnection(56)\n\t\tclose(done)\n\t}()\n\n\tp.onNewRemoteConnection(56, &connection{isOpen: true})\n\n\tselect {\n\tcase <-done:\n\t\tassert.False(t, conn.isOpen)\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fail()\n\t}\n}",
"func TestOCSPFailOpen(t *testing.T) {\n\tcleanup()\n\tdefer cleanup()\n\n\tconfig := &Config{\n\t\tAccount: \"fakeaccount1\",\n\t\tUser: \"fakeuser\",\n\t\tPassword: \"fakepassword\",\n\t\tLoginTimeout: 10 * time.Second,\n\t\tOCSPFailOpen: OCSPFailOpenTrue,\n\t}\n\tvar db *sql.DB\n\tvar err error\n\tvar testURL string\n\ttestURL, err = DSN(config)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build URL from Config: %v\", config)\n\t}\n\n\tif db, err = sql.Open(\"snowflake\", testURL); err != nil {\n\t\tt.Fatalf(\"failed to open db. %v, err: %v\", testURL, err)\n\t}\n\tdefer db.Close()\n\tif err = db.Ping(); err == nil {\n\t\tt.Fatalf(\"should fail to ping. %v\", testURL)\n\t}\n\tdriverErr, ok := err.(*SnowflakeError)\n\tif !ok {\n\t\tt.Fatalf(\"failed to extract error SnowflakeError: %v\", err)\n\t}\n\tif driverErr.Number != ErrCodeFailedToConnect {\n\t\tt.Fatalf(\"should failed to connect %v\", err)\n\t}\n}",
"func TestConnIsValid(t *testing.T) {\n\tdb := newTestDB(t, \"people\")\n\tdefer closeDB(t, db)\n\n\tdb.SetMaxOpenConns(1)\n\n\tctx := context.Background()\n\n\tc, err := db.Conn(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = c.Raw(func(raw any) error {\n\t\tdc := raw.(*fakeConn)\n\t\tdc.stickyBad = true\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.Close()\n\n\tif len(db.freeConn) > 0 && db.freeConn[0].ci.(*fakeConn).stickyBad {\n\t\tt.Fatal(\"bad connection returned to pool; expected bad connection to be discarded\")\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestConnectionSanityCheck tests if we can connect to the db, a.k.a., if the underlying driver works.
|
func TestConnectionSanityCheck(t *testing.T) {
assert := assert.New(t)
conn, err := OpenTestConnection()
assert.Nil(err)
str := conn.Config.CreateDSN()
_, err = sql.Open("pgx", str)
assert.Nil(err)
}
|
[
"func ensureConnection(ctx context.Context) error {\n\tc := db.Config{}\n\terr := testutil.ResolveDBConfig(ctx, &c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool, err := db.New(db.OptConfig(c))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = testutil.ValidatePool(ctx, pool, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn pool.Close()\n}",
"func TestConnection() bool {\n\tconnectDB()\n\tdefer disconnectDB()\n\n\tquery := `SELECT * FROM pg_catalog.pg_tables`\n\t_, err := db.Query(query)\n\tlog.Println(err)\n\n\treturn err == nil\n}",
"func TestDbconn(t *testing.T) {\n\t_, err := Dbconn()\n\tif err != nil {\n\t\tt.Errorf(\"Failed to connect to DB: %s\", err)\n\t}\n}",
"func TestConnIsValid(t *testing.T) {\n\tdb := newTestDB(t, \"people\")\n\tdefer closeDB(t, db)\n\n\tdb.SetMaxOpenConns(1)\n\n\tctx := context.Background()\n\n\tc, err := db.Conn(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = c.Raw(func(raw any) error {\n\t\tdc := raw.(*fakeConn)\n\t\tdc.stickyBad = true\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.Close()\n\n\tif len(db.freeConn) > 0 && db.freeConn[0].ci.(*fakeConn).stickyBad {\n\t\tt.Fatal(\"bad connection returned to pool; expected bad connection to be discarded\")\n\t}\n}",
"func CheckSuccessConnection(err error) {\n\tif err != nil {\n\t\tpanic(err.Error())\n\t} else {\n\t\tfmt.Println(\"Success! Connected to database \", DbName)\n\t}\n}",
"func CheckDatabaseConnection() error {\n\te := db.GetEngine(db.DefaultContext)\n\t_, err := e.Exec(\"SELECT 1\")\n\treturn err\n}",
"func checkConn() {\n\tconnStr := getConnStr()\n\n\tdb, err := sql.Open(\"postgres\", connStr)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer db.Close()\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"\\nSuccessfully connected to the database!\\n\")\n}",
"func CheckConnection() bool {\n\tif Mongo == nil {\n\t\tConnect()\n\t}\n\tif Mongo != nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func testConnectFail(t *testing.T) {\n\tdb, err := t38c.Connect(\"fakehost\", \"9999\", 1)\n\tif err == nil {\n\t\ttFatalNoErr(t, \"Connect\")\n\t}\n\n\tif !strings.HasPrefix(err.Error(), \"error connecting to server:\") {\n\t\ttErrorStr(t, \"Connect\", \"error connecting to servier\", err)\n\t}\n\n\tif db != nil {\n\t\ttErrorStr(t, \"DB\", \"nil\", \"not nil\")\n\t}\n}",
"func CheckConnecion(db *pg.DB) (bool, error) {\n\t_, err := db.Exec(\"SELECT 1\")\n\tif err != nil {\n\t\tlog.Printf(\"Conexion a base de datos invalida, Revisar: %v\\n\", err)\n\t\treturn false, err\n\t}\n\tlog.Println(\"Conexion a base de datos valida\")\n\treturn true, err\n\n}",
"func testlabConnectDb() (*sql.DB, error) {\n\tuserDb := \"morty\"\n\tpwDb := \"True-cube1\"\n\tconnDb := \"192.168.181.121:3306\"\n\tschemaDb := \"testlab\"\n\n\tdb, err := mySQLConnect(userDb, pwDb, connDb, schemaDb)\n\tcheckErr(err)\n\treturn db, err\n}",
"func (s *ScyllaDB) CheckConnection() derrors.Error {\n\tif s.Session == nil {\n\t\treturn derrors.NewGenericError(\"Session not created\")\n\t}\n\treturn nil\n}",
"func IsValidConnForTest(_ *Loc, _ ConnectionType, _ *Addr, _ string) bool {\n\treturn true\n}",
"func MustConnect(driverName, dataSourceName string) *DB {\n db, err := Connect(driverName, dataSourceName)\n if err != nil {\n panic(err)\n }\n return db\n}",
"func (a *Client) TestDatabaseConnection(params *TestDatabaseConnectionParams, authInfo runtime.ClientAuthInfoWriter) (*TestDatabaseConnectionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewTestDatabaseConnectionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"testDatabaseConnection\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v4/databases/test\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &TestDatabaseConnectionReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*TestDatabaseConnectionOK), nil\n\n}",
"func CheckDbAlive() {\n\tc := sdUsersDb\n\tif IsConnectionDead(c) {\n\t\tapperror.Panic500If(apperror.ErrDummy, \"Internal error\")\n\t}\n}",
"func Test_Connection_useBeforeOpen(t *testing.T) {\n\tt.Parallel()\n\tits := assert.New(t)\n\n\tconn, err := New()\n\tits.Nil(err)\n\n\ttx, err := conn.Begin()\n\tits.NotNil(err)\n\tits.True(ex.Is(ErrConnectionClosed, err))\n\tits.Nil(tx)\n\n\tinv := conn.Invoke()\n\tits.Nil(inv.DB)\n\tits.True(inv.DB == nil)\n\n\tany, err := conn.Query(\"select 1\").Any()\n\tits.NotNil(err)\n\tits.True(ex.Is(ErrConnectionClosed, err), err.Error())\n\tits.False(any)\n}",
"func TestOpenDB(t *testing.T) {\n\t_, err := OpenDB() // Open db\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n}",
"func testDB(db *sql.DB) error {\n\tif err := db.Ping(); err != nil {\n\t\treturn fmt.Errorf(\"ping: %w\", err)\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestConnectionConfigSetsDatabase tests if we set the .database property on open.
|
func TestConnectionConfigSetsDatabase(t *testing.T) {
assert := assert.New(t)
conn, err := New(OptConfigFromEnv())
assert.Nil(err)
assert.Nil(conn.Open())
defer conn.Close()
assert.NotEmpty(conn.Config.DatabaseOrDefault())
}
|
[
"func Database(cnf config.DBConfig) Configurator {\n\treturn func(instance *Storage) (err error) {\n\t\tdefer errors.Recover(&err)\n\t\tinstance.exec = executor.New(cnf.DriverName())\n\t\tinstance.db, err = sql.Open(cnf.DriverName(), string(cnf.DSN))\n\t\tif err == nil {\n\t\t\tinstance.db.SetMaxOpenConns(cnf.MaxOpen)\n\t\t\tinstance.db.SetMaxIdleConns(cnf.MaxIdle)\n\t\t\tinstance.db.SetConnMaxLifetime(cnf.MaxLifetime)\n\t\t}\n\t\treturn\n\t}\n}",
"func TestDatabaseScope(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tctx := context.Background()\n\tparams, _ := tests.CreateTestServerParams()\n\ts, sqlDB, kvDB := serverutils.StartServer(t, params)\n\tdefer s.Stopper().Stop(ctx)\n\n\texecutor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.\n\t\tInternalExecutor), kvDB)\n\tfileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,\n\t\texecutor, security.RootUserName())\n\trequire.NoError(t, err)\n\n\t// Verify defaultdb has the file we wrote.\n\tuploadedContent, err := uploadFile(ctx, \"file1\", 1024, 10,\n\t\tfileTableReadWriter, kvDB)\n\trequire.NoError(t, err)\n\toldDBReader, oldDBSize, err := fileTableReadWriter.ReadFile(ctx, \"file1\", 0)\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(len(uploadedContent)), oldDBSize)\n\toldDBContent, err := ioutil.ReadAll(oldDBReader)\n\trequire.NoError(t, err)\n\trequire.True(t, bytes.Equal(uploadedContent, oldDBContent))\n\n\t// Switch database and attempt to read the file.\n\t_, err = sqlDB.Exec(`CREATE DATABASE newdb`)\n\trequire.NoError(t, err)\n\tnewFileTableReadWriter, err := filetable.NewFileToTableSystem(ctx,\n\t\t\"newdb.file_table_read_writer\", executor, security.RootUserName())\n\trequire.NoError(t, err)\n\t_, _, err = newFileTableReadWriter.ReadFile(ctx, \"file1\", 0)\n\trequire.True(t, oserror.IsNotExist(err))\n}",
"func assertDatabaseEqualsConfig(t *testing.T, expectedCfg *model.Config) {\n\tt.Helper()\n\n\texpectedCfg = prepareExpectedConfig(t, expectedCfg)\n\t_, actualCfg := getActualDatabaseConfig(t)\n\tassert.Equal(t, expectedCfg, actualCfg)\n}",
"func OpenTestMysqlDatabaseConnection() (db *gorm.DB) {\n\terrMsgms := []string{}\n\n\tvar username = os.Getenv(\"dbUsername\")\n\tvar password = os.Getenv(\"dbPassword\")\n\tvar dbHost = os.Getenv(\"dbHost\")\n\tvar dbSchema = os.Getenv(\"dbSchema\")\n\tvar dbPort = os.Getenv(\"dbPort\")\n\n\tif len(username) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbUsername not set on env envirotnment\")\n\t}\n\tif len(password) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbPassword not set on env envirotnment\")\n\t}\n\tif len(dbHost) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbHost not set on env envirotnment\")\n\t}\n\tif len(dbSchema) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbSchema not set on env envirotnment\")\n\t}\n\tif len(dbPort) == 0 {\n\t\terrMsgms = append(errMsgms, \"key : dbPort not set on env envirotnment\")\n\t}\n\tif len(errMsgms) > 0 {\n\t\tpanic(\"Database parameter not found : \\n\" + strings.Join(errMsgms, \"\\n\"))\n\t}\n\tconQuery := username + \":\" + password\n\tconQuery = conQuery + \"@tcp(\" + dbHost + \":\" + dbPort + \")/\" + dbSchema + \"?charset=utf8&parseTime=True&loc=Local\"\n\tdb, err := gorm.Open(\"mysql\", conQuery)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn db\n}",
"func TestOpenDB(t *testing.T) {\n\tOpenDB(TestDBName)\n}",
"func ensureConnection(ctx context.Context) error {\n\tc := db.Config{}\n\terr := testutil.ResolveDBConfig(ctx, &c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool, err := db.New(db.OptConfig(c))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = testutil.ValidatePool(ctx, pool, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn pool.Close()\n}",
"func (c *Config) connectToDatabase() error {\n\n\tdbDsn := \"zhengye:csye3631@tcp(127.0.0.1:3307)/discovery?parseTime=true\"\n\tdb, err := gorm.Open(\"mysql\", dbDsn)\n\n\t//if err != nil || db == nil {\n\t//\n\t//\tfor i := 1; i <= 12; i++ {\n\t//\t\ttime.Sleep(5 * time.Second)\n\t//\n\t//\t\tdb, err = gorm.Open(\"mysql\", dbDsn)\n\t//\n\t//\t\tif db != nil && err == nil {\n\t//\t\t\tbreak\n\t//\t\t}\n\t//\t}\n\t//\n\t//\tif err != nil || db == nil {\n\t//\t\tlog.Fatal(err)\n\t//\t}\n\t//}\n\n\tc.db = db\n\n\treturn err\n}",
"func NewTestDB(c config.Config) (*sql.DB, error) {\n\tvar db *sql.DB\n\n\tif c.DBDriverName == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBDriverName\")\n\t}\n\n\tif c.DBHost == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBHost\")\n\t}\n\n\tif c.DBName == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBName\")\n\t}\n\n\tif c.DBPassword == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBPassword\")\n\t}\n\n\tif c.DBPort == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBPort\")\n\t}\n\n\tif c.DBUser == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBUser\")\n\t}\n\n\tif c.MigrationFiles == \"\" {\n\t\treturn nil, errors.New(\"Config is missing database connection information: MigrationFiles\")\n\t}\n\n\tif c.DBMigrationVersion == 0 {\n\t\treturn nil, errors.New(\"Config is missing database connection information: DBMigrationVersion\")\n\t}\n\n\tdbName := c.DBName\n\tc.DBName = \"\"\n\tdriver, cs := config.GetPostgresConnectionInfo(c)\n\tdb, err := NewDB(driver, cs)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Could not connect to database with %v %v\", driver, cs)\n\t\tpanic(err)\n\t}\n\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\tc.DBName = fmt.Sprintf(\"%v_%d\", dbName, rand.Int())\n\tCreateDatabase(db, c.DBName)\n\tdb.Close()\n\n\tdriver, cs = config.GetPostgresConnectionInfo(c)\n\tdb, err = NewDB(driver, cs)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not connect to database with %v %v\", driver, cs)\n\t\treturn nil, err\n\t}\n\n\terr = RunMigrations(db, c.DBName, c.MigrationFiles, c.DBMigrationVersion)\n\tif err != nil {\n\t\tfmt.Printf(\"Error while running migrations\")\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}",
"func setupDB(t *testing.T) *DBInfo {\n\tvar testDB DBInfo\n\ttestDB.TrackCollectionString = \"test_tracks\"\n\ttestDB.WebhookCollectionString = \"test_webhooks\"\n\ttestDB.DBString = \"paragliding\"\n\ttestDB.ConnectionString = \"mongodb://newtpu:[email protected]:39903/paragliding\"\n\n\treturn &testDB\n}",
"func UseDBTestContext() error {\n\tvar err error\n\tDB, err = gorm.Open(\"postgres\", GetTestConnectionContext())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn err\n\t}\n\tdefer DB.Close()\n\treturn nil\n}",
"func DbConfig() {\n\tdb, e = gorm.Open(\"postgres\", \"host=192.168.77.40 port=5439 user=postgres password=testpassword dbname=postgres sslmode=disable\")\n\tif e != nil {\n\t\tfmt.Println(e)\n\t} else {\n\t\tfmt.Println(\"Connection Established\")\n\t}\n\tdb.SingularTable(true)\n}",
"func SetupDatabase(databaseURL string, debug bool) error {\n\tvar err error\n\tlogrus.WithField(\"connectionString\", databaseURL).Info(\"Attempting to connect to database\")\n\tdatabase, err = gorm.Open(\"postgres\", databaseURL)\n\tif err != nil {\n\t\tlogrus.Panic(err)\n\t}\n\tverifyDatabaseConnection(database)\n\tdatabase.DB().SetMaxIdleConns(10)\n\tdatabase.DB().SetMaxOpenConns(100)\n\tdatabase.LogMode(debug)\n\n\treturn err\n}",
"func SetupTestDatabase(t *testing.T, ctx context.Context) testcontainers.Container {\n\tcontainer, database, err := InitTestDatabase(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tDB = database\n\treturn container\n}",
"func ConnectToTestDB() *reform.DB {\n\tdriver := strings.TrimSpace(os.Getenv(\"REFORM_TEST_DRIVER\"))\n\tsource := strings.TrimSpace(os.Getenv(\"REFORM_TEST_SOURCE\"))\n\tif driver == \"\" || source == \"\" {\n\t\tlog.Fatal(\"no driver or source, set REFORM_TEST_DRIVER and REFORM_TEST_SOURCE\")\n\t}\n\n\t// register custom function \"sleep\" for context tests\n\tif driver == \"sqlite3\" {\n\t\tdriver = \"sqlite3_with_sleep\"\n\n\t\tsqlite3RegisterOnce.Do(func() {\n\t\t\tsleep := func(nsec int64) (int64, error) {\n\t\t\t\ttime.Sleep(time.Duration(nsec))\n\t\t\t\treturn nsec, nil\n\t\t\t}\n\t\t\tsql.Register(driver, &sqlite3Driver.SQLiteDriver{\n\t\t\t\tConnectHook: func(conn *sqlite3Driver.SQLiteConn) error {\n\t\t\t\t\treturn conn.RegisterFunc(\"sleep\", sleep, false)\n\t\t\t\t},\n\t\t\t})\n\t\t})\n\t}\n\n\tdb, err := sql.Open(driver, source)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Use single connection so various session-related variables work.\n\t// For example: \"PRAGMA foreign_keys\" for SQLite3, \"SET IDENTITY_INSERT\" for MS SQL, etc.\n\tdb.SetMaxIdleConns(1)\n\tdb.SetMaxOpenConns(1)\n\tdb.SetConnMaxLifetime(0)\n\n\tif err = db.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tnow := time.Now()\n\n\t// select dialect for driver\n\tdialect := dialects.ForDriver(driver)\n\tswitch dialect {\n\tcase postgresql.Dialect:\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver = %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tvar version, tz string\n\t\t\tif err = db.QueryRow(\"SHOW server_version\").Scan(&version); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tif err = db.QueryRow(\"SHOW TimeZone\").Scan(&tz); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tlog.Printf(\"PostgreSQL version = %q\", version)\n\t\t\tlog.Printf(\"PostgreSQL TimeZone = %q\", tz)\n\t\t})\n\n\tcase mysql.Dialect:\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver 
= %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tq := \"SELECT @@version, @@sql_mode, @@autocommit, @@time_zone\"\n\t\t\tvar version, mode, autocommit, tz string\n\t\t\tif err = db.QueryRow(q).Scan(&version, &mode, &autocommit, &tz); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tlog.Printf(\"MySQL version = %q\", version)\n\t\t\tlog.Printf(\"MySQL sql_mode = %q\", mode)\n\t\t\tlog.Printf(\"MySQL autocommit = %q\", autocommit)\n\t\t\tlog.Printf(\"MySQL time_zone = %q\", tz)\n\t\t})\n\n\tcase sqlite3.Dialect:\n\t\tif _, err = db.Exec(\"PRAGMA foreign_keys = ON\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver = %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tvar version, sourceID string\n\t\t\tif err = db.QueryRow(\"SELECT sqlite_version(), sqlite_source_id()\").Scan(&version, &sourceID); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tlog.Printf(\"SQLite3 version = %q\", version)\n\t\t\tlog.Printf(\"SQLite3 source = %q\", sourceID)\n\t\t})\n\n\tcase mssql.Dialect: //nolint:staticcheck\n\t\tfallthrough\n\tcase sqlserver.Dialect:\n\t\tinspectOnce.Do(func() {\n\t\t\tlog.Printf(\"driver = %q, source = %q\", driver, source)\n\n\t\t\tlog.Printf(\"time.Now() = %s\", now)\n\t\t\tlog.Printf(\"time.Now().UTC() = %s\", now.UTC())\n\n\t\t\tvar version string\n\t\t\tvar options uint16\n\t\t\tif err = db.QueryRow(\"SELECT @@VERSION, @@OPTIONS\").Scan(&version, &options); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\txact := \"ON\"\n\t\t\tif options&0x4000 == 0 {\n\t\t\t\txact = \"OFF\"\n\t\t\t}\n\t\t\tlog.Printf(\"MS SQL VERSION = %s\", version)\n\t\t\tlog.Printf(\"MS SQL OPTIONS = %#4x (XACT_ABORT %s)\", options, xact)\n\t\t})\n\n\tdefault:\n\t\tlog.Fatalf(\"reform: no dialect for driver %s\", 
driver)\n\t}\n\n\treturn reform.NewDB(db, dialect, nil)\n}",
"func InitDatabase() (db *gorm.DB, err error) {\n\tdbDriver := viper.GetString(\"DB_DIALECT\")\n\tvar connectionString string\n\n\tif dbDriver == \"postgres\" {\n\t\tconnectionString = buildPostgresConnectionString()\n\t}\n\n\tdb, err = openConnection(dbDriver, connectionString)\n\n\treturn\n}",
"func SetupDatabase(t *testing.T) db.Database {\n\tfmt.Println(\"Setting up Database\")\n\tdb, err := db.CreateDatabaseConnection(TestPath)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tdb.InitializeNotesTable()\n\treturn db\n}",
"func TestSqlLite(t *testing.T) {\n\tvar (\n\t\tconexion db.StConect\n\t)\n\tpath := \"config/sqllite.ini\"\n\tt.Logf(\"Capturando path:%s\", path)\n\terr := conexion.ConfigINI(path)\n\tif err != nil {\n\t\tt.Errorf(\"Error:%s\", err.Error())\n\t}\n\tt.Logf(\"Conexion:%s\", conexion.Conexion.ToString())\n\tt.Logf(\"Probando...\")\n\tt.Logf(\"prueba:%v\", conexion.Test())\n}",
"func setupDB() (string, string) {\n\tif config.Settings.Enviroment == \"test\" {\n\t\treturn config.Settings.TestDatabaseURL, config.Settings.DialectDB\n\t} else {\n\t\treturn config.Settings.DatabaseURL, config.Settings.DialectDB\n\t}\n}",
"func initTest(t *testing.T) *database.Connection {\n\tlogging.SetupStdoutLogging()\n\treturn database.NewTestConnection(t, \"service-test-db\")\n}",
"func InitTestDatabase(ctx context.Context) (testcontainers.Container, *gorm.DB, error) {\n\t// Create the Postgres test container\n\treq := testcontainers.ContainerRequest{\n\t\tImage: \"postgis/postgis:latest\",\n\t\tExposedPorts: []string{\"5432/tcp\"},\n\t\tEnv: map[string]string{\n\t\t\t\"POSTGRES_DB\": \"postgres\",\n\t\t\t\"POSTGRES_USER\": \"postgres\",\n\t\t\t\"POSTGRES_PASSWORD\": \"postgres\",\n\t\t},\n\t\tWaitingFor: wait.ForLog(\"database system is ready to accept connections\").WithOccurrence(2),\n\t}\n\tcontainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{\n\t\tContainerRequest: req,\n\t\tStarted: true,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Get the host\n\thost, err := container.Host(ctx)\n\tif err != nil {\n\t\tcontainer.Terminate(ctx)\n\t\treturn nil, nil, err\n\t}\n\n\t// Get the port\n\tport, err := container.MappedPort(ctx, \"5432\")\n\tif err != nil {\n\t\tcontainer.Terminate(ctx)\n\t\treturn nil, nil, err\n\t}\n\n\t// Create connection string to the test container\n\tdsn := fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable\",\n\t\thost,\n\t\tport.Port(),\n\t\t\"postgres\",\n\t\t\"postgres\",\n\t\t\"postgres\")\n\n\t// Connect to the database\n\tdatabase, err := InitDatabase(dsn)\n\tif err != nil {\n\t\tcontainer.Terminate(ctx)\n\t\treturn nil, nil, err\n\t}\n\n\treturn container, database, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewlrcParser produces a new parser instance for the optional input antlr.TokenStream. The lrcParser instance produced may be reused by calling the SetInputStream method. The initial parser configuration is expensive to construct, and the object is not threadsafe; however, if used within a Golang sync.Pool, the construction cost amortizes well and the objects can be used in a threadsafe manner.
|
// NewlrcParser builds a parser for the supplied token stream. The
// serialized ATN is expensive to deserialize, so instances are best
// pooled (e.g. via sync.Pool) and reused through SetInputStream; a
// single instance is not safe for concurrent use.
func NewlrcParser(input antlr.TokenStream) *lrcParser {
	p := new(lrcParser)
	atn := antlr.NewATNDeserializer(nil).DeserializeFromUInt16(parserATN)
	dfa := make([]*antlr.DFA, len(atn.DecisionToState))
	for i, state := range atn.DecisionToState {
		dfa[i] = antlr.NewDFA(state, i)
	}
	p.BaseParser = antlr.NewBaseParser(input)
	p.Interpreter = antlr.NewParserATNSimulator(p, atn, dfa, antlr.NewPredictionContextCache())
	p.RuleNames = ruleNames
	p.LiteralNames = literalNames
	p.SymbolicNames = symbolicNames
	p.GrammarFileName = "lrcParser.g4"
	return p
}
|
[
"func New(r io.Reader) *Parser {\n\treturn &Parser{r: r, Wg: &sync.WaitGroup{}}\n}",
"func New(l *lexer.Lexer) *Parser {\n\tp := &Parser{\n\t\tl: l,\n\t\terrors: []ParsingError{},\n\t}\n\tp.nextToken()\n\tp.nextToken()\n\n\treturn p\n}",
"func New(r io.Reader, opts ...Opt) *Lexer {\n\tl := &Lexer{\n\t\tr: bufio.NewReader(r),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(l)\n\t}\n\n\treturn l\n}",
"func newParser() *Parser {\n\treturn &Parser{\n\t\tbuiltins: map[string][]byte{},\n\t\tlimiter: make(semaphore, 10),\n\t}\n}",
"func NewTLParser(input antlr.TokenStream) *TLParser {\n\tthis := new(TLParser)\n\tdeserializer := antlr.NewATNDeserializer(nil)\n\tdeserializedATN := deserializer.DeserializeFromUInt16(parserATN)\n\tdecisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState))\n\tfor index, ds := range deserializedATN.DecisionToState {\n\t\tdecisionToDFA[index] = antlr.NewDFA(ds, index)\n\t}\n\tthis.BaseParser = antlr.NewBaseParser(input)\n\n\tthis.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache())\n\tthis.RuleNames = ruleNames\n\tthis.LiteralNames = literalNames\n\tthis.SymbolicNames = symbolicNames\n\tthis.GrammarFileName = \"TL.g4\"\n\n\treturn this\n}",
"func NewCParser(input antlr.TokenStream) *CParser {\n\tCParserInit()\n\tthis := new(CParser)\n\tthis.BaseParser = antlr.NewBaseParser(input)\n\tstaticData := &CParserStaticData\n\tthis.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)\n\tthis.RuleNames = staticData.RuleNames\n\tthis.LiteralNames = staticData.LiteralNames\n\tthis.SymbolicNames = staticData.SymbolicNames\n\tthis.GrammarFileName = \"C.g4\"\n\n\treturn this\n}",
"func (mrb *MrbState) ParserNew() MrbParserState {\n\treturn MrbParserState{C.mrb_parser_new(mrb.p)}\n}",
"func New(parser protocol.OperationParser) *Parser {\n\treturn &Parser{coreParser: parser}\n}",
"func NewParser(r io.Reader) (p *Parser, err error) {\n\tdoc, err := html.Parse(r)\n\tif err != nil {\n\t\treturn p, err\n\t}\n\tp = &Parser{\n\t\tdoc: doc,\n\t\tnode: doc,\n\t}\n\treturn p, nil\n}",
"func New(filepath string) *Parser {\n\tnewParse := &Parser{filepath: filepath, LineNumber: 0, nextAddress: 16}\n\tnewParse.symbols = map[string]int{\n\t\t\"SP\": 0,\n\t\t\"LCL\": 1,\n\t\t\"ARG\": 2,\n\t\t\"THIS\": 3,\n\t\t\"THAT\": 4,\n\t\t\"SCREEN\": 16384,\n\t\t\"KBD\": 24576,\n\t}\n\tfor i := 0; i < 16; i++ {\n\t\tnewParse.addSymbol(\"R\"+strconv.Itoa(i), i)\n\t}\n\treturn newParse\n}",
"func NewParser() Parser {\n\treturn new(parser)\n}",
"func NewParser() *Parser {\n\treturn &Parser{lineFormat: commonLogFormat}\n}",
"func New(input string) Lexer {\n\tl := Lexer{input: input}\n\tl.advance()\n\treturn l\n}",
"func NewParser(r *reader.Reader) *RespParser {\n\tp := &RespParser{\n\t\t// start with root frame to contain eventual result\n\t\tstack: []stackFrame{{}},\n\t}\n\tp.Reset(r)\n\treturn p\n}",
"func NewCParser(input antlr.TokenStream) *CParser {\n\tthis := new(CParser)\n\tdeserializer := antlr.NewATNDeserializer(nil)\n\tdeserializedATN := deserializer.DeserializeFromUInt16(parserATN)\n\tdecisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState))\n\tfor index, ds := range deserializedATN.DecisionToState {\n\t\tdecisionToDFA[index] = antlr.NewDFA(ds, index)\n\t}\n\tthis.BaseParser = antlr.NewBaseParser(input)\n\n\tthis.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache())\n\tthis.RuleNames = ruleNames\n\tthis.LiteralNames = literalNames\n\tthis.SymbolicNames = symbolicNames\n\tthis.GrammarFileName = \"C.g4\"\n\n\treturn this\n}",
"func newParser(name string, tokens []*Token, template *Template) *Parser {\n\tp := &Parser{\n\t\tname: name,\n\t\ttokens: tokens,\n\t\ttemplate: template,\n\t}\n\tif len(tokens) > 0 {\n\t\tp.lastToken = tokens[len(tokens)-1]\n\t}\n\treturn p\n}",
"func NewParser() parser.BlockParser {\n\treturn defaultParser\n}",
"func New(src string) *Lexer {\n\treturn &Lexer{\n\t\tcur: utfstrings.Cursor{\n\t\t\tString: src,\n\t\t},\n\t}\n}",
"func NewFromString(startState StateFn, input string, channelCap int) Lexer {\n\treturn newLex(startState, strings.NewReader(input), len(input), false, channelCap)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewNodeFormatter creates a new node formatter.
|
// NewNodeFormatter returns a NodeFormatter that prefixes each level of
// nesting with the supplied indent string.
func NewNodeFormatter(indent string) *NodeFormatter {
	f := &NodeFormatter{}
	f.indent = indent
	return f
}
|
[
"func NewFormat(source string, quiet bool) formatter.Format {\n\tswitch source {\n\tcase formatter.PrettyFormatKey:\n\t\treturn nodeInspectPrettyTemplate\n\tcase formatter.TableFormatKey:\n\t\tif quiet {\n\t\t\treturn formatter.DefaultQuietFormat\n\t\t}\n\t\treturn defaultNodeTableFormat\n\tcase formatter.RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `node_id: {{.ID}}`\n\t\t}\n\t\treturn `node_id: {{.ID}}\\nhostname: {{.Hostname}}\\nstatus: {{.Status}}\\navailability: {{.Availability}}\\nmanager_status: {{.ManagerStatus}}\\n`\n\t}\n\treturn formatter.Format(source)\n}",
"func (ns Nodes) Format(s fmt.State, c rune) {\n\tdelimiter := \", \"\n\tif s.Flag(' ') {\n\t\tdelimiter = \" \"\n\t}\n\tif s.Flag('+') {\n\t\tdelimiter = \", \\n\"\n\t}\n\tswitch c {\n\tcase 'd':\n\t\ts.Write([]byte(\"[\"))\n\t\tfor i, n := range ns {\n\t\t\tfmt.Fprintf(s, \"%x\", n.ID())\n\t\t\tif i < len(ns)-1 {\n\t\t\t\tfmt.Fprintf(s, \"%s\", delimiter)\n\t\t\t}\n\t\t}\n\t\ts.Write([]byte(\"]\"))\n\tcase 'v', 's':\n\t\ts.Write([]byte(\"[\"))\n\t\tfor i, n := range ns {\n\t\t\tif s.Flag('#') {\n\t\t\t\tfmt.Fprintf(s, \"%s :: %v\", n.Name(), n.t)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(s, \"%s\", n.Name())\n\t\t\t}\n\t\t\tif i < len(ns)-1 {\n\t\t\t\tfmt.Fprintf(s, \"%s\", delimiter)\n\t\t\t}\n\t\t}\n\t\ts.Write([]byte(\"]\"))\n\tcase 'Y':\n\t\ts.Write([]byte(\"[\"))\n\t\tfor i, n := range ns {\n\t\t\tfmt.Fprintf(s, \"%v\", n.t)\n\t\t\tif i < len(ns)-1 {\n\t\t\t\tfmt.Fprintf(s, \"%s\", delimiter)\n\t\t\t}\n\t\t}\n\t\ts.Write([]byte(\"]\"))\n\n\tcase 'P':\n\t\ts.Write([]byte(\"[\"))\n\t\tfor i, n := range ns {\n\t\t\tfmt.Fprintf(s, \"%p\", n)\n\t\t\tif i < len(ns)-1 {\n\t\t\t\tfmt.Fprintf(s, \"%s\", delimiter)\n\t\t\t}\n\t\t}\n\t\ts.Write([]byte(\"]\"))\n\t}\n}",
"func newNode(format string, paramsNum int, router *Router) *Node {\n\tn := new(Node)\n\tn.format = format\n\tn.paramNum = paramsNum\n\tn.root = router\n\treturn n\n}",
"func NewFormatter(s string) (*Formatter, error) {\n\tana, err := newAnalyzer(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = os.Chdir(ana.repoRoot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := exec.Command(\"/bin/sh\", \"-c\", `go list -f \"{{.ImportPath}} {{.Imports}}\" ./...`).Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpkgMap := parseImportedPackages(strings.TrimSuffix(string(b), \"\\n\"))\n\n\terr = ana.analyzeIntoTree(strings.TrimPrefix(ana.target, ana.srcPath), pkgMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ana.ToFormatter(), nil\n}",
"func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {\n\tfs := &formatState{value: v, cs: cs}\n\tfs.pointers = make(map[uintptr]int)\n\treturn fs\n}",
"func FormatNode(node ast.Node) (string, error) {\n\tbuf := new(bytes.Buffer)\n\terr := format.Node(buf, token.NewFileSet(), node)\n\treturn buf.String(), err\n}",
"func newNode(t ntype, pre string, p *node, c children, h *Resource, pnames []string) *node {\n\tn := &node{\n\t\ttyp: t,\n\t\tlabel: pre[0],\n\t\tprefix: pre,\n\t\tparent: p,\n\t\tchildren: c,\n\t\t// create a Resource method to handler map for this node\n\t\tresource: h,\n\t\tpnames: pnames,\n\t}\n\tfor _, v := range pnames {\n\t\tn.fmtpnames = append(n.fmtpnames, \"%3A\"+v+\"=\")\n\t}\n\treturn n\n}",
"func newOutputNode(parent *outputNode, path string) *outputNode {\n\treturn &outputNode{\n\t\tpathTracker: &pathTracker{parent: parent.pathTracker, path: path},\n\t\terrRecorder: parent.errRecorder,\n\t}\n}",
"func NewNode() *node { return new(node) }",
"func NewNdjsonFormatter() (f *NdjsonFormatter) {\n\treturn &NdjsonFormatter{}\n}",
"func NewDecoratedNode(typ DecorationType, decoration string, node sql.Node) *DecoratedNode {\n\treturn &DecoratedNode{\n\t\tUnaryNode: UnaryNode{node},\n\t\tdecoration: decoration,\n\t\tDecorationType: typ,\n\t}\n}",
"func New(scanner *bufio.Scanner, indentationSymbol string) (*Node, error) {\n\treturn newTree(\n\t\ttoLines(scanner, indentationSymbol),\n\t)\n}",
"func CreateFormatter(logFormat string) logrus.Formatter {\n\tvar formatType logrus.Formatter\n\tswitch strings.ToLower(logFormat) {\n\tcase JsonFormat:\n\t\tformatType = &logrus.JSONFormatter{}\n\tcase TextFormat:\n\t\tif os.Getenv(\"FORCE_LOG_COLORS\") == \"1\" {\n\t\t\tformatType = &logrus.TextFormatter{ForceColors: true}\n\t\t} else {\n\t\t\tformatType = &logrus.TextFormatter{}\n\t\t}\n\tdefault:\n\t\tformatType = &logrus.TextFormatter{}\n\t}\n\n\treturn formatType\n}",
"func AppendNewNode(messageNewNode string) NodeStruct {\n\n\ts := \"START AppendNewNode() - Appends a new Node to the nodeList\"\n\tlog.Debug(\"ROUTINGNODE: I/F \" + s)\n\n\tnewNode := appendNewNode(messageNewNode)\n\n\ts = \"END AppendNewNode() - Appends a new Node to the nodeList\"\n\tlog.Debug(\"ROUTINGNODE: I/F \" + s)\n\n\treturn newNode\n\n}",
"func newNode(lex *lexer.Lexeme) *Node {\n\treturn &Node{\n\t\tlexeme: lex,\n\t}\n}",
"func newHandlerNode(method string, handler http.Handler) *handlerNode {\n\treturn &handlerNode{method, handler}\n}",
"func appendNewNode(messageNewNode string) NodeStruct {\n\n\ts := \"START appendNewNode() - Appends a new Node to the nodeList\"\n\tlog.Debug(\"ROUTINGNODE: GUTS \" + s)\n\n\tnewNode := NodeStruct{}\n\tjson.Unmarshal([]byte(messageNewNode), &newNode)\n\n\tnewNode.Index = len(nodeList)\n\tnodeList = append(nodeList, newNode)\n\n\ts = \"END appendNewNode() - Appends a new Node to the nodeList\"\n\tlog.Debug(\"ROUTINGNODE: GUTS \" + s)\n\n\treturn newNode\n\n}",
"func NewNewLineFormatter(options ...func(*NewLineFormatter) error) (*NewLineFormatter, error) {\n\tformatter := &NewLineFormatter{naive: false}\n\tfor _, optionFunc := range options {\n\t\terr := optionFunc(formatter)\n\t\tif err != nil {\n\t\t\treturn nil, xerrors.Errorf(\"Could not construct NewLineFormatter: %w\", err)\n\t\t}\n\t}\n\n\treturn formatter, nil\n}",
"func NewJSONFormatter(oidResolver OIDResolver, aggregator sender.Sender) (JSONFormatter, error) {\n\tif oidResolver == nil {\n\t\treturn JSONFormatter{}, fmt.Errorf(\"NewJSONFormatter called with a nil OIDResolver\")\n\t}\n\treturn JSONFormatter{oidResolver, aggregator}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ToLines converts a node to a LISP-style slice of strings.
|
// ToLines renders n as a LISP-style S-expression, one slice element per
// output line. Each node opens as "(<type> [<start>:<end>]"; children
// are rendered recursively, each of their lines prefixed with the
// formatter's indent, and the node is closed by ")" on its own line.
// A node without children collapses onto a single line.
func (f *NodeFormatter) ToLines(n Node) []string {
	head := fmt.Sprintf("(%s [%d:%d]", n.Type.String(), n.Loc.Start, n.Loc.End)
	if len(n.Children) == 0 {
		return []string{head + ")"}
	}
	out := []string{head}
	for _, child := range n.Children {
		for _, childLine := range f.ToLines(child) {
			out = append(out, f.indent+childLine)
		}
	}
	return append(out, ")")
}
|
[
"func LineToSlice(s string) []string {\n\tif s == \"\" {\n\t\treturn nil\n\t}\n\treturn strings.Split(strings.TrimRight(s, \"\\n\"), \"\\n\")\n}",
"func wayToLineString(way *Way, nodes map[osm.NodeID]*Node) geom.LineString {\n\tvar p geom.LineString\n\tfor _, n := range way.Nodes {\n\t\tpoint, ok := nodeToPoint(nodes[n])\n\t\tif ok {\n\t\t\tp = append(p, point)\n\t\t}\n\t}\n\treturn p\n}",
"func Lines(nodes []*Node) ([]string, error) {\n\tif len(nodes) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\tg := &graph{\n\t\tslots: [][]byte{nodes[0].ID},\n\t\tnodes: nodes,\n\t}\n\n\treturn g.table().lines()\n}",
"func ToStrings(nodes []Node) string {\n\tnodeStr := \"\"\n\tfirst := true\n\tfor _, node := range nodes {\n\t\tif &node != nil {\n\t\t\tif !first {\n\t\t\t\tnodeStr += \" \"\n\t\t\t}\n\t\t\tnodeStr += node.String()\n\t\t\tfirst = false\n\t\t}\n\t}\n\treturn nodeStr\n}",
"func (c TemplateCommand) StringLines() []string {\n\treturn nil\n\t// return c.Command.StringLines()\n}",
"func ToLine(v interface{}) (string, error) {\n\tsu, err := ParseLine(v)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn su.ToLine()\n}",
"func (n *Node) toSlice() []int {\n\treturn []int{n.start, n.end}\n}",
"func (tu TemplateUtils) Lines(bytes []byte) []template.HTML {\n\treturn Lines(bytes)\n}",
"func relationToMultiLineString(relation *Relation, ways map[osm.WayID]*Way,\n\tnodes map[osm.NodeID]*Node) geom.MultiLineString {\n\tvar p geom.MultiLineString\n\tfor _, m := range relation.Members {\n\t\tswitch m.Type {\n\t\tcase osm.TypeWay:\n\t\t\tif w := ways[osm.WayID(m.Ref)]; w != nil {\n\t\t\t\tp = append(p, wayToLineString(w, nodes))\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Errorf(\"unsupported relation type %T\", m.Type))\n\t\t}\n\t}\n\treturn p\n}",
"func renderNode(node forest.Node, store forest.Store, config renderConfig) ([]RenderedLine, error) {\n\tvar (\n\t\tancestorColor = tcell.StyleDefault.Foreground(tcell.ColorYellow)\n\t\tdescendantColor = tcell.StyleDefault.Foreground(tcell.ColorGreen)\n\t\tcurrentColor = tcell.StyleDefault.Foreground(tcell.ColorRed)\n\t\tconversationRootColor = tcell.StyleDefault.Foreground(tcell.ColorTeal)\n\t)\n\tidstring, _ := node.ID().MarshalString()\n\tlog.Printf(\"%s => %d\", idstring, config.state)\n\tvar out []RenderedLine\n\tvar style tcell.Style\n\tswitch n := node.(type) {\n\tcase *forest.Reply:\n\t\tauthor, present, err := store.Get(&n.Author)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if !present {\n\t\t\treturn nil, fmt.Errorf(\"Node %v is not in the store\", n.Author)\n\t\t}\n\t\tasIdent := author.(*forest.Identity)\n\t\tswitch config.state {\n\t\tcase ancestor:\n\t\t\tstyle = ancestorColor\n\t\tcase descendant:\n\t\t\tstyle = descendantColor\n\t\tcase current:\n\t\t\tstyle = currentColor\n\t\tdefault:\n\t\t\tstyle = tcell.StyleDefault\n\t\t}\n\t\ttimestamp := n.Created.Time().UTC()\n\t\trendered := fmt.Sprintf(\"%s - %s:\\n%s\", timestamp.Format(time.Stamp), string(asIdent.Name.Blob), string(n.Content.Blob))\n\t\t// drop all trailing newline characters\n\t\tfor rendered[len(rendered)-1] == \"\\n\"[0] {\n\t\t\trendered = rendered[:len(rendered)-1]\n\t\t}\n\t\tfor _, line := range strings.Split(rendered, \"\\n\") {\n\t\t\tout = append(out, RenderedLine{\n\t\t\t\tID: n.ID(),\n\t\t\t\tStyle: style,\n\t\t\t\tText: line,\n\t\t\t})\n\t\t}\n\t\tif n.Depth == 1 {\n\t\t\tout[0].Style = conversationRootColor\n\t\t} else {\n\t\t\tout[0].Style = tcell.StyleDefault\n\t\t}\n\t}\n\treturn out, nil\n}",
"func (nodes CharNodes) ToString() string {\n\tvar str []string\n\n\tfor _, node := range nodes {\n\t\t// Add before items\n\t\tif len(node.Before) > 0 {\n\t\t\tfor _, before := range node.Before {\n\t\t\t\tstr = append(str, before)\n\t\t\t}\n\t\t}\n\n\t\t// Add actual char\n\t\tstr = append(str, node.Char)\n\n\t\t// Add after items\n\t\tif len(node.After) > 0 {\n\t\t\tfor _, after := range node.After {\n\t\t\t\tstr = append(str, after)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn strings.Join(str, \"\")\n}",
"func (p *CosmosNodePresenter) ToRow() []string {\n\treturn []string{p.Name, p.ChainID, p.State, p.Config}\n}",
"func (nls newlines) getLines(data []byte, low, high int) []byte {\n\tif low >= high {\n\t\treturn nil\n\t}\n\n\tlowStart, _ := nls.lineBounds(low)\n\t_, highEnd := nls.lineBounds(high - 1)\n\n\treturn data[lowStart:highEnd]\n}",
"func nodeStr(t Tree, inbranch, node interface{}) (string, int) {\n\n\tvar line string\n\n\tif inbranch != nil {\n\t\tline += fmt.Sprintf(\"-%03v->\", inbranch)\n\t}\n\n\tnodeid := t.NodeID(node)\n\tif nodeid != \"\" {\n\t\tline += \"#\" + nodeid\n\t}\n\n\tindent := len(line)\n\n\tline += t.NodeInfo(node)\n\n\tbrCnt := len(t.Branches(node))\n\tif brCnt > 1 {\n\t\tline += fmt.Sprintf(\"*%d\", brCnt)\n\t}\n\n\tv, isLeaf := t.LeafVal(node)\n\tif isLeaf {\n\t\tline += fmt.Sprintf(\"=%v\", v)\n\t}\n\treturn line, indent\n}",
"func pathToStringSlice(path []Vertex) []string {\n\tactual := []string{}\n\tfor _, s := range path {\n\t\tactual = append(actual, s.(Station).id.String())\n\t}\n\treturn actual\n}",
"func linesToTreeNodes(location *profile.Location, mappingId uint64, mapping *pb.Mapping, lines []profile.Line, value, diff int64) (outerMost *pb.FlamegraphNode, innerMost *pb.FlamegraphNode) {\n\tfor i, line := range lines {\n\t\tvar children []*pb.FlamegraphNode = nil\n\t\tif i > 0 {\n\t\t\tchildren = []*pb.FlamegraphNode{outerMost}\n\t\t}\n\t\touterMost = &pb.FlamegraphNode{\n\t\t\tMeta: &pb.FlamegraphNodeMeta{\n\t\t\t\tLocation: &pb.Location{\n\t\t\t\t\tId: location.ID,\n\t\t\t\t\tMappingId: mappingId,\n\t\t\t\t\tAddress: location.Address,\n\t\t\t\t\tIsFolded: location.IsFolded,\n\t\t\t\t},\n\t\t\t\tFunction: &pb.Function{\n\t\t\t\t\tId: line.Function.ID,\n\t\t\t\t\tName: line.Function.Name,\n\t\t\t\t\tSystemName: line.Function.SystemName,\n\t\t\t\t\tFilename: line.Function.Filename,\n\t\t\t\t\tStartLine: line.Function.StartLine,\n\t\t\t\t},\n\t\t\t\tLine: &pb.Line{\n\t\t\t\t\tLocationId: location.ID,\n\t\t\t\t\tFunctionId: line.Function.ID,\n\t\t\t\t\tLine: line.Line,\n\t\t\t\t},\n\t\t\t\tMapping: mapping,\n\t\t\t},\n\t\t\tChildren: children,\n\t\t\tCumulative: value,\n\t\t\tDiff: diff,\n\t\t}\n\t\tif i == 0 {\n\t\t\tinnerMost = outerMost\n\t\t}\n\t}\n\n\treturn outerMost, innerMost\n}",
"func getLines(s string) []string {\n\tvar lines []string\n\n\tfor _, line := range strings.Split(s, nl) {\n\t\tlines = append(lines, line)\n\t}\n\treturn lines\n}",
"func copyLines(lines []shaping.Line) []shaping.Line {\n\tout := make([]shaping.Line, len(lines))\n\tfor lineIdx, line := range lines {\n\t\tlineCopy := make([]shaping.Output, len(line))\n\t\tfor runIdx, run := range line {\n\t\t\tlineCopy[runIdx] = run\n\t\t\tlineCopy[runIdx].Glyphs = slices.Clone(run.Glyphs)\n\t\t}\n\t\tout[lineIdx] = lineCopy\n\t}\n\treturn out\n}",
"func (l *Lorem) Lines(lineCount int) string {\n\treturn l.Lorem().Sentences(lineCount, \"\\n\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetSecrets retrieves a secret value and memoizes the result
|
// GetSecrets fetches the value stored under key in the named Kubernetes
// secret in the given namespace, retrying transient API-server errors
// with exponential backoff. It returns the raw secret bytes, or an
// error if the secret cannot be read or does not contain the key.
//
// NOTE(review): despite the surrounding description mentioning
// memoization, nothing here caches the result — every call hits the
// API server. Confirm whether caching is expected by callers.
func GetSecrets(clientSet kubernetes.Interface, namespace, name, key string) ([]byte, error) {
	secretsIf := clientSet.CoreV1().Secrets(namespace)
	var secret *apiv1.Secret
	var err error
	// The backoff's own return value (e.g. a wait timeout) is
	// deliberately discarded: `err` captures the most recent Get
	// failure, so a retry-exhausted call still surfaces the underlying
	// API error in the check below.
	_ = wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
		secret, err = secretsIf.Get(name, metav1.GetOptions{})
		if err != nil {
			log.Warnf("Failed to get secret '%s': %v", name, err)
			if !retry.IsRetryableKubeAPIError(err) {
				// Non-retryable error: abort the backoff loop now.
				return false, err
			}
			// Retryable error: ask the backoff to try again.
			return false, nil
		}
		// Success: stop retrying.
		return true, nil
	})
	if err != nil {
		return []byte{}, errors.InternalWrapError(err)
	}
	val, ok := secret.Data[key]
	if !ok {
		return []byte{}, errors.Errorf(errors.CodeBadRequest, "secret '%s' does not have the key '%s'", name, key)
	}
	return val, nil
}
|
[
"func (conf ProjectsJSON) GetSecrets(projectName ProjectName) (Secrets, error) {\n\tp, exists := conf[projectName]\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"no project %q\", projectName)\n\t}\n\tresult := make(Secrets, len(p.Secrets), len(p.Secrets))\n\tfor i, s := range p.Secrets {\n\t\tresult[i] = Secret(s)\n\t}\n\treturn result, nil // TODO: change type of field\n\t//\tresults := make([]string, len(p.Secrets), len(p.Secrets))\n\t//\tfor i, secret := range p.Secrets {\n\t//\t\tresults[i] = fmt.Sprintf(\"https://%s/%s/files/%s/%s/certs/%s\", secretServiceDomain, sshash, pv.Name, pv.Version, secret.Name)\n\t//\t}\n\t//\treturn results\n}",
"func (i *IBMSecretsManager) GetSecrets(path string, _ map[string]string) (map[string]interface{}, error) {\n\tsecret, err := i.VaultClient.Logical().Read(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif secret == nil {\n\t\treturn nil, fmt.Errorf(\"Could not find secrets at path %s\", path)\n\t}\n\n\tvar data map[string]interface{}\n\tdata = secret.Data\n\n\t// Make sure the secret exists\n\tif _, ok := data[\"secrets\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"Could not find secrets at path %s\", path)\n\t}\n\n\t// Get list of secrets\n\tsecretList := data[\"secrets\"].([]interface{})\n\tv := make([]string, 0, len(secretList))\n\t// Loop through secrets and get id\n\t// as getting the list of secrets does not include the payload\n\tfor _, value := range secretList {\n\t\tsecret := value.(map[string]interface{})\n\t\tif t, found := secret[\"id\"]; found {\n\t\t\tv = append(v, t.(string))\n\t\t}\n\t}\n\n\t// Read each secret and get payload\n\tsecrets := make(map[string]interface{})\n\tfor _, j := range v {\n\t\tsecret, err := i.VaultClient.Logical().Read(fmt.Sprintf(\"%s/%s\", path, j))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif secret == nil || len(secret.Data) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar data map[string]interface{}\n\t\tdata = secret.Data\n\n\t\t// Get name and data of secret and append to secrets map\n\t\tsecretName := data[\"name\"].(string)\n\t\tsecretData := data[\"secret_data\"].(map[string]interface{})\n\t\tsecrets[secretName] = secretData[\"payload\"]\n\t}\n\n\treturn secrets, nil\n}",
"func getSecret(vaultClient keyvault.BaseClient, vaultname string, secname string) (result keyvault.SecretBundle, err error) {\n\tlog.Debugf(\"Making a call to: https://%s.vault.azure.net to retrieve value for KEY: %s\\n\", vaultname, secname)\n\treturn vaultClient.GetSecret(context.Background(), \"https://\"+vaultname+\".vault.azure.net\", secname, \"\")\n}",
"func GetSecret(s string) string {\n\tlog.Printf(\"Getting secret %v....\\n\", s)\n\tv, err := kv.GetSecret(context.Background(), GetKvURL(kvName), s, \"\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to get value for %v.\\n\", s)\n\t\tpanic(err.Error())\n\t}\n\treturn *v.Value\n}",
"func (c *SynchronizationJobClient) GetSecrets(ctx context.Context, servicePrincipalId string) (*SynchronizationSecret, int, error) {\n\tresp, status, _, err := c.BaseClient.Get(ctx, GetHttpRequestInput{\n\t\tValidStatusCodes: []int{http.StatusOK},\n\t\tConsistencyFailureFunc: ServicePrincipalDoesNotExistConsistency,\n\t\tUri: Uri{\n\t\t\tEntity: fmt.Sprintf(\"/servicePrincipals/%s/synchronization/secrets\", servicePrincipalId),\n\t\t\tHasTenantId: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, status, fmt.Errorf(\"SynchronizationJobClient.BaseClient.Get(): %v\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\trespBody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, status, fmt.Errorf(\"io.ReadAll(): %v\", err)\n\t}\n\n\tvar synchronizationSecret SynchronizationSecret\n\tif err := json.Unmarshal(respBody, &synchronizationSecret); err != nil {\n\t\treturn nil, status, fmt.Errorf(\"json.Unmarshal(): %v\", err)\n\t}\n\n\treturn &synchronizationSecret, status, nil\n}",
"func GetSecrets(retrieve func(string) string, prefix, delimeter string, args ...string) []string {\n\tenvs := []string{}\n\n\tfor _, key := range args {\n\t\tenvs = append(envs, retrieve(prefix+delimeter+key))\n\t}\n\n\treturn envs\n}",
"func (_class SecretClass) GetValue(sessionID SessionRef, self SecretRef) (_retval string, _err error) {\n\tif IsMock {\n\t\treturn _class.GetValueMock(sessionID, self)\n\t}\t\n\t_method := \"secret.get_value\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertSecretRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg, _selfArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertStringToGo(_method + \" -> \", _result.Value)\n\treturn\n}",
"func (m *MLPService) GetSecret(projectID models.ID, name string) (string, error) {\n\tret := m.Called(projectID, name)\n\n\tif ret[1] != nil {\n\t\treturn \"\", ret[1].(error)\n\t}\n\n\treturn (ret[0]).(string), nil\n}",
"func getSecretStore(hash string) (interface{}, error) {\n\tredisConn, err := Redis()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer redisConn.Close()\n\n\tcounterViews := strings.Join([]string{hash, \"counter\"}, \"-\")\n\tredisConn.Send(\"GET\", hash)\n\tredisConn.Flush()\n\n\t// Get GET command result\n\tresultSecret, err := redisConn.Receive()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Secret not found\n\tif resultSecret == nil {\n\t\treturn nil, nil\n\t}\n\n\tredisConn.Send(\"DECR\", counterViews)\n\tredisConn.Flush()\n\t// Get DECR command result.\n\tresultDecr, err := redisConn.Receive()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If counter equals to zero, then we reached view limit.\n\t// In this case delete keys from Redis and return secret.\n\tif resultDecr.(int64) == 0 {\n\t\tredisConn.Send(\"DEL\", counterViews, hash)\n\t\tredisConn.Flush()\n\n\t\tresultDel, err := redisConn.Receive()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// We deleted 2 keys. Check the result.\n\t\tif resultDel.(int64) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"Internal error! Code: 1\")\n\t\t}\n\t}\n\n\t// All OK, return secret\n\treturn resultSecret, nil\n}",
"func GetSecret() string {\n\treturn secret\n}",
"func (kv *SecretsKVStorePlugin) Get(ctx context.Context, orgId int64, namespace string, typ string) (string, bool, error) {\n\treq := &smp.GetSecretRequest{\n\t\tKeyDescriptor: &smp.Key{\n\t\t\tOrgId: orgId,\n\t\t\tNamespace: namespace,\n\t\t\tType: typ,\n\t\t},\n\t}\n\n\tres, err := kv.secretsPlugin.GetSecret(ctx, req)\n\tif res.UserFriendlyError != \"\" {\n\t\terr = wrapUserFriendlySecretError(res.UserFriendlyError)\n\t}\n\n\tif res.Exists {\n\t\tupdateFatalFlag(ctx, kv)\n\t}\n\n\tif kv.fallbackEnabled {\n\t\tif err != nil || res.UserFriendlyError != \"\" || !res.Exists {\n\t\t\tres.DecryptedValue, res.Exists, err = kv.fallbackStore.Get(ctx, orgId, namespace, typ)\n\t\t}\n\t}\n\n\treturn res.DecryptedValue, res.Exists, err\n}",
"func vaultGetSecret(config applicationConfig) (string, error) {\n\tvaultToken := config.VaultToken\n\tvar secrets string\n\tvar err error\n\n\t// issue new vault token if it was not set from config\n\tif config.VaultToken == \"\" {\n\t\tvaultToken, err = getK8SVaultToken(config.VaultSecretURL)\n\t\tif err != nil {\n\t\t\treturn secrets, err\n\t\t}\n\t}\n\n\tsecrets, err = getVaultSecret(config.VaultSecretURL, vaultToken)\n\tif err != nil {\n\t\treturn secrets, fmt.Errorf(\"failed to retrive vault secrets - %s\", err)\n\t}\n\n\treturn secrets, nil\n}",
"func (s *Syncer) cacheSecrets(lifetime util.SecretLifetime) error {\n\tif s.briefcase.HasCachedSecrets(lifetime) {\n\t\treturn nil\n\t}\n\n\tvar simpleSecrets []briefcase.SimpleSecret\n\n\tfor _, secret := range s.config.VaultConfig.Secrets {\n\t\tif secret.Lifetime == lifetime {\n\n\t\t\t// The same key could be in different paths, but we don't allow this because it's confusing.\n\t\t\tfor _, s := range simpleSecrets {\n\t\t\t\tif s.Key == secret.Key {\n\t\t\t\t\treturn fmt.Errorf(\"duplicate secret key %q\", secret.Key)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif secretData, err := s.readSecret(secret); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tsimpleSecrets = append(simpleSecrets, secretData...)\n\t\t\t}\n\t\t}\n\t}\n\n\ts.briefcase.StoreSecrets(lifetime, simpleSecrets)\n\n\treturn nil\n}",
"func (s *Service) Secrets() map[string]string {\n\treturn s.secrets\n}",
"func GetSecretList() []string {\n\tvar l []string\n\tvar fCount int = 0\n\tsIterator, err := kv.GetSecretsComplete(ctx.Background(), GetKvURL(kvName), nil)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to retrieve secrets: %v\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tfor sIterator.NotDone() {\n\t\tif filterSecret(sIterator.Value(), kvTagsInc, kvTagsEx) {\n\t\t\tl = append(l, path.Base(*sIterator.Value().ID))\n\t\t\tfCount++\n\t\t}\n\t\terr := sIterator.Next()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to iterator keyvault secrets: %v\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tlog.Printf(\"%v filtered results will be added to the secret\\n\", fCount)\n\treturn l\n}",
"func GetSecret(ctx *pulumi.Context, key string) pulumi.StringOutput {\n\tv, _ := get(ctx, key, \"\", \"\")\n\treturn pulumi.ToSecret(pulumi.String(v)).(pulumi.StringOutput)\n}",
"func GetSecrets(namespace, labels string) (*v1.SecretList, error) {\n\tkube := lazyInit()\n\tlistOptions := metav1.ListOptions{}\n\tif len(labels) > 0 {\n\t\tlistOptions.LabelSelector = labels\n\t}\n\tsecrets, err := kube.CoreV1().Secrets(namespace).List(context.TODO(), listOptions)\n\tif err != nil {\n\t\treturn &v1.SecretList{}, fmt.Errorf(\"Failed to get secrets: %v\", err)\n\t}\n\tfmt.Printf(\"Number of kubernetes secrets found: %d \\n\", len(secrets.Items))\n\treturn secrets, nil\n}",
"func getSecretFor(keyID string) (secret []byte, ok bool) {\n\tsecret, ok = keyToSecretMap[keyID]\n\treturn\n}",
"func (s StaticStore) GetSecret(k string) (Secret, error) {\n\tif secret, ok := s[k]; ok {\n\t\treturn secret, nil\n\t}\n\treturn Secret{}, ErrNoSuchSecret\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
OperationInterceptor: intercepts the entire query operation before it hits gqlgen
|
// InterceptOperation starts an OpenTracing span for the whole GraphQL
// operation, tags it with server-side metadata and the raw query text,
// and returns a response handler that finishes the span.
//
// Note: next(ctx) only *builds* the response handler — the actual
// execution happens later, when gqlgen invokes that handler. The
// original code deferred span.Finish() here, which closed the span
// before execution even began; the span is now finished inside the
// wrapped handler so it covers the full operation.
//
// NOTE(review): for subscriptions gqlgen may invoke the response
// handler more than once, which would call Finish repeatedly —
// acceptable for query/mutation traffic, but confirm if subscriptions
// are traced through this path.
func (t tracer) InterceptOperation(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
	oc := graphql.GetOperationContext(ctx)
	span, ctx := opentracing.StartSpanFromContext(ctx, oc.OperationName)
	ext.SpanKind.Set(span, "server")
	ext.Component.Set(span, "gqlgen")
	span.SetTag("query", oc.RawQuery)

	handler := next(ctx)
	return func(ctx context.Context) *graphql.Response {
		// defer so the span is closed even if the handler panics.
		defer span.Finish()
		return handler(ctx)
	}
}
|
[
"func (c *nativeCodec) interceptUnary(_ context.Context, method string, req, res interface{}, _ *grpc.ClientConn, _ grpc.UnaryInvoker, _ ...grpc.CallOption) error {\n\tc.doc = req.(*pb.CommitRequest).Writes[0].GetUpdate()\n\tres.(*pb.CommitResponse).WriteResults = []*pb.WriteResult{{}}\n\treturn nil\n}",
"func (s *BaseCypherListener) EnterOC_RegularQuery(ctx *OC_RegularQueryContext) {}",
"func (s *BaseCypherListener) EnterOC_Query(ctx *OC_QueryContext) {}",
"func GRPCClientUnaryInterceptor(\n\tctx context.Context,\n\tmethod string,\n\treq interface{},\n\treply interface{},\n\tcc *grpc.ClientConn,\n\tinvoker grpc.UnaryInvoker,\n\topts ...grpc.CallOption,\n) error {\n\n\tseg, err := GetSegmentFromContext(ctx)\n\tif err != nil {\n\t\treturn invoker(ctx, method, req, reply, cc, opts...)\n\t}\n\n\tsubseg := seg.AddNewSubsegment(method)\n\tsubseg.AddRemote()\n\n\tsampled := \"0\"\n\tif seg.Traced {\n\t\tsampled = \"1\"\n\t}\n\n\tmdctx := metadata.NewContext(ctx, metadata.New(map[string]string{\n\t\tmdRootKey: seg.TraceID,\n\t\tmdParentKey: subseg.ID,\n\t\tmdSampledKey: sampled,\n\t}))\n\n\terr = invoker(mdctx, method, req, reply, cc, opts...)\n\n\tsubseg.Close(err, utils.ErrorType)\n\n\treturn err\n}",
"func (me *Database) Trace(op, sql string, args ...interface{}) {\n\tif me.logger != nil {\n\t\tif sql != \"\" {\n\t\t\tif len(args) != 0 {\n\t\t\t\tme.logger.Printf(\"[goqu] %s [query:=`%s` args:=%+v]\", op, sql, args)\n\t\t\t} else {\n\t\t\t\tme.logger.Printf(\"[goqu] %s [query:=`%s`]\", op, sql)\n\t\t\t}\n\t\t} else {\n\t\t\tme.logger.Printf(\"[goqu] %s\", op)\n\t\t}\n\t}\n}",
"func (s *BaseCypherListener) EnterOC_InQueryCall(ctx *OC_InQueryCallContext) {}",
"func LoggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {\n\tstart := time.Now()\n\n\th, err := handler(ctx, req)\n\n\tlog.Printf(\"request processed - method=%s duration=%s error=%v\\n\", info.FullMethod, time.Since(start), err)\n\treturn h, err\n}",
"func (s *BaseCypherListener) EnterOC_SingleQuery(ctx *OC_SingleQueryContext) {}",
"func OperationHandler(c echo.Context) error {\n\tdbname := c.Query(\"dbname\")\n\tstartQuery := c.Query(\"start\")\n\tlimitQuery := c.Query(\"limit\")\n\n\tlimit, err := strconv.Atoi(limitQuery)\n\tif err != nil {\n\t\tlimit = 10\n\t}\n\n\tstart, err := strconv.Atoi(startQuery)\n\tif err != nil {\n\t\tstart = 0\n\t}\n\n\tInitMgo()\n\tdefer CloseMgo()\n\n\toperations, err := QueryOperation(dbname, start, limit)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn c.NoContent(http.StatusBadRequest)\n\t}\n\n\toperations[\"name\"] = dbname\n\treturn c.JSON(operations)\n}",
"func (c *RelationshipClient) Intercept(interceptors ...Interceptor) {\n\tc.inters.Relationship = append(c.inters.Relationship, interceptors...)\n}",
"func (c *RelationshipInfoClient) Intercept(interceptors ...Interceptor) {\n\tc.inters.RelationshipInfo = append(c.inters.RelationshipInfo, interceptors...)\n}",
"func (e *XAExecutor) Interceptors(hooks []exec.SQLHook) {\n\te.hooks = hooks\n}",
"func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, sql string) {\n\tv := url.Values{}\n\tv.Set(\"count\", \"false\")\n\tv.Set(\"query\", sql)\n\tq := qi.(*query.HTTP)\n\tq.HumanLabel = []byte(humanLabel)\n\tq.RawQuery = []byte(sql)\n\tq.HumanDescription = []byte(humanDesc)\n\tq.Method = []byte(\"GET\")\n\tq.Path = []byte(fmt.Sprintf(\"/exec?%s\", v.Encode()))\n\tq.Body = nil\n}",
"func (c *TransactionAttemptContext) queryWrapper(scope *Scope, statement string, options QueryOptions, hookPoint string,\n\tisBeginWork bool, existingErrorCheck bool, txData []byte, txImplicit bool) (*QueryResult, error) {\n\tc.logger.logInfof(c.attemptID, \"Query wrapped running %s, scope level = %t, begin work = %t, txImplicit = %t\",\n\t\tredactUserDataString(statement), scope != nil, isBeginWork, txImplicit)\n\n\tvar target string\n\tif !isBeginWork && !txImplicit {\n\t\tif !c.queryModeLocked() {\n\t\t\t// This is quite a big lock but we can't put the context into \"query mode\" until we know that begin work was\n\t\t\t// successful. We also can't allow any further ops to happen until we know if we're in \"query mode\" or not.\n\n\t\t\t// queryBeginWork implicitly performs an existingErrorCheck and the call into Serialize on the gocbcore side\n\t\t\t// will return an error if there have been any previously failed operations.\n\t\t\tif err := c.queryBeginWork(scope); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// If we've got here then transactionQueryState cannot be nil.\n\t\ttarget = c.queryState.queryTarget\n\n\t\tc.logger.logInfof(c.attemptID, \"Using query target %s\", redactSystemDataString(target))\n\n\t\tif !c.txn.CanCommit() && !c.txn.ShouldRollback() {\n\t\t\tc.logger.logInfof(c.attemptID, \"Transaction marked cannot commit and should not rollback, failing\")\n\t\t\treturn nil, operationFailed(transactionQueryOperationFailedDef{\n\t\t\t\tShouldNotRetry: true,\n\t\t\t\tReason: gocbcore.TransactionErrorReasonTransactionFailed,\n\t\t\t\tErrorCause: ErrOther,\n\t\t\t\tErrorClass: gocbcore.TransactionErrorClassFailOther,\n\t\t\t\tShouldNotRollback: true,\n\t\t\t}, c)\n\t\t}\n\t}\n\n\tif existingErrorCheck {\n\t\tif !c.txn.CanCommit() {\n\t\t\tc.logger.logInfof(c.attemptID, \"Transaction marked cannot commit during existing error check, failing\")\n\t\t\treturn nil, 
operationFailed(transactionQueryOperationFailedDef{\n\t\t\t\tShouldNotRetry: true,\n\t\t\t\tReason: gocbcore.TransactionErrorReasonTransactionFailed,\n\t\t\t\tErrorCause: ErrPreviousOperationFailed,\n\t\t\t\tErrorClass: gocbcore.TransactionErrorClassFailOther,\n\t\t\t}, c)\n\t\t}\n\t}\n\n\texpired, err := c.hooks.HasExpiredClientSideHook(*c, hookPoint, statement)\n\tif err != nil {\n\t\t// This isn't meant to happen...\n\t\treturn nil, &TransactionOperationFailedError{\n\t\t\terrorCause: err,\n\t\t}\n\t}\n\tcfg := c.txn.Config()\n\tif cfg.ExpirationTime < 10*time.Millisecond || expired {\n\t\tc.logger.logInfof(c.attemptID, \"Transaction expired, failing\")\n\t\treturn nil, operationFailed(transactionQueryOperationFailedDef{\n\t\t\tShouldNotRetry: true,\n\t\t\tShouldNotRollback: true,\n\t\t\tReason: gocbcore.TransactionErrorReasonTransactionExpired,\n\t\t\tErrorCause: ErrAttemptExpired,\n\t\t\tErrorClass: gocbcore.TransactionErrorClassFailExpiry,\n\t\t}, c)\n\t}\n\n\toptions.Metrics = true\n\toptions.Internal.Endpoint = target\n\tif options.Raw == nil {\n\t\toptions.Raw = make(map[string]interface{})\n\t}\n\tif !isBeginWork && !txImplicit {\n\t\toptions.Raw[\"txid\"] = c.txn.Attempt().ID\n\t}\n\n\tif len(txData) > 0 {\n\t\toptions.Raw[\"txdata\"] = json.RawMessage(txData)\n\t}\n\tif txImplicit {\n\t\toptions.Raw[\"tximplicit\"] = true\n\n\t\tif options.ScanConsistency == 0 {\n\t\t\toptions.ScanConsistency = QueryScanConsistencyRequestPlus\n\t\t}\n\t\toptions.Raw[\"durability_level\"] = durabilityLevelToQueryString(cfg.DurabilityLevel)\n\t\toptions.Raw[\"txtimeout\"] = fmt.Sprintf(\"%dms\", cfg.ExpirationTime.Milliseconds())\n\t\tif cfg.CustomATRLocation.Agent != nil {\n\t\t\t// Agent being non nil signifies that this was set.\n\t\t\toptions.Raw[\"atrcollection\"] = fmt.Sprintf(\n\t\t\t\t\"%s.%s.%s\",\n\t\t\t\tcfg.CustomATRLocation.Agent.BucketName(),\n\t\t\t\tcfg.CustomATRLocation.ScopeName,\n\t\t\t\tcfg.CustomATRLocation.CollectionName,\n\t\t\t)\n\t\t}\n\n\t\t// 
Need to make sure we don't end up straight back here...\n\t\toptions.AsTransaction = nil\n\t}\n\toptions.Timeout = cfg.ExpirationTime + cfg.KeyValueTimeout + (1 * time.Second)\n\n\terr = c.hooks.BeforeQuery(*c, statement)\n\tif err != nil {\n\t\treturn nil, queryMaybeTranslateToTransactionsError(err, c)\n\t}\n\n\tvar result *QueryResult\n\tvar queryErr error\n\tif scope == nil {\n\t\tresult, queryErr = c.cluster.Query(statement, &options)\n\t} else {\n\t\tresult, queryErr = scope.Query(statement, &options)\n\t}\n\tif queryErr != nil {\n\t\treturn nil, queryMaybeTranslateToTransactionsError(queryErr, c)\n\t}\n\n\terr = c.hooks.AfterQuery(*c, statement)\n\tif err != nil {\n\t\treturn nil, queryMaybeTranslateToTransactionsError(err, c)\n\t}\n\n\treturn result, nil\n}",
"func LogOperation(ctx context.Context, operation string, operator func() error) error {\n\ttsStart := time.Now()\n\terr := operator()\n\ttimeElapsed := time.Since(tsStart)\n\tactivityID := GetTraceID(ctx)\n\tif err == nil {\n\t\tzap.L().Info(\"operationDone\",\n\t\t\tzap.String(\"op\", operation),\n\t\t\tzap.String(\"activityId\", activityID),\n\t\t\tzap.Int(\"latency\", int(timeElapsed.Seconds()*1000)))\n\t} else {\n\t\tzap.L().Error(\"operationFailed\",\n\t\t\tzap.String(\"op\", operation),\n\t\t\tzap.String(\"activityId\", activityID),\n\t\t\tzap.Error(err),\n\t\t\tzap.Int(\"latency\", int(timeElapsed.Seconds()*1000)))\n\t}\n\n\treturn err\n}",
"func (r *Read) encodeOpQuery(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {\n\trdr, err := marshalCommand(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif desc.Server.Kind == description.Mongos {\n\t\trdr, err = r.addReadPref(r.ReadPref, desc.Server.Kind, desc.Kind, rdr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tquery := wiremessage.Query{\n\t\tMsgHeader: wiremessage.Header{RequestID: wiremessage.NextRequestID()},\n\t\tFullCollectionName: r.DB + \".$cmd\",\n\t\tFlags: r.slaveOK(desc),\n\t\tNumberToReturn: -1,\n\t\tQuery: rdr,\n\t}\n\n\treturn query, nil\n}",
"func (i *regtestInterceptor) UnaryInterceptor(ctx context.Context, method string,\n\treq, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker,\n\topts ...grpc.CallOption) error {\n\n\tidStr := fmt.Sprintf(\"LSATID %x\", i.id[:])\n\tidCtx := metadata.AppendToOutgoingContext(\n\t\tctx, lsat.HeaderAuthorization, idStr,\n\t)\n\treturn invoker(idCtx, method, req, reply, cc, opts...)\n}",
"func buildOperator(events Events, index blog.Index, cache cache.FactCache, views lookups.All, query *plandef.Plan) queryOperator {\n\tinputOps := make([]queryOperator, len(query.Inputs))\n\tfor i, input := range query.Inputs {\n\t\tinputOps[i] = buildOperator(events, index, cache, views, input)\n\t}\n\tswitch op := query.Operator.(type) {\n\tcase *plandef.Ask:\n\t\treturn &decoratedOp{events, newAsk(op, inputOps)}\n\tcase *plandef.Enumerate:\n\t\treturn &decoratedOp{events, &enumerateOp{op}}\n\tcase *plandef.ExternalIDs:\n\t\treturn &decoratedOp{events, newExternalIDs(index, views, op, inputOps)}\n\tcase *plandef.OrderByOp:\n\t\treturn &decoratedOp{events, newOrderByOp(op, inputOps)}\n\tcase *plandef.LimitAndOffsetOp:\n\t\treturn &decoratedOp{events, newLimitAndOffsetOp(op, inputOps)}\n\tcase *plandef.DistinctOp:\n\t\treturn &decoratedOp{events, newDistinctOp(op, inputOps)}\n\tcase *plandef.HashJoin:\n\t\treturn &decoratedOp{events, newHashJoin(op, inputOps)}\n\tcase *plandef.LoopJoin:\n\t\treturn &decoratedOp{events, newLoopJoin(op, inputOps)}\n\tcase *plandef.InferPO:\n\t\treturn &decoratedOp{events, newInferPO(index, views, op, inputOps)}\n\tcase *plandef.InferSP:\n\t\treturn &decoratedOp{events, newInferSP(index, views, op, inputOps)}\n\tcase *plandef.InferSPO:\n\t\treturn &decoratedOp{events, newInferSPO(index, views, cache, op, inputOps)}\n\tcase *plandef.LookupPO:\n\t\treturn &decoratedOp{events, newLookupPO(index, views, op, inputOps)}\n\tcase *plandef.LookupPOCmp:\n\t\treturn &decoratedOp{events, newLookupPOCmp(index, views, op, inputOps)}\n\tcase *plandef.LookupS:\n\t\treturn &decoratedOp{events, newLookupS(index, views, op, inputOps)}\n\tcase *plandef.LookupSP:\n\t\treturn &decoratedOp{events, newLookupSP(index, views, op, inputOps)}\n\tcase *plandef.LookupSPO:\n\t\treturn &decoratedOp{events, newLookupSPO(index, views, op, inputOps)}\n\tcase *plandef.Projection:\n\t\treturn &decoratedOp{events, newProjection(op, inputOps)}\n\tcase 
*plandef.SelectLit:\n\t\treturn &decoratedOp{events, newSelectLitOp(op, inputOps)}\n\tcase *plandef.SelectVar:\n\t\treturn &decoratedOp{events, newSelectVarOp(op, inputOps)}\n\t}\n\tpanic(fmt.Sprintf(\"Unexpected operator in query executor: %T (%v)\", query.Operator, query.Operator))\n}",
"func (h *QueryHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {\n\treturn ctx\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ProcessList takes a snapshot of running processes
|
// ProcessList takes a snapshot of every process currently visible under
// /proc (or conf.ProcPath, when set) and returns one TopProcess per
// process. Processes that vanish mid-scan (Stat/Status read fails) are
// silently skipped rather than failing the whole snapshot.
//
// cache carries the system page size and a uid -> *user.User lookup
// cache so repeated snapshots avoid redundant user.LookupId calls.
// logger may be nil; it is only used to debug-log failed uid lookups.
func ProcessList(conf *Config, cache *osCache, logger logrus.FieldLogger) ([]*TopProcess, error) {
	var fs procfs.FS
	var err error
	// An empty ProcPath means "use the default /proc mount point".
	if conf.ProcPath == "" {
		fs, err = procfs.NewDefaultFS()
	} else {
		fs, err = procfs.NewFS(conf.ProcPath)
	}
	if err != nil {
		return nil, err
	}
	procs, err := fs.AllProcs()
	if err != nil {
		return nil, err
	}
	// Best effort: if meminfo cannot be read, MemTotal stays nil and
	// MemPercent is reported as 0 below.
	hostMem, _ := fs.Meminfo()
	var out []*TopProcess
	for _, p := range procs {
		stat, err := p.Stat()
		if err != nil {
			// Process most likely exited between AllProcs and now; skip.
			continue
		}
		status, err := p.NewStatus()
		if err != nil {
			continue
		}
		// Kernel threads (and some short-lived processes) expose an
		// empty cmdline; fall back to the comm name in that case.
		cmdLine, _ := p.CmdLine()
		if len(cmdLine) == 0 {
			comm, _ := p.Comm()
			cmdLine = []string{comm}
		}
		st, _ := stat.StartTime()
		username := ""
		// assumes UIDs[0] is the real uid of the process — TODO confirm
		// against the procfs ProcStatus documentation.
		uid := status.UIDs[0]
		if uid != "" {
			cachedUser := cache.uidCache[uid]
			if cachedUser != nil {
				username = cachedUser.Username
			} else {
				user, err := user.LookupId(uid)
				if err == nil {
					cache.uidCache[uid] = user
					username = user.Username
				} else if logger != nil {
					// Lookup can fail for uids with no passwd entry
					// (e.g. containerized processes); log and move on.
					logger.WithError(err).Debugf("Could not lookup user id %s for process id %d", uid, p.PID)
				}
			}
		}
		var memPercent float64
		if hostMem.MemTotal != nil {
			// RSS is in pages, so scale by page size; MemTotal is in
			// KiB, hence the *1024 to compare in bytes.
			memPercent = 100.0 * float64(stat.RSS*cache.pageSize) / float64(*hostMem.MemTotal*1024)
		}
		out = append(out, &TopProcess{
			ProcessID:           p.PID,
			CreatedTime:         time.Unix(int64(st), 0),
			Username:            username,
			Priority:            stat.Priority,
			Nice:                &stat.Nice,
			VirtualMemoryBytes:  uint64(stat.VirtualMemory()),
			WorkingSetSizeBytes: uint64(stat.RSS * cache.pageSize),
			SharedMemBytes:      status.RssShmem + status.RssFile,
			Status:              stat.State,
			MemPercent:          memPercent,
			// stat.CPUTime() is reported in seconds; scale to a Duration.
			TotalCPUTime: time.Duration(stat.CPUTime() * float64(time.Second)),
			Command:      strings.Join(cmdLine, " "),
		})
	}
	return out, nil
}
|
[
"func (p *Provider) ProcessList(app string, opts structs.ProcessListOptions) (structs.Processes, error) {\n\tlog := Logger.At(\"ProcessList\").Namespace(\"app=%q\", app).Start()\n\n\ttasks, err := p.appTaskARNs(app)\n\tif err != nil {\n\t\treturn nil, log.Error(err)\n\t}\n\n\tps, err := p.taskProcesses(tasks)\n\tif err != nil {\n\t\treturn nil, log.Error(err)\n\t}\n\n\tif opts.Service != nil {\n\t\tpss := structs.Processes{}\n\n\t\tfor _, p := range ps {\n\t\t\tif p.Name == *opts.Service {\n\t\t\t\tpss = append(pss, p)\n\t\t\t}\n\t\t}\n\n\t\tps = pss\n\t}\n\n\ttaskDefMap := map[string]bool{}\n\tfor i := range ps {\n\t\tps[i].App = app\n\t\ttaskDefMap[ps[i].TaskDefinition] = true\n\t}\n\n\tservices, err := p.clusterServices()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserviceNames := []string{}\n\ttaskToServiceMap := map[string]string{}\n\tfor _, s := range services {\n\t\tif s.ServiceName != nil && s.TaskDefinition != nil && taskDefMap[*s.TaskDefinition] {\n\t\t\tserviceNames = append(serviceNames, *s.ServiceName)\n\t\t\ttaskToServiceMap[*s.TaskDefinition] = *s.ServiceName\n\t\t}\n\t}\n\n\tmdqs := p.servicesMetricQueries(serviceNames)\n\tif len(mdqs) == 0 {\n\t\treturn ps, nil\n\t}\n\n\tms, err := p.cloudwatchMetrics(mdqs, structs.MetricsOptions{\n\t\tStart: aws.Time(time.Now().Add(-5 * time.Minute)),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmMap := map[string]structs.Metric{}\n\tfor _, m := range ms {\n\t\tmMap[m.Name] = m\n\t}\n\n\tfor i := range ps {\n\t\tif serviceName, has := taskToServiceMap[ps[i].TaskDefinition]; has {\n\t\t\tif m, has := mMap[serviceMetricsKey(\"mem\", serviceName)]; has && len(m.Values) > 0 {\n\t\t\t\t// normally there should be one point but if multiple points are fetched, pick the latest one\n\t\t\t\t// points are sorted in TimestampAscending order\n\t\t\t\tps[i].Memory = m.Values[len(m.Values)-1].Average\n\t\t\t}\n\t\t\tif m, has := mMap[serviceMetricsKey(\"cpu\", serviceName)]; has && len(m.Values) > 0 
{\n\t\t\t\t// normally there should be one point but if multiple points are fetched, pick the latest one\n\t\t\t\t// points are sorted in TimestampAscending order\n\t\t\t\tps[i].Cpu = m.Values[len(m.Values)-1].Average\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ps, nil\n}",
"func (ps *ProcessServer) ListProcessInfo(ctx context.Context) ([]rex.ProcessInfo, error) {\n\tvar infoList []rex.ProcessInfo\n\tps.processes.Range(func(key, value interface{}) bool {\n\t\tinfoList = append(infoList, value.(*processHandle).getProcessInfo())\n\t\treturn true\n\t})\n\tsort.Slice(infoList, func(i, j int) bool {\n\t\treturn infoList[i].Create.After(infoList[j].Create)\n\t})\n\treturn infoList, nil\n}",
"func (m *Alert) GetProcesses()([]Processable) {\n return m.processes\n}",
"func Process() ([]Nps, error) {\n\tvar npsArr []Nps\n\tpid, err := process.Pids()\n\tif err != nil {\n\t\treturn npsArr, err\n\t}\n\n\tfor i := 0; i < len(pid); i++ {\n\t\tnps, _ := process.NewProcess(pid[i])\n\t\tnames, err := nps.Name()\n\t\tif err != nil {\n\t\t\treturn npsArr, err\n\t\t}\n\n\t\tnp := Nps{\n\t\t\tpid[i],\n\t\t\tnames,\n\t\t}\n\n\t\tnpsArr = append(npsArr, np)\n\t}\n\n\treturn npsArr, err\n}",
"func pidList() ([]int, error) {\n\tprocLs, err := ioutil.ReadDir(\"/proc\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadDir(/proc): %s\", err)\n\t}\n\n\tpids := make([]int, 0, len(procLs))\n\tfor _, pInfo := range procLs {\n\t\tif !isDigit(pInfo.Name()[0]) || !pInfo.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tpidInt, err := strconv.Atoi(pInfo.Name())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Atoi(%s): %s\", pInfo.Name(), err)\n\t\t}\n\t\tpids = append(pids, pidInt)\n\t}\n\treturn pids, nil\n}",
"func (client AppsClient) ListProcesses(resourceGroupName string, name string) (result ProcessInfoCollection, err error) {\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+[^\\.]$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewErrorWithValidationError(err, \"web.AppsClient\", \"ListProcesses\")\n\t}\n\n\treq, err := client.ListProcessesPreparer(resourceGroupName, name)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"web.AppsClient\", \"ListProcesses\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListProcessesSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"web.AppsClient\", \"ListProcesses\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.ListProcessesResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"web.AppsClient\", \"ListProcesses\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func (client AppsClient) ListProcessesComplete(resourceGroupName string, name string, cancel <-chan struct{}) (<-chan ProcessInfo, <-chan error) {\n\tresultChan := make(chan ProcessInfo)\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(resultChan)\n\t\t\tclose(errChan)\n\t\t}()\n\t\tlist, err := client.ListProcesses(resourceGroupName, name)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\tif list.Value != nil {\n\t\t\tfor _, item := range *list.Value {\n\t\t\t\tselect {\n\t\t\t\tcase <-cancel:\n\t\t\t\t\treturn\n\t\t\t\tcase resultChan <- item:\n\t\t\t\t\t// Intentionally left blank\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor list.NextLink != nil {\n\t\t\tlist, err = client.ListProcessesNextResults(list)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif list.Value != nil {\n\t\t\t\tfor _, item := range *list.Value {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-cancel:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase resultChan <- item:\n\t\t\t\t\t\t// Intentionally left blank\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn resultChan, errChan\n}",
"func Filter(proc []string) ([]ps.Process, error) {\n\tprocs, err := ps.Processes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(proc) == 0 { // empty list return all\n\t\treturn procs, nil\n\t}\n\n\tlist := []ps.Process{}\n\tfor _, p := range procs {\n\t\tfor _, n := range proc {\n\t\t\tif p.Executable() == n {\n\t\t\t\tlist = append(list, p)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn list, nil\n}",
"func (client AppsClient) ListProcessesResponder(resp *http.Response) (result ProcessInfoCollection, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (client AppsClient) ListInstanceProcessesResponder(resp *http.Response) (result ProcessInfoCollection, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func BuildRunningList() string {\n\tcmd := exec.Command(\"docker\", \"ps\", \"--format\", \"\\\"{{.Image}}\\\"\")\n\n\tvar stdout, stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Printf(\"cmd.Run() failed with %s\\n\", err)\n\t}\n\toutStr, _ := string(stdout.Bytes()), string(stderr.Bytes())\n\treturn outStr\n}",
"func fmtProcesses(\n\tcfg *config.AgentConfig,\n\tprocs, lastProcs map[int32]*procutil.Process,\n\tctrByProc map[int32]string,\n\tsyst2, syst1 cpu.TimesStat,\n\tlastRun time.Time,\n\tconnsByPID map[int32][]*model.Connection,\n) map[int32]*model.Process {\n\tprocsByPID := make(map[int32]*model.Process)\n\tconnCheckIntervalS := int(cfg.CheckIntervals[config.ConnectionsCheckName] / time.Second)\n\n\tfor _, fp := range procs {\n\t\tif skipProcess(cfg, fp, lastProcs) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Hide blacklisted args if the Scrubber is enabled\n\t\tfp.Cmdline = cfg.Scrubber.ScrubProcessCommand(fp)\n\n\t\tproc := &model.Process{\n\t\t\tPid: fp.Pid,\n\t\t\tNsPid: fp.NsPid,\n\t\t\tCommand: formatCommand(fp),\n\t\t\tUser: formatUser(fp),\n\t\t\tMemory: formatMemory(fp.Stats),\n\t\t\tCpu: formatCPU(fp.Stats, fp.Stats.CPUTime, lastProcs[fp.Pid].Stats.CPUTime, syst2, syst1),\n\t\t\tCreateTime: fp.Stats.CreateTime,\n\t\t\tOpenFdCount: fp.Stats.OpenFdCount,\n\t\t\tState: model.ProcessState(model.ProcessState_value[fp.Stats.Status]),\n\t\t\tIoStat: formatIO(fp.Stats, lastProcs[fp.Pid].Stats.IOStat, lastRun),\n\t\t\tVoluntaryCtxSwitches: uint64(fp.Stats.CtxSwitches.Voluntary),\n\t\t\tInvoluntaryCtxSwitches: uint64(fp.Stats.CtxSwitches.Involuntary),\n\t\t\tContainerId: ctrByProc[fp.Pid],\n\t\t\tNetworks: formatNetworks(connsByPID[fp.Pid], connCheckIntervalS),\n\t\t}\n\t\tprocsByPID[proc.Pid] = proc\n\t}\n\n\tcfg.Scrubber.IncrementCacheAge()\n\n\treturn procsByPID\n}",
"func (client AppsClient) ListInstanceProcessesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client,\n\t\treq,\n\t\tazure.DoRetryWithRegistration(client.Client))\n}",
"func (m *Mysql) gatherProcessListStatuses(db *sql.DB, servtag string, acc telegraf.Accumulator) error {\n\t// run query\n\trows, err := db.Query(infoSchemaProcessListQuery)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar (\n\t\tcommand string\n\t\tstate string\n\t\tcount uint32\n\t)\n\n\tfields := make(map[string]interface{})\n\n\t// mapping of state with its counts\n\tstateCounts := make(map[string]uint32, len(generalThreadStates))\n\t// set map with keys and default values\n\tfor k, v := range generalThreadStates {\n\t\tstateCounts[k] = v\n\t}\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&command, &state, &count)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// each state has its mapping\n\t\tfoundState := findThreadState(command, state)\n\t\t// count each state\n\t\tstateCounts[foundState] += count\n\t}\n\n\ttags := map[string]string{\"server\": servtag}\n\tfor s, c := range stateCounts {\n\t\tfields[newNamespace(\"threads\", s)] = c\n\t}\n\tif m.MetricVersion < 2 {\n\t\tacc.AddFields(\"mysql_info_schema\", fields, tags)\n\t} else {\n\t\tacc.AddFields(\"mysql_process_list\", fields, tags)\n\t}\n\n\t// get count of connections from each user\n\tconnRows, err := db.Query(\"SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer connRows.Close()\n\n\tfor connRows.Next() {\n\t\tvar user string\n\t\tvar connections int64\n\n\t\terr = connRows.Scan(&user, &connections)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttags := map[string]string{\"server\": servtag, \"user\": user}\n\t\tfields := make(map[string]interface{})\n\n\t\tfields[\"connections\"] = connections\n\t\tacc.AddFields(\"mysql_users\", fields, tags)\n\t}\n\n\treturn nil\n}",
"func waitForProcessList(cont *Container, want []*control.Process) error {\n\tcb := func() error {\n\t\tgot, err := cont.Processes()\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error getting process data from container: %w\", err)\n\t\t\treturn &backoff.PermanentError{Err: err}\n\t\t}\n\t\tif !procListsEqual(got, want) {\n\t\t\treturn fmt.Errorf(\"container got process list: %s, want: %s\", procListToString(got), procListToString(want))\n\t\t}\n\t\treturn nil\n\t}\n\t// Gives plenty of time as tests can run slow under --race.\n\treturn testutil.Poll(cb, 30*time.Second)\n}",
"func (client AppsClient) ListProcessesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client,\n\t\treq,\n\t\tazure.DoRetryWithRegistration(client.Client))\n}",
"func (client AppsClient) ListInstanceProcessesComplete(resourceGroupName string, name string, instanceID string, cancel <-chan struct{}) (<-chan ProcessInfo, <-chan error) {\n\tresultChan := make(chan ProcessInfo)\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(resultChan)\n\t\t\tclose(errChan)\n\t\t}()\n\t\tlist, err := client.ListInstanceProcesses(resourceGroupName, name, instanceID)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\tif list.Value != nil {\n\t\t\tfor _, item := range *list.Value {\n\t\t\t\tselect {\n\t\t\t\tcase <-cancel:\n\t\t\t\t\treturn\n\t\t\t\tcase resultChan <- item:\n\t\t\t\t\t// Intentionally left blank\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor list.NextLink != nil {\n\t\t\tlist, err = client.ListInstanceProcessesNextResults(list)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif list.Value != nil {\n\t\t\t\tfor _, item := range *list.Value {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-cancel:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase resultChan <- item:\n\t\t\t\t\t\t// Intentionally left blank\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn resultChan, errChan\n}",
"func (client Client) GetAllProcessInfo() (info []ProcessInfo, err error) {\n\tvar results []interface{}\n\tif err = client.RpcClient.Call(\"supervisor.getAllProcessInfo\", nil, &results); err == nil {\n\t\tinfo = make([]ProcessInfo, len(results))\n\t\tfor i, result := range results {\n\t\t\tinfo[i] = newProcessInfo(result.(map[string]interface{}))\n\t\t}\n\t}\n\treturn\n}",
"func (pq *PriorityQueue) List() []*Task {\n\treturn pq.heap\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
candle.rsi() returns the RSI calculation
|
func (c *Candle) rsi() float32 {
return 100.0 - 100.0 / (1 + c.Indicators.AvgUp / c.Indicators.AvgDown)
}
|
[
"func (c *Calc) CalculateRSI(values []float64, indication MAIndication) RSI {\n\tvar (\n\t\trsi RSI\n\t\tgainValues []float64\n\t\tlossValues []float64\n\t\tmaCalcFunc func(values []float64) float64\n\t)\n\tif len(values) < 2 {\n\t\treturn rsi\n\t}\n\tswitch indication {\n\tcase SMAIndication:\n\t\tmaCalcFunc = c.SMACalc\n\tcase EMAIndication:\n\t\tmaCalcFunc = c.EMACalc\n\tcase WMAIndication:\n\t\tmaCalcFunc = c.WMACalc\n\tdefault:\n\t\treturn rsi\n\t}\n\n\tfor i, value := range values {\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tchange := value - values[i-1]\n\t\tif change > 0 {\n\t\t\tgainValues = append(gainValues, change)\n\t\t} else if change < 0 {\n\t\t\tlossValues = append(lossValues, math.Abs(change))\n\t\t}\n\t}\n\n\tgain := maCalcFunc(gainValues)\n\tloss := maCalcFunc(lossValues)\n\tvar rs float64\n\tif loss != 0 {\n\t\trs = gain / loss\n\t} else {\n\t\trs = 100\n\t}\n\trsiVal := 100 - (100 / (1 + rs))\n\trsi.Value = RoundFloat(rsiVal, 3)\n\treturn rsi\n}",
"func Rsi(inReal []float64, optInTimePeriod int) []float64 {\n var outBegIdx int\n var outNBElement int\n n := len(inReal)\n outReal := make([]float64, n)\n ta_Rsi(0, n - 1, (*float64)(&inReal[0]), optInTimePeriod, &outBegIdx, &outNBElement, (*float64)(&outReal[0]))\n outReal = append(make([]float64, outBegIdx), outReal[:outNBElement]...)\n return outReal\n}",
"func (r *RSI) Sum() float64 {\n\treturn r.Value\n}",
"func (t *Ticker) NewRSI(inTimePeriod int32) *RSI {\n\tcalculator := rsiCalculator{Ticker: t, Period: inTimePeriod}\n\treturn &RSI{\n\t\tCalculator: &calculator,\n\t}\n}",
"func (ins *psar) Calculate(newData utils.OHLCV) []float64 {\r\n\tnewPrice := newData.GetByType(ins.priceType)\r\n\r\n\tif math.IsNaN(newPrice) {\r\n\t\treturn []float64{ins.prev}\r\n\t}\r\n\r\n\tins.buf.Add(newData)\r\n\r\n\tins.count++\r\n\r\n\tif ins.count < 2 { //ins.buf.Size {\r\n\t\treturn []float64{math.NaN()}\r\n\t}\r\n\tminTick := 1e-7\r\n\tout := math.NaN()\r\n\tpos := math.NaN()\r\n\tmaxMin := math.NaN()\r\n\tacc := math.NaN()\r\n\tprev := ins.prev\r\n\r\n\toutSet := false\r\n\tif ins.count == 2 {\r\n\t\tif ins.count > 1 && newData.Close > ins.buf.Vals[ins.buf.Capacity-2].Close {\r\n\t\t\tpos = 1\r\n\t\t\tmaxMin = math.Max(newData.High, ins.buf.Vals[ins.buf.Capacity-2].High)\r\n\t\t\tprev = math.Min(newData.Low, ins.buf.Vals[ins.buf.Capacity-2].Low)\r\n\t\t} else {\r\n\t\t\tpos = -1\r\n\t\t\tmaxMin = math.Min(newData.Low, ins.buf.Vals[ins.buf.Capacity-2].Low)\r\n\t\t\tprev = math.Max(newData.High, ins.buf.Vals[ins.buf.Capacity-2].High)\r\n\t\t}\r\n\t\tacc = ins.start\r\n\t} else {\r\n\t\tpos = ins.prevPos\r\n\t\tacc = ins.prevAcc\r\n\t\tmaxMin = ins.prevMaxMin\r\n\t}\r\n\tif pos == 1 {\r\n\t\tif newData.High > maxMin {\r\n\t\t\tmaxMin = newData.High\r\n\t\t\tacc = math.Min(acc+ins.inc, ins.maximum)\r\n\t\t}\r\n\t\tif newData.Low <= prev {\r\n\t\t\tpos = -1\r\n\t\t\tout = maxMin\r\n\t\t\tmaxMin = newData.Low\r\n\t\t\tacc = ins.start\r\n\t\t\toutSet = true\r\n\t\t}\r\n\t} else {\r\n\t\tif newData.Low < maxMin {\r\n\t\t\tmaxMin = newData.Low\r\n\t\t\tacc = math.Min(acc+ins.inc, ins.maximum)\r\n\t\t}\r\n\t\tif newData.High >= prev {\r\n\t\t\tpos = 1\r\n\t\t\tout = maxMin\r\n\t\t\tmaxMin = newData.High\r\n\t\t\tacc = ins.start\r\n\t\t\toutSet = true\r\n\t\t}\r\n\t}\r\n\r\n\tif outSet == false {\r\n\t\tout = prev + acc*(maxMin-prev)\r\n\t\tif pos == 1 {\r\n\t\t\tif out >= newData.Low {\r\n\t\t\t\tout = newData.Low - minTick\r\n\t\t\t}\r\n\t\t}\r\n\t\tif pos == -1 {\r\n\t\t\tif out <= newData.High {\r\n\t\t\t\tout = newData.High + 
minTick\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tins.prevPos = pos\r\n\tins.prevAcc = acc\r\n\tins.prevMaxMin = maxMin\r\n\tins.prev = out\r\n\r\n\treturn []float64{ins.prev}\r\n}",
"func (_this *AdvertisingEvent) Rssi() *int {\n\tvar ret *int\n\tvalue := _this.Value_JS.Get(\"rssi\")\n\tif value.Type() != js.TypeNull && value.Type() != js.TypeUndefined {\n\t\t__tmp := (value).Int()\n\t\tret = &__tmp\n\t}\n\treturn ret\n}",
"func StochRsi(inReal []float64, optInTimePeriod int, optInFastK_Period int, optInFastD_Period int, optInFastD_MAType int, outFastK *float64, outFastD *float64) []float64 {\n var outBegIdx int\n var outNBElement int\n n := len(inReal)\n outReal := make([]float64, n)\n ta_StochRsi(0, n - 1, (*float64)(&inReal[0]), optInTimePeriod, optInFastK_Period, optInFastD_Period, optInFastD_MAType, &outBegIdx, &outNBElement, outFastK, outFastD)\n outReal = append(make([]float64, outBegIdx), outReal[:outNBElement]...)\n return outReal\n}",
"func (s *Strategy) OnSignal(d data.Handler, _ funding.IFundingTransferer, _ portfolio.Handler) (signal.Event, error) {\n\tif d == nil {\n\t\treturn nil, common.ErrNilEvent\n\t}\n\tes, err := s.GetBaseData(d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlatest, err := d.Latest()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes.SetPrice(latest.GetClosePrice())\n\n\tif offset := latest.GetOffset(); offset <= s.rsiPeriod.IntPart() {\n\t\tes.AppendReason(\"Not enough data for signal generation\")\n\t\tes.SetDirection(order.DoNothing)\n\t\treturn &es, nil\n\t}\n\n\tdataRange, err := d.StreamClose()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar massagedData []float64\n\tmassagedData, err = s.massageMissingData(dataRange, es.GetTime())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trsi := indicators.RSI(massagedData, int(s.rsiPeriod.IntPart()))\n\tlatestRSIValue := decimal.NewFromFloat(rsi[len(rsi)-1])\n\thasDataAtTime, err := d.HasDataAtTime(latest.GetTime())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !hasDataAtTime {\n\t\tes.SetDirection(order.MissingData)\n\t\tes.AppendReasonf(\"missing data at %v, cannot perform any actions. RSI %v\", latest.GetTime(), latestRSIValue)\n\t\treturn &es, nil\n\t}\n\n\tswitch {\n\tcase latestRSIValue.GreaterThanOrEqual(s.rsiHigh):\n\t\tes.SetDirection(order.Sell)\n\tcase latestRSIValue.LessThanOrEqual(s.rsiLow):\n\t\tes.SetDirection(order.Buy)\n\tdefault:\n\t\tes.SetDirection(order.DoNothing)\n\t}\n\tes.AppendReasonf(\"RSI at %v\", latestRSIValue)\n\n\treturn &es, nil\n}",
"func Rssi() int32 {\n\t// Generate RSSI. Tend towards generating great signal strength.\n\tx := float64(src.Int31()) * float64(2e-9)\n\treturn int32(-1.6 * math.Exp(x))\n}",
"func (s *Status) IR(cell int) float64 {\n\tif cell < 0 || cell > 8 {\n\t\tpanic(\"invalid cell number\")\n\t}\n\n\tir := float64(s.read2(50+(cell*2))) / 6.3984 / s.VRAmps()\n\tif math.IsNaN(ir) || math.IsInf(ir, 1) || math.IsInf(ir, -1) {\n\t\tir = 0\n\t}\n\treturn ir\n}",
"func (r *RSI) Update(price float64, date time.Time) {\n\tr.Calculator.setPrice(price, date)\n\tr.Calculator.calcRSI()\n\tr.Value = r.Calculator.Result\n}",
"func (c *Client) StockTimeSeriesIntraday(ctx context.Context, timeInterval TimeInterval, symbol string) ([]*TimeSeriesValue, error) {\n\tendpoint := c.buildRequestPath(map[string]string{\n\t\tqueryEndpoint: timeSeriesIntraday.keyName(),\n\t\tqueryInterval: timeInterval.keyName(),\n\t\tquerySymbol: symbol,\n\t})\n\tresponse, err := c.Conn().Request(ctx, endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\treturn parseTimeSeriesData(response.Body)\n}",
"func (c *Client) IndicatorSMA(symbol string, interval Interval, timePeriod int, seriesType string) (*IndicatorSMA, error) {\n\tconst functionName = \"SMA\"\n\turl := fmt.Sprintf(\"%s/query?function=%s&symbol=%s&interval=%s&time_period=%d&series_type=%s&apikey=%s\",\n\t\tbaseURL, functionName, symbol, interval, timePeriod, seriesType, c.apiKey)\n\tbody, err := c.makeHTTPRequest(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindicator, err := toIndicatorSMA(body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse response: %w\", err)\n\t}\n\n\treturn indicator, nil\n}",
"func (ins *mesa) Calculate(newData utils.OHLCV) []float64 {\r\n\tnewPrice := newData.GetByType(ins.priceType)\r\n\r\n\tif math.IsNaN(newPrice) {\r\n\t\treturn []float64{ins.prev}\r\n\t}\r\n\r\n\tins.buf.Add(newData)\r\n\r\n\tfl := 0.2\r\n\tsl := 0.02\r\n\r\n\tsp := 0.0\r\n\tif ins.buf.Pushes >= 4 {\r\n\t\tsp = (4*newData.HL2() + 3*ins.buf.Vals[ins.buf.Capacity-2].HL2() + 2*ins.buf.Vals[ins.buf.Capacity-3].HL2() + ins.buf.Vals[ins.buf.Capacity-4].HL2()) / 10.0\r\n\t}\r\n\r\n\tdt := (0.0962*sp + 0.5769*(ins.spArr.Get(ins.spArr.Capacity-2)) - 0.5769*(ins.spArr.Get(ins.spArr.Capacity-4)) - 0.0962*(ins.spArr.Get(ins.spArr.Capacity-6))) * (0.075*(ins.pArr.Last()) + 0.54)\r\n\tq1 := (0.0962*dt + 0.5769*(ins.dtArr.Get(ins.dtArr.Capacity-2)) - 0.5769*(ins.dtArr.Get(ins.dtArr.Capacity-4)) - 0.0962*(ins.dtArr.Get(ins.dtArr.Capacity-6))) * (0.075*(ins.pArr.Last()) + 0.54)\r\n\r\n\ti1 := (ins.dtArr.Get(ins.dtArr.Capacity - 3))\r\n\tjI := (0.0962*i1 + 0.5769*(ins.i1Arr.Get(ins.i1Arr.Capacity-2)) - 0.5769*(ins.i1Arr.Get(ins.i1Arr.Capacity-4)) - 0.0962*(ins.i1Arr.Get(ins.i1Arr.Capacity-6))) * (0.075*(ins.pArr.Last()) + 0.54)\r\n\r\n\tjq := (0.0962*q1 + 0.5769*(ins.q1Arr.Get(ins.q1Arr.Capacity-2)) - 0.5769*(ins.q1Arr.Get(ins.q1Arr.Capacity-4)) - 0.0962*(ins.q1Arr.Get(ins.q1Arr.Capacity-6))) * (0.075*(ins.pArr.Last()) + 0.54)\r\n\ti2_ := i1 - jq\r\n\tq2_ := q1 + jI\r\n\r\n\ti2 := 0.2*i2_ + 0.8*(ins.i2Arr.Last())\r\n\tq2 := 0.2*q2_ + 0.8*(ins.q2Arr.Last())\r\n\r\n\tre_ := i2*(ins.i2Arr.Last()) + q2*(ins.q2Arr.Last())\r\n\tim_ := i2*(ins.q2Arr.Last()) - q2*(ins.i2Arr.Last())\r\n\tre := 0.2*re_ + 0.8*(ins.reArr.Last())\r\n\tim := 0.2*im_ + 0.8*(ins.imArr.Last())\r\n\r\n\tp1 := 0.0\r\n\tif math.Abs(im) > 0.00000001 && math.Abs(re) > 0.00000001 {\r\n\t\tp1 = 360.0 / math.Atan(im/re)\r\n\t} else {\r\n\t\tp1 = (ins.pArr.Last())\r\n\t}\r\n\r\n\tp2 := 0.0\r\n\tif p1 > 1.5*(ins.p1Arr.Last()) {\r\n\t\tp2 = 1.5 * (ins.p1Arr.Last())\r\n\t} else {\r\n\t\tif p1 < 0.67*(ins.p1Arr.Last()) 
{\r\n\t\t\tp2 = 0.67 * (ins.p1Arr.Last())\r\n\t\t} else {\r\n\t\t\tp2 = p1\r\n\t\t}\r\n\t}\r\n\r\n\tp3 := 0.0\r\n\tif p2 < 6 {\r\n\t\tp3 = 6.0\r\n\t} else {\r\n\t\tif p2 > 50 {\r\n\t\t\tp3 = 50\r\n\t\t} else {\r\n\t\t\tp3 = p2\r\n\t\t}\r\n\t}\r\n\tp := 0.2*p3 + 0.8*(ins.p3Arr.Last())\r\n\r\n\tspp := 0.33*p + 0.67*(ins.sppArr.Last())\r\n\tphase := math.Atan(q1 / i1)\r\n\tdphase_ := (ins.phaseArr.Last()) - phase\r\n\r\n\tdphase := 0.0\r\n\tif dphase_ < 1 {\r\n\t\tdphase = 1\r\n\t} else {\r\n\t\tdphase = dphase_\r\n\t}\r\n\r\n\talpha_ := fl / dphase\r\n\talpha := 0.0\r\n\tif alpha_ < sl {\r\n\t\talpha = sl\r\n\t} else {\r\n\t\tif alpha_ > fl {\r\n\t\t\talpha = fl\r\n\t\t} else {\r\n\t\t\talpha = alpha_\r\n\t\t}\r\n\t}\r\n\r\n\tmama := alpha*newData.HL2() + (1-alpha)*(ins.mamaArr.Last())\r\n\tfama := 0.5*alpha*mama + (1.0-0.5*alpha)*(ins.famaArr.Last())\r\n\r\n\tif !math.IsNaN(sp) && ins.buf.Pushes != 1 {\r\n\t\tins.spArr.Add(sp)\r\n\t}\r\n\tif !math.IsNaN(p) {\r\n\t\tins.pArr.Add(p)\r\n\t}\r\n\tif !math.IsNaN(dt) {\r\n\t\tins.dtArr.Add(dt)\r\n\t}\r\n\tif !math.IsNaN(i1) {\r\n\t\tins.i1Arr.Add(i1)\r\n\t}\r\n\tif !math.IsNaN(i2) {\r\n\t\tins.i2Arr.Add(i2)\r\n\t}\r\n\tif !math.IsNaN(q1) {\r\n\t\tins.q1Arr.Add(q1)\r\n\t}\r\n\tif !math.IsNaN(q2) {\r\n\t\tins.q2Arr.Add(q2)\r\n\t}\r\n\tif !math.IsNaN(im) {\r\n\t\tins.imArr.Add(im)\r\n\t}\r\n\tif !math.IsNaN(re) {\r\n\t\tins.reArr.Add(re)\r\n\t}\r\n\tif !math.IsNaN(p1) {\r\n\t\tins.p1Arr.Add(p1)\r\n\t}\r\n\tif !math.IsNaN(p3) {\r\n\t\tins.p3Arr.Add(p3)\r\n\t}\r\n\tif !math.IsNaN(phase) {\r\n\t\tins.phaseArr.Add(phase)\r\n\t}\r\n\tif !math.IsNaN(fama) {\r\n\t\tins.famaArr.Add(fama)\r\n\t}\r\n\tif !math.IsNaN(mama) {\r\n\t\tins.mamaArr.Add(mama)\r\n\t}\r\n\tif !math.IsNaN(spp) {\r\n\t\tins.sppArr.Add(spp)\r\n\t}\r\n\r\n\tins.prev = fama\r\n\treturn []float64{mama, fama}\r\n}",
"func (frame *LIDAR_frame) rpm() float32 {\n\treturn float32(frame.get_uint16(LIDAR_RPM_OFS)) / 64.0\n}",
"func (msg TempestObservation) SolarRadiation() (float64, error) {\n\tif len(msg.ObservationData) >= 1 && len(msg.ObservationData[0]) != TemperatureObservationMessageParameterCount {\n\t\treturn 0, WeatherFlowMessageLengthError\n\t}\n\n\treturn msg.ObservationData[0][11], nil\n}",
"func (c *Coder) get_vlc_symbol(state *State, bits uint) int32 {\n\ti := state.count\n\tk := uint32(0)\n\n\tfor i < state.error_sum {\n\t\tk++\n\t\ti += i\n\t}\n\n\tv := c.get_sr_golomb(k, bits)\n\n\tif 2*state.drift < -state.count {\n\t\tv = -1 - v\n\t}\n\n\tret := sign_extend(v+state.bias, bits)\n\n\tstate.error_sum += abs32(v)\n\tstate.drift += v\n\n\tif state.count == 128 {\n\t\tstate.count >>= 1\n\t\tstate.drift >>= 1\n\t\tstate.error_sum >>= 1\n\t}\n\tstate.count++\n\tif state.drift <= -state.count {\n\t\tstate.bias = max32(state.bias-1, -128)\n\t\tstate.drift = max32(state.drift+state.count, -state.count+1)\n\t} else if state.drift > 0 {\n\t\tstate.bias = min32(state.bias+1, 127)\n\t\tstate.drift = min32(state.drift-state.count, 0)\n\t}\n\n\treturn ret\n}",
"func stToSiTi(s float64) uint32 {\n\tif s < 0 {\n\t\treturn uint32(s*maxSiTi - 0.5)\n\t}\n\treturn uint32(s*maxSiTi + 0.5)\n}",
"func GetTrend(hprices []t.HistoricalPrice, period int) int {\n\ttrend := t.TrendNo\n\n\tif len(hprices) < 10 || hprices[len(hprices)-1].Open == 0 || period <= 0 {\n\t\treturn trend\n\t}\n\n\tp_0 := hprices[len(hprices)-1]\n\tp_1 := hprices[len(hprices)-2]\n\n\to_0 := p_0.Open\n\n\th_0 := p_0.High\n\th_1 := p_1.High\n\n\tl_0 := p_0.Low\n\tl_1 := p_1.Low\n\n\tc_0 := p_0.Close\n\n\tvar h, l, c []float64\n\tfor _, p := range hprices {\n\t\th = append(h, p.High)\n\t\tl = append(l, p.Low)\n\t\tc = append(c, p.Close)\n\t}\n\n\thwma := talib.WMA(h, period)\n\thma_0 := hwma[len(hwma)-1]\n\n\tlwma := talib.WMA(l, period)\n\tlma_0 := lwma[len(lwma)-1]\n\n\tcwma := talib.WMA(c, period)\n\tcma_0 := cwma[len(cwma)-1]\n\tcma_1 := cwma[len(cwma)-2]\n\tcma_2 := cwma[len(cwma)-3]\n\n\t// Not the J. Welles Wilder Jr.'s ATR\n\tatr := hma_0 - lma_0\n\n\t// Positive slope\n\tif cma_1 < cma_0 {\n\t\ttrend = t.TrendUp1\n\t\t// Higher low, and continued positive slope\n\t\tif l_1 < l_0 && cma_2 < cma_1 {\n\t\t\ttrend = t.TrendUp2\n\t\t\t// Green bar, or moving to top\n\t\t\tif o_0 < c_0 || h_0-c_0 < (c_0-l_0)*0.5 {\n\t\t\t\ttrend = t.TrendUp3\n\t\t\t\t// Low is greater than average close, or long green bar, or narrow upper band\n\t\t\t\tif l_0 > cma_0 || h_0-l_0 > atr || hma_0-cma_0 < (cma_0-lma_0)*0.6 {\n\t\t\t\t\ttrend = t.TrendUp4\n\t\t\t\t\t// Low is greater than average high, or very long green bar\n\t\t\t\t\tif l_0 > hma_0 || h_0-l_0 > 1.25*atr {\n\t\t\t\t\t\ttrend = t.TrendUp5\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// Negative slope\n\tif cma_1 > cma_0 {\n\t\ttrend = t.TrendDown1\n\t\t// Lower high, and continued negative slope\n\t\tif h_1 > h_0 && cma_2 > cma_1 {\n\t\t\ttrend = t.TrendDown2\n\t\t\t// Red bar, or moving to bottom\n\t\t\tif o_0 > c_0 || (h_0-c_0)*0.5 > c_0-l_0 {\n\t\t\t\ttrend = t.TrendDown3\n\t\t\t\t// High is less than average close, or long red bar, or narrow lower band\n\t\t\t\tif h_0 < cma_0 || h_0-l_0 > atr || (hma_0-cma_0)*0.6 > cma_0-lma_0 
{\n\t\t\t\t\ttrend = t.TrendDown4\n\t\t\t\t\t// High is less than average low, or very long red bar\n\t\t\t\t\tif h_0 < lma_0 || h_0-l_0 > 1.25*atr {\n\t\t\t\t\t\ttrend = t.TrendDown5\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn trend\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start will start a session in chosen EC2 instance
|
func Start(input *StartInput) error {
awsConfig := aws.NewConfig()
if *input.AWSRegion != "" {
awsConfig.Region = input.AWSRegion
}
sess := session.Must(session.NewSession(awsConfig))
var instanceID *string
switch *input.TargetType {
case "instance-id":
instanceID = input.Target
case "priv-dns":
id, err := getIDFromPrivDNS(sess, input.Target)
if err != nil {
return err
}
if id == "" {
return fmt.Errorf("no instance with private dns name: %s", *input.Target)
}
instanceID = &id
case "name-tag":
id, err := getIDFromName(sess, input.Target)
if err != nil {
return err
}
if id == "" {
return fmt.Errorf("no instance with name tag: %s", *input.Target)
}
instanceID = &id
default:
return fmt.Errorf("Unsupported target type: %s", *input.Target)
}
ssmClient := ssm.New(sess)
startSessionInput := &ssm.StartSessionInput{
Target: instanceID,
}
output, err := ssmClient.StartSession(startSessionInput)
if err != nil {
return err
}
defer terminateSession(ssmClient, output.SessionId)
log.WithFields(log.Fields{
"sessionID": *output.SessionId,
"streamURL": *output.StreamUrl,
"token": *output.TokenValue,
}).Debug("SSM Start Session Output")
payload, err := json.Marshal(output)
if err != nil {
return err
}
shell := exec.Command("session-manager-plugin", string(payload), *sess.Config.Region, "StartSession")
shell.Stdout = os.Stdout
shell.Stdin = os.Stdin
shell.Stderr = os.Stderr
err = shell.Run()
if err != nil {
return err
}
return nil
}
|
[
"func StartAWSSession(region, profile, mfa string) *session.Session {\n\toptions := session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tAssumeRoleTokenProvider: awsMFATokenProvider(mfa),\n\t}\n\tif profile != \"\" {\n\t\toptions.Profile = profile\n\t}\n\tawsConfig := aws.NewConfig()\n\tif region != \"\" {\n\t\tawsConfig.Region = ®ion\n\t}\n\toptions.Config = *awsConfig\n\tsess := session.Must(session.NewSessionWithOptions(options))\n\treturn sess\n}",
"func (instance *Instance) StartInstance() (err error) {\n\tlaunchTemplateData := ec2.RequestLaunchTemplateData{\n\t\tImageId: instance.AMIID,\n\t\tKeyName: instance.KeyName,\n\t\tUserData: instance.UserData,\n\n\t\tNetworkInterfaces: []*ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{\n\t\t\t&ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{\n\t\t\t\tDeviceIndex: aws.Int64(0),\n\t\t\t\tAssociatePublicIpAddress: aws.Bool(false),\n\t\t\t\tSubnetId: instance.SubnetID,\n\t\t\t\tGroups: instance.SecurityGroupIDs,\n\t\t\t},\n\t\t},\n\n\t\tInstanceInitiatedShutdownBehavior: aws.String(\"terminate\"),\n\t\tInstanceMarketOptions: &ec2.LaunchTemplateInstanceMarketOptionsRequest{\n\t\t\tMarketType: aws.String(\"spot\"),\n\t\t\tSpotOptions: &ec2.LaunchTemplateSpotMarketOptionsRequest{\n\t\t\t\tInstanceInterruptionBehavior: aws.String(\"terminate\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tif *instance.BlockDurationInMinutes > 0 {\n\t\tlaunchTemplateData.InstanceMarketOptions.SpotOptions.BlockDurationMinutes = instance.BlockDurationInMinutes\n\t}\n\n\tif len(*instance.Tags) > 0 {\n\t\tvar ec2Tags []*ec2.Tag\n\t\tfor key, value := range *instance.Tags {\n\t\t\tec2Tags = append(ec2Tags, &ec2.Tag{\n\t\t\t\tKey: aws.String(key),\n\t\t\t\tValue: aws.String(value),\n\t\t\t})\n\t\t}\n\n\t\tlaunchTemplateData.TagSpecifications = []*ec2.LaunchTemplateTagSpecificationRequest{\n\t\t\t&ec2.LaunchTemplateTagSpecificationRequest{\n\t\t\t\tResourceType: aws.String(\"instance\"),\n\t\t\t\tTags: ec2Tags,\n\t\t\t},\n\t\t\t&ec2.LaunchTemplateTagSpecificationRequest{\n\t\t\t\tResourceType: aws.String(\"volume\"),\n\t\t\t\tTags: ec2Tags,\n\t\t\t},\n\t\t}\n\t}\n\n\tif instance.IamInstanceProfile != nil {\n\t\tlaunchTemplateData.IamInstanceProfile = &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{Name: instance.IamInstanceProfile}\n\t}\n\n\tlaunchTemplate := &ec2.CreateLaunchTemplateInput{\n\t\tLaunchTemplateData: &launchTemplateData,\n\t\tLaunchTemplateName: 
instance.LaunchTemplateName,\n\t\tVersionDescription: aws.String(\"template generated by pentaho-cli for launching instances\"),\n\t}\n\n\t// Add overrides for each instance type\n\tvar overrides []*ec2.FleetLaunchTemplateOverridesRequest\n\tfor _, instanceType := range *instance.InstanceTypes {\n\t\toverride := ec2.FleetLaunchTemplateOverridesRequest{\n\t\t\tInstanceType: aws.String(instanceType),\n\t\t}\n\t\toverrides = append(overrides, &override)\n\t}\n\n\t// Create the fleet\n\tcreateFleetInput := &ec2.CreateFleetInput{\n\t\tLaunchTemplateConfigs: []*ec2.FleetLaunchTemplateConfigRequest{\n\t\t\t{\n\t\t\t\tLaunchTemplateSpecification: &ec2.FleetLaunchTemplateSpecificationRequest{\n\t\t\t\t\tLaunchTemplateName: instance.LaunchTemplateName,\n\t\t\t\t\tVersion: aws.String(\"1\"),\n\t\t\t\t},\n\t\t\t\tOverrides: overrides,\n\t\t\t},\n\t\t},\n\t\tReplaceUnhealthyInstances: aws.Bool(false),\n\t\tTargetCapacitySpecification: &ec2.TargetCapacitySpecificationRequest{\n\t\t\tTotalTargetCapacity: aws.Int64(1),\n\t\t\tDefaultTargetCapacityType: aws.String(\"spot\"),\n\t\t},\n\t\tType: aws.String(\"instant\"),\n\t}\n\n\t// Tell EC2 to create the template\n\t_, err = ec2Svc.CreateLaunchTemplate(launchTemplate)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating launch template for instance: %s\", err)\n\t}\n\n\t// Create the request for the instance.\n\treq, createOutput := ec2Svc.CreateFleetRequest(createFleetInput)\n\n\t// Send the fleet creation request with backoff\n\tvar retryCount int\n\tbackoffWithRetries := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), uint64(*instance.CreateFleetRetries))\n\n\t// var createOutput *ec2.CreateFleetOutput\n\toperation := func() error {\n\t\terr := req.Send()\n\t\tif err == nil && createOutput.Instances != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err == nil && len(createOutput.Errors) > 0 {\n\t\t\terr = fmt.Errorf(\"%s\", *createOutput.Errors[0].ErrorMessage)\n\t\t}\n\t\t// schedule next retry if errornous\n\t\tif err 
!= nil {\n\t\t\tt := time.Now()\n\t\t\tt = t.Add(backoffWithRetries.NextBackOff())\n\t\t\tfmt.Printf(\"error creating fleet (attempt %d of %d). Will retry %s: %s\\n\", retryCount, *instance.CreateFleetRetries, humanize.Time(t), err)\n\t\t}\n\n\t\t// TODO: Alter launch request to create a OnDemand Instance instead.\n\t\t// TODO: Launch on-demand if unable to fill capacity requirements - The below logic doesn't work because at least 1 spot instance request is required in a spot fleet request\n\t\t// // switch to on-demand after n attempts\n\t\t// if retryCount > *instance.MaxSpotRetries {\n\t\t// \tfmt.Println(\"switching to on-demand\")\n\t\t// \tcreateFleetInput.TargetCapacitySpecification.DefaultTargetCapacityType = aws.String(\"on-demand\")\n\t\t// }\n\n\t\tretryCount++\n\t\treturn err\n\t}\n\n\tbackoffErr := backoff.Retry(operation, backoffWithRetries)\n\tif backoffErr != nil {\n\t\tif createOutput.FleetId != nil {\n\t\t\treturn fmt.Errorf(\"Error waiting for fleet request (%s): %s\", *createOutput.FleetId, backoffErr)\n\t\t}\n\t\treturn fmt.Errorf(\"Error waiting for fleet request: %s\", backoffErr)\n\t}\n\tinstanceInput := ec2.DescribeInstancesInput{\n\t\tInstanceIds: createOutput.Instances[0].InstanceIds,\n\t}\n\tfmt.Printf(\"Launching %s %s instance: %s\\n\", *createOutput.Instances[0].InstanceType, *createOutput.Instances[0].Lifecycle, *createOutput.Instances[0].InstanceIds[0])\n\terr = ec2Svc.WaitUntilInstanceRunning(&instanceInput)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error waiting for instance to start running\")\n\t}\n\n\tdescribeInstancesOutput, err := ec2Svc.DescribeInstances(&instanceInput)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error describing instance\")\n\t}\n\n\tinstance.Reservation = describeInstancesOutput.Reservations[0]\n\n\tfor _, ri := range instance.Reservation.Instances {\n\t\tinstance.PrivateIPAddress = ri.PrivateIpAddress\n\t\tinstance.InstanceID = ri.InstanceId\n\t\tinstance.SelectedInstanceType = 
ri.InstanceType\n\t}\n\n\tif instance.PrivateIPAddress == nil {\n\t\treturn errors.New(\"looks like it didn't get created\")\n\t}\n\n\tdescSpot, err := ec2Svc.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"instance-id\"),\n\t\t\t\tValues: []*string{instance.InstanceID},\n\t\t\t}},\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to describe spot instance request: %s\", err)\n\t}\n\n\tfor _, sp := range descSpot.SpotInstanceRequests {\n\t\tif sp.ActualBlockHourlyPrice != nil {\n\t\t\tinstance.SpotPrice = sp.ActualBlockHourlyPrice\n\t\t} else {\n\t\t\tinstance.SpotPrice = sp.SpotPrice\n\t\t}\n\t}\n\n\treturn nil\n}",
"func EC2InstancesStart(event EC2InstancesStartEvent) (*ec2.StartInstancesOutput, error) {\n\n\t// log the received event, this will write the raw event to the\n\t// CloudWatch log stream\n\tlog.Println(\"loading function...\")\n\tlog.Println(\"received event:\", event.Instances)\n\n\t// if no EC2 instance names were provided by the event, return an error.\n\tif event.Instances == nil {\n\t\treturn nil, fmt.Errorf(\"no instance names were specified in triggering event %v\", event)\n\t}\n\n\t// using the IAM credentials asigned to the Lambda function, establish\n\t// a session in the 'us-west-2' AWS Region. If a session cannot be\n\t// established, return an empty string and the error returned by the\n\t// AWS SDK NewSession(...) method.\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(\"us-west-2\")})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create a new instance of the EC2 client using the 'us-west-2' session\n\tsvc := ec2.New(sess)\n\tif svc == nil {\n\t\treturn nil, fmt.Errorf(\"failed to create EC2 client for us-west-2 session. 
session.Config follows: %v\", sess.Config)\n\t}\n\n\t// declare a variable to hold the result of the AWS SDK call to\n\t// ec2.StartInstances(...)\n\tvar result *ec2.StartInstancesOutput\n\n\t// Iterate through the slice of EC2 instances provided by the incoming\n\t// event and build a slice of string pointers as required be the AWS\n\t// SDK ec2.StartInstancesInput struct.\n\t// Next, call the ec2.StartInstances method with the input structure.\n\t// Errors / new system statuses will be returned to the caller.\n\tvar instIds []*string\n\tfor _, inst := range event.Instances {\n\t\tinstIds = append(instIds, aws.String(inst))\n\t}\n\n\tinput := &ec2.StartInstancesInput{\n\t\tAdditionalInfo: nil,\n\t\tInstanceIds: instIds,\n\t\tDryRun: aws.Bool(false), // convert to *\n\t}\n\n\tresult, err = svc.StartInstances(input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s\", err)\n\t}\n\n\t// no error, also no result(possible?)\n\tif result == nil || result.StartingInstances == nil {\n\t\treturn nil, fmt.Errorf(\"instance start for instances %v returned no information - status unknown\", instIds)\n\t}\n\n\treturn result, nil\n}",
"func StartAWSSessions(secretsCache wranglerv1.SecretCache, spec eksv1.EKSClusterConfigSpec) (*session.Session, *eks.EKS, error) {\n\tsess, err := newAWSSession(secretsCache, spec)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error getting new aws session: %v\", err)\n\t}\n\treturn sess, eks.New(sess), nil\n}",
"func (c *Client) StartInstance(serviceName, instanceName string, dynamicConfig map[string]string) error {\n\tdata, err := json.Marshal(dynamicConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.cli.Put(data, c.ver+\"/services/%s/instances/%s/start\", serviceName, instanceName)\n\treturn err\n}",
"func (d *Driver) Start() error {\n\tcs := d.client()\n\t_, err := cs.RequestWithContext(context.TODO(), &egoscale.StartVirtualMachine{\n\t\tID: d.ID,\n\t})\n\n\treturn err\n}",
"func (c *Client) StartInstances(request *StartInstancesRequest) (response *StartInstancesResponse, err error) {\n if request == nil {\n request = NewStartInstancesRequest()\n }\n response = NewStartInstancesResponse()\n err = c.Send(request, response)\n return\n}",
"func init() {\n\tregion := flag.StringP(\"region\", \"r\", \"us-east-1\", \"Use given AWS region. Default: us-east-1\")\n\tprofile := flag.StringP(\"profile\", \"p\", \"default\", \"Use given AWS profile. Default: default\")\n\tverbose = flag.BoolP(\"verbose\", \"v\", false, \"Be verbose. Default: false\")\n\tflag.Parse()\n\n\tsession, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(*region),\n\t\tCredentials: credentials.NewSharedCredentials(\"\", *profile),\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error\", err)\n\t\tos.Exit(1)\n\t}\n\n\tec2Svc = ec2.New(session)\n}",
"func (m *Machine) Start() error {\n\tswitch m.State {\n\tcase driver.Running:\n\t\tmsg := fmt.Sprintf(\"VM %s has already been started\", m.Name)\n\t\tfmt.Println(msg)\n\t\treturn nil\n\tcase driver.Poweroff:\n\t\t// TODO add transactional or error handling in the following steps\n\t\tvcConn := NewVcConn(&cfg)\n\t\terr := vcConn.VmPowerOn(m.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// this step waits for the vm to start and fetch its ip address;\n\t\t// this guarantees that the opem-vmtools has started working...\n\t\t_, err = vcConn.VmFetchIp(m.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Fprintf(os.Stdout, \"Configuring virtual machine %s... \", m.Name)\n\t\terr = vcConn.GuestMkdir(\"docker\", \"tcuser\", m.Name, \"/home/docker/.ssh\")\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stdout, \"failed!\\n\")\n\t\t\treturn err\n\t\t}\n\t\terr = vcConn.GuestUpload(\"docker\", \"tcuser\", m.Name, m.SshPubKey,\n\t\t\t\"/home/docker/.ssh/authorized_keys\")\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stdout, \"failed!\\n\")\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(os.Stdout, \"ok!\\n\")\n\t}\n\treturn nil\n}",
"func AWSSession(loadConfig, profile, region string) (sess *session.Session, err error) {\n\n\tif os.Getenv(loadConfig) == \"true\" {\n\t\tsess, err = session.NewSession(&aws.Config{\n\t\t\tCredentials: credentials.NewSharedCredentials(\"\", profile),\n\t\t})\n\t} else {\n\t\tsess, err = session.NewSession(&aws.Config{\n\t\t\tRegion: aws.String(region),\n\t\t\tCredentials: credentials.NewSharedCredentials(\"\", profile),\n\t\t})\n\t}\n\n\treturn\n}",
"func StartSession(containerInstanceArn string, credentialProvider credentials.AWSCredentialProvider, cfg *config.Config, taskEngine engine.TaskEngine, ecsclient api.ECSClient, stateManager statemanager.StateManager, acceptInvalidCert bool) error {\n\tbackoff := utils.NewSimpleBackoff(time.Second, 1*time.Minute, 0.2, 2)\n\treturn utils.RetryWithBackoff(backoff, func() error {\n\t\tacsEndpoint, err := ecsclient.DiscoverPollEndpoint(containerInstanceArn)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to discover poll endpoint\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\n\t\turl := AcsWsUrl(acsEndpoint, cfg.Cluster, containerInstanceArn, taskEngine)\n\n\t\tclient := acsclient.New(url, cfg.AWSRegion, credentialProvider, acceptInvalidCert)\n\n\t\tclient.AddRequestHandler(payloadMessageHandler(client, cfg.Cluster, containerInstanceArn, taskEngine, ecsclient, stateManager))\n\t\tclient.AddRequestHandler(heartbeatHandler(client))\n\n\t\tupdater.AddAgentUpdateHandlers(client, cfg, stateManager, taskEngine)\n\n\t\terr = client.Connect()\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error connecting to ACS: \" + err.Error())\n\t\t\treturn err\n\t\t}\n\t\treturn client.Serve()\n\t})\n}",
"func (v *Vsphere) StartInstance(ctx *Context, instancename string) error {\n\tf := find.NewFinder(v.client, true)\n\n\tdc, err := f.DatacenterOrDefault(context.TODO(), v.datacenter)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\n\tf.SetDatacenter(dc)\n\n\tvms, err := f.VirtualMachineList(context.TODO(), instancename)\n\tif err != nil {\n\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\tfmt.Println(\"can't find vm \" + instancename)\n\t\t}\n\t\tfmt.Println(err)\n\t}\n\n\ttask, err := vms[0].PowerOn(context.TODO())\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\t_, err = task.WaitForResult(context.TODO(), nil)\n\treturn err\n}",
"func (i *Instance) Start() error {\n\tcmd := initCommand(\"instance\", \"start\")\n\n\tcmd = append(cmd, i.ImageURI, i.Name)\n\n\tif i.Cleanenv {\n\t\tcmd = append(cmd, \"--cleanenv\")\n\t}\n\n\tvar err error\n\tvar status = 1\n\tif !i.running {\n\t\t_, _, status, err = runCommand(cmd, &instanceOpts)\n\t\tif status == 0 {\n\t\t\ti.running = true\n\t\t}\n\t}\n\treturn err\n}",
"func (m *sessionManager) Start(w http.ResponseWriter, r *http.Request) (session *Session, err error) {\n\tsession = NewSession()\n\n\tvar raw string\n\tvar phpSession phpencode.PhpSession\n\n\tsessionID := m.getFromCookies(r.Cookies())\n\n\tif sessionID == \"\" {\n\t\tsessionID = m.sidCreator.CreateSID()\n\t\tsession.SessionID = sessionID\n\t\t// http.SetCookie(w, &http.Cookie{\n\t\t// \tName: m.sessionName,\n\t\t// \tValue: sessionID,\n\t\t// \tHttpOnly: m.config.CookieHttpOnly,\n\t\t// \tPath: m.config.CookiePath,\n\t\t// \tDomain: m.config.CookieDomain,\n\t\t// })\n\n\t\tw.Header().Add(\"Set-Cookie\", m.SetCookieString(sessionID))\n\t\treturn\n\t}\n\n\tsession.SessionID = sessionID\n\traw, err = Read(sessionID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tphpSession, err = m.encoder.Decode(raw)\n\tif err != nil {\n\t\treturn\n\t}\n\tsession.Value = phpSession\n\n\treturn\n}",
"func (s *session) launch() {\n\t// Mark the session as started here, as we want to avoid double initialization.\n\tif s.started.Swap(true) {\n\t\ts.log.Debugf(\"Session has already started\")\n\t\treturn\n\t}\n\n\ts.log.Debug(\"Launching session\")\n\ts.BroadcastMessage(\"Connecting to %v over SSH\", s.serverMeta.ServerHostname)\n\n\ts.io.On()\n\n\tif err := s.tracker.UpdateState(s.serverCtx, types.SessionState_SessionStateRunning); err != nil {\n\t\ts.log.Warnf(\"Failed to set tracker state to %v\", types.SessionState_SessionStateRunning)\n\t}\n\n\t// If the identity is verified with an MFA device, we enabled MFA-based presence for the session.\n\tif s.presenceEnabled {\n\t\tgo func() {\n\t\t\tticker := s.registry.clock.NewTicker(PresenceVerifyInterval)\n\t\t\tdefer ticker.Stop()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ticker.Chan():\n\t\t\t\t\terr := s.checkPresence(s.serverCtx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.log.WithError(err).Error(\"Failed to check presence, terminating session as a security measure\")\n\t\t\t\t\t\ts.Stop()\n\t\t\t\t\t}\n\t\t\t\tcase <-s.stopC:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// copy everything from the pty to the writer. this lets us capture all input\n\t// and output of the session (because input is echoed to stdout in the pty).\n\t// the writer contains multiple writers: the session logger and a direct\n\t// connection to members of the \"party\" (other people in the session).\n\ts.term.AddParty(1)\n\tgo func() {\n\t\tdefer s.term.AddParty(-1)\n\n\t\t// once everything has been copied, notify the goroutine below. if this code\n\t\t// is running in a teleport node, when the exec.Cmd is done it will close\n\t\t// the PTY, allowing io.Copy to return. 
if this is a teleport forwarding\n\t\t// node, when the remote side closes the channel (which is what s.term.PTY()\n\t\t// returns) io.Copy will return.\n\t\tdefer close(s.doneCh)\n\n\t\t_, err := io.Copy(s.io, s.term.PTY())\n\t\ts.log.Debugf(\"Copying from PTY to writer completed with error %v.\", err)\n\t}()\n\n\ts.term.AddParty(1)\n\tgo func() {\n\t\tdefer s.term.AddParty(-1)\n\n\t\t_, err := io.Copy(s.term.PTY(), s.io)\n\t\ts.log.Debugf(\"Copying from reader to PTY completed with error %v.\", err)\n\t}()\n}",
"func StartSpecific(portnumber int) *mgo.Session {\n\tsession, err := mgo.Dial(\"localhost:\" + strconv.Itoa(portnumber))\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\treturn session\n}",
"func Start(c cookoo.Context, p *cookoo.Params) (interface{}, cookoo.Interrupt) {\n\tdargs := []string{\"-e\", \"-D\"}\n\n\tsshd := exec.Command(\"/usr/sbin/sshd\", dargs...)\n\tsshd.Stdout = os.Stdout\n\tsshd.Stderr = os.Stderr\n\n\tif err := sshd.Start(); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn sshd.Process.Pid, nil\n}",
"func (c *ActClient) startProcessInstance(s ActStartProcessInstance) (*ActProcessInstance, error) {\n\tpi := &ActProcessInstance{}\n\n\treq, err := c.NewRequest(\"POST\", fmt.Sprintf(\"%s%s\", c.BaseURL, \"/runtime/process-instances\"), s)\n\tif err != nil {\n\t\treturn pi, err\n\t}\n\n\tif err = c.SendWithBasicAuth(req, pi); err != nil {\n\t\treturn pi, err\n\t}\n\n\treturn pi, nil\n}",
"func (s *session) startSession(cgroupID uint64) error {\n\tcgroupMap, err := s.module.GetMap(monitoredCGroups)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tdummyVal := 0\n\terr = cgroupMap.Update(unsafe.Pointer(&cgroupID), unsafe.Pointer(&dummyVal))\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start pulls data from nsq
|
// Start pulls data from NSQ. It stores the accumulator, connects the
// consumer, registers a concurrent handler that parses each message body
// into metrics and feeds them to the accumulator, and finally connects the
// consumer to the configured nsqlookupd and/or nsqd endpoints.
//
// Returns an error if connecting to nsqlookupd or nsqd fails; previously
// those errors were silently discarded even though go-nsq documents both
// ConnectToNSQLookupds and ConnectToNSQDs as returning one.
func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
	n.acc = acc
	// NOTE(review): any result of connect() is discarded here — confirm its
	// signature and propagate an error if it returns one.
	n.connect()
	n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
		metrics, err := n.parser.Parse(message.Body)
		if err != nil {
			// Report the parse failure but keep consuming; returning nil
			// tells go-nsq the message was handled.
			acc.AddError(fmt.Errorf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()))
			return nil
		}
		for _, metric := range metrics {
			n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
		}
		message.Finish()
		return nil
	}), n.MaxInFlight)
	if len(n.Nsqlookupd) > 0 {
		if err := n.consumer.ConnectToNSQLookupds(n.Nsqlookupd); err != nil {
			return err
		}
	}
	return n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server))
}
|
[
"func (item *Task) NSQStart() {\n\titem.NSQMessage.DisableAutoResponse()\n\tinterval := time.Duration(2) * time.Minute\n\tticker := time.NewTicker(interval)\n\tstopChannel := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\titem.NSQMessage.Touch()\n\t\t\tcase <-stopChannel:\n\t\t\t\tticker.Stop()\n\t\t\t\titem.tickerStopped = true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\titem.nsqStartCalled = true\n\titem.nsqStopChannel = stopChannel\n}",
"func (r *Puber) queryLookupd() {\r\n\tretries := 0\r\n\r\nretry:\r\n\tendpoint := r.nextLookupdEndpoint()\r\n\r\n\tr.log(LogLevelInfo, \"querying nsqlookupd %s\", endpoint)\r\n\r\n\tvar data nodesResp\r\n\terr := apiRequestNegotiateV1(\"GET\", endpoint, nil, &data)\r\n\tif err != nil {\r\n\t\tr.log(LogLevelError, \"error querying nsqlookupd (%s) - %s\", endpoint, err)\r\n\t\tretries++\r\n\t\tif retries < 3 {\r\n\t\t\tr.log(LogLevelInfo, \"retrying with next nsqlookupd\")\r\n\t\t\tgoto retry\r\n\t\t}\r\n\t\treturn\r\n\t}\r\n\r\n\tvar nsqdAddrs []string\r\n\tfor _, producer := range data.Producers {\r\n\t\tbroadcastAddress := producer.BroadcastAddress\r\n\t\tport := producer.TCPPort\r\n\t\tjoined := net.JoinHostPort(broadcastAddress, strconv.Itoa(port))\r\n\t\tnsqdAddrs = append(nsqdAddrs, joined)\r\n\t}\r\n\t// apply filter\r\n\tif discoveryFilter, ok := r.behaviorDelegate.(DiscoveryFilter); ok {\r\n\t\tnsqdAddrs = discoveryFilter.Filter(nsqdAddrs)\r\n\t}\r\n\tfor _, addr := range nsqdAddrs {\r\n\t\terr = r.connectToNSQD(addr)\r\n\t\tif err != nil && err != ErrAlreadyConnected {\r\n\t\t\tr.log(LogLevelError, \"(%s) error connecting to nsqd - %s\", addr, err)\r\n\t\t\tcontinue\r\n\t\t}\r\n\t}\r\n}",
"func (p *Pager) Start() {\n\tfor {\n\t\tif len(p.queue) > 0 {\n\t\t\tc := p.queue[0]\n\t\t\tcopy(p.queue[0:], p.queue[1:])\n\t\t\tp.queue[len(p.queue)-1] = Call{}\n\t\t\tp.queue = p.queue[:len(p.queue)-1]\n\t\t\tb := new(bytes.Buffer)\n\t\t\tencoder := json.NewEncoder(b)\n\t\t\tencoder.Encode(c)\n\t\t\treq, err := http.NewRequest(\"POST\", dapnetURL+\"calls\", b)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error creating request:\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t\t\treq.SetBasicAuth(p.username, p.password)\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error executing request: \" + err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif resp.StatusCode > 299 {\n\t\t\t\tdata, _ := ioutil.ReadAll(resp.Body)\n\t\t\t\tlog.Println(\"Status code greater 299:\", resp.StatusCode, \"\", string(data))\n\t\t\t}\n\t\t}\n\t}\n}",
"func StartMasterRetrieval(stop chan bool, filter filters.Filter,\n\tinitialDelay int, timeBetweenQueries int) {\n\tretrticker := time.NewTicker(time.Duration(timeBetweenQueries) * time.Second)\n\n\tlogger.WriteDebug(\n\t\t\"Waiting %d seconds before grabbing %s servers. Will retrieve servers every %d secs afterwards.\", initialDelay, filter.Game.Name, timeBetweenQueries)\n\n\tlogger.LogAppInfo(\n\t\t\"Waiting %d seconds before grabbing %s servers from master. Will retrieve every %d secs afterwards.\", initialDelay, filter.Game.Name, timeBetweenQueries)\n\n\tfirstretrieval := time.NewTimer(time.Duration(initialDelay) * time.Second)\n\t<-firstretrieval.C\n\tlogger.WriteDebug(\"Starting first retrieval of %s servers from master.\",\n\t\tfilter.Game.Name)\n\tsl, err := retrieve(filter)\n\tif err != nil {\n\t\tlogger.LogAppErrorf(\"Error when performing timed master retrieval: %s\", err)\n\t}\n\tmodels.MasterList = sl\n\n\tfor {\n\t\tselect {\n\t\tcase <-retrticker.C:\n\t\t\tgo func(filters.Filter) {\n\t\t\t\tlogger.WriteDebug(\"%s: Starting %s master server query\", time.Now().Format(\n\t\t\t\t\t\"Mon Jan 2 15:04:05 2006 EST\"), filter.Game.Name)\n\t\t\t\tlogger.LogAppInfo(\"%s: Starting %s master server query\", time.Now().Format(\n\t\t\t\t\t\"Mon Jan 2 15:04:05 2006 EST\"), filter.Game.Name)\n\t\t\t\tsl, err := retrieve(filter)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.LogAppErrorf(\"Error when performing timed master retrieval: %s\",\n\t\t\t\t\t\terr)\n\t\t\t\t}\n\t\t\t\tmodels.MasterList = sl\n\t\t\t}(filter)\n\t\tcase <-stop:\n\t\t\tretrticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (q *queue) startThread() {\n\t// I'm still learning, no bully\n\tq.threadCount++\n\tfor {\n\t\tselect {\n\t\tcase <-q.stop:\n\t\t\tfmt.Println(\"Stopping the queue\")\n\t\t\treturn\n\t\tcase s := <-q.next:\n\t\t\tq.inProgress++\n\t\t\tfunc() {\n\t\t\t\tfmt.Print(\"Downloading: \", s.ID())\n\t\t\t\tdbkey := s.SiteName() + s.ID()\n\t\t\t\tstr := db.Get(dbkey)\n\t\t\t\tif str != \"\" {\n\t\t\t\t\tfmt.Printf(\"\\nFound %s in database\\n\", dbkey)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttime.Sleep(time.Second * 2)\n\n\t\t\t\textra, err := s.GetDetails()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\" by %s\\n\", s.User().Name())\n\n\t\t\t\tfor _, esub := range extra {\n\t\t\t\t\tfmt.Printf(\"Downloading extra submission %s\\n\", esub.ID())\n\t\t\t\t\tif db.Get(esub.SiteName()+esub.ID()) != \"\" {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\terr = q.download(esub)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tdb.Store(esub.SiteName()+esub.ID(), esub.FileURL())\n\t\t\t\t\ttime.Sleep(time.Second * 2)\n\t\t\t\t}\n\n\t\t\t\terr = q.download(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tdb.Store(dbkey, s.FileURL())\n\t\t\t}()\n\t\t\tq.inProgress--\n\t\t}\n\t}\n}",
"func (s *Server) startQuery(qr types.QueryResult, pipe chan types.BreweryResult) {\n\n\trequest := qr.Query\n\tpage := 0\n\tworkingPage := request\n\n\t// if qr.Filter.Page == 0 loop for all\n\t// else break after first\n\tfor {\n\n\t\tif qr.Filter.Page == 0 {\n\t\t\tpage += 1\n\t\t\tworkingPage = fmt.Sprintf(\"%s&page=%d\", request, page)\n\t\t}\n\n\t\tresp, err := s.queryImp(workingPage)\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"working=%s\\n\", workingPage)\n\t\t\tfmt.Println(err)\n\t\t\tclose(pipe)\n\t\t\treturn\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\n\t\tvar b []types.BreweryResult\n\n\t\tif err = json.NewDecoder(resp.Body).Decode(&b); err != nil {\n\t\t\tlog.Printf(\"oops\")\n\t\t}\n\n\t\tif resp.StatusCode > 299 {\n\t\t\tlog.Printf(\"oops\")\n\t\t}\n\n\t\tfmt.Printf(\"count=%v\\n\", len(b))\n\n\t\tif len(b) < 1 {\n\t\t\tfmt.Printf(\"storing request=%s\\n\", request)\n\t\t\t// cache\n\t\t\t// queryResults[qr.Filter] = qr\n\t\t\tif pipe != nil {\n\t\t\t\tclose(pipe) // end here\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// add a page and empty Breweries slice\n\t\t// cache\n\t\t// qr.Pages = append(qr.Pages, types.Page{ID: page, Breweries: make([]types.BreweryResult, 0)})\n\n\t\tfor _, brewery := range b {\n\t\t\t//write to channel\n\t\t\tif pipe != nil {\n\t\t\t\tfmt.Println(brewery.Name)\n\t\t\t\tpipe <- brewery\n\t\t\t}\n\t\t\t// cache\n\t\t\t// qr.Pages[page-1].Breweries = append(qr.Pages[page-1].Breweries, brewery)\n\t\t}\n\n\t\tif qr.Filter.Page != 0 {\n\t\t\tif pipe != nil {\n\t\t\t\tclose(pipe) // end here\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t//\t\tif page == 1 && pipe != nil {\n\t\t//\t\t\tclose(pipe)\n\t\t//\t\t}\n\t}\n\n}",
"func (a *Archiver) GetData(streamids []string, start, end uint64, query_uot UnitOfTime) ([]SmapResponse, error) {\n\treturn a.tsdb.GetData(streamids, start, end, query_uot)\n}",
"func Startup() {\n\tMapData = make(map[string]int64)\n\tMapServer = make(map[string]TCPHandler)\n\tdb := ardb.DbConn()\n\tcol := ardb.CreateCollection(db)\n\n\tif col == nil {\n\t\tlog.Println(\"collection not created\")\n\t}\n\n\tquery := \"FOR d IN tcpproxy RETURN d\"\n\tcursor, err := db.Query(nil, query, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer cursor.Close()\n\n\tts := ardb.TotalServer{}\n\n\tfor {\n\t\tmeta, err := cursor.ReadDocument(nil, &ts.Ps)\n\t\tif driver.IsNoMoreDocuments(err) {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tts.ID = meta.Key\n\n\t\tif ts.Ps.Active == true {\n\t\t\tserver := StartServer(ts)\n\t\t\tMapServer[ts.ID] = server\n\t\t\t//server.Tid <- ts.ID\n\t\t}\n\t}\n\n\tlog.Println(\"startup completed\")\n}",
"func (n *NSQ) connect() (err error) {\n\tcfg := nsq.NewConfig()\n\tcfg.UserAgent = n.conf.NSQ.UserAgent\n\tcfg.MaxInFlight = n.conf.NSQ.MaxInFlight\n\n\tif n.consumer, err = nsq.NewConsumer(n.conf.NSQ.Topic, n.conf.NSQ.Channel, cfg); err != nil {\n\t\treturn\n\t}\n\n\tn.consumer.SetLogger(n.log, nsq.LogLevelWarning)\n\tn.consumer.AddHandler(n)\n\n\tif err = n.consumer.ConnectToNSQDs(n.conf.NSQ.Addresses); err != nil {\n\t\treturn\n\t}\n\tif err = n.consumer.ConnectToNSQLookupds(n.conf.NSQ.LookupAddresses); err != nil {\n\t\treturn\n\t}\n\treturn\n}",
"func (s *Serial) startReadTagsLoop(srv *server) {\n\t// inventory check, TODO: if resp status = 0x94 More data available - read more with mode = 0x01\n\tt := time.Tick(500 * time.Millisecond)\n\tfor _ = range t {\n\t\ts.ReadTagsInRange(srv)\n\t}\n}",
"func (c *Client) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\tmsq, err := c.stream.Recv()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Connection closed %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.updateChan <- *msq\n\t\t\tlog.Infof(\"Received response from sds server %v\", msq)\n\t\t\tif err := ValidateResponse(msq); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to validate sds response %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (f *QueueFetcher) StartFetching(requestBus chan server.Request) {\n\tfor {\n\t\tplainRequest, _ := f.queue.Pop()\n\t\tvar request server.Request\n\t\tjson.Unmarshal([]byte(plainRequest), &request)\n\t\trequestBus <- request\n\t}\n}",
"func (f *fetcher) start() {\n\tlog4go.Debug(\"Starting new fetcher\")\n\tfor f.crawlNewHost() {\n\t\t// Crawl until told to stop...\n\t}\n\tlog4go.Debug(\"Stopping fetcher\")\n\tclose(f.done)\n}",
"func (cl *ConsulLookup) start() {\n\tvar closed = false\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor range time.NewTicker(cl.pollIntervalSec * time.Second).C {\n\t\t\tendpoints, err := cl.lookup()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error discovering service %s - %s\", cl.serviceName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Printf(\"Discovered services %s\", endpoints)\n\n\t\t\tif !closed {\n\t\t\t\tclose(done)\n\t\t\t\tclosed = true\n\t\t\t}\n\n\t\t\tcl.endpointsMu.Lock()\n\t\t\tcl.endpoints = endpoints\n\t\t\tcl.endpointsMu.Unlock()\n\t\t}\n\t}()\n\n\t<-done\n}",
"func (p *coreV1) GetNSSByRange(start, end time.Time) (resp *NSSResponse, err error) {\n var req *http.Request\n req, err = http.NewRequest(http.MethodGet, insightsEndpoint(p.client.Config(), \"nss\"), nil)\n if !start.IsZero() && !end.IsZero() {\n if end.Before(start) {\n return nil, ErrorInvalidDateRange\n }\n query := req.URL.Query()\n query.Set(\"start\", strconv.FormatInt(start.UnixNano(), 10))\n query.Set(\"end\", strconv.FormatInt(end.UnixNano(), 10))\n req.URL.RawQuery = query.Encode()\n } else if start.IsZero() != end.IsZero() {\n return nil, stackAfter(ErrorInvalidDateRange, \"start and end time both must be given\")\n }\n var httpResp *http.Response\n if httpResp, err = p.client.Do(req); err == nil {\n if httpResp.ContentLength > 0 {\n resp = &NSSResponse{}\n err = json.NewDecoder(httpResp.Body).Decode(resp)\n } else {\n resp = &NSSResponse{Status: &Status{Code: 0, Message: \"OK\"}}\n }\n }\n return\n}",
"func getData() {\n\tvar resp Response\n\tvar dataItem DataItem\n\t// new ticker that ticks every UPDATE_INTERVAL milliseconds\n\tticker := time.NewTicker(time.Millisecond * UPDATE_INTERVAL)\n\tdefer ticker.Stop()\n\tfor range ticker.C {\n\t\tresp.DataItems = nil\n\t\t// Read 10 values from address 0\n\t\tresults, err := client.ReadHoldingRegisters(address, 1)\n\t\tif err != nil {\n\t\t\t//some error happened\n\t\t\tstatus.SetCurrentValue(\"Connection error\")\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\t\tdataItem.data = results\n\t\tresp.DataItems = append(resp.DataItems, dataItem)\n\t\tData <- &resp\n\t\t// stop Goroutine if stop was pressed\n\t\tselect {\n\t\tcase <-Stop_getData:\n\t\t\tstatus.SetCurrentValue(\"It's true!\")\n\t\t\tfmt.Println(\"Goroutine Stops.\")\n\t\t\treturn\n\t\tdefault:\n\t\t\tstatus.SetCurrentValue(\"Keep running\")\n\t\t}\n\t}\n}",
"func newNsqReader(r *Room, channelName string) error {\n\n\tcfg := nsq.NewConfig()\n\tcfg.Set(\"LookupdPollInterval\", config.LookupdPollInterval*time.Second)\n\tcfg.Set(\"MaxInFlight\", config.MaxInFlight)\n\tcfg.UserAgent = fmt.Sprintf(\"Chat client go-nsq/%s\", nsq.VERSION)\n\n\tnsqConsumer, err := nsq.NewConsumer(config.TopicName, channelName, cfg)\n\n\tif err != nil {\n\t\tlog.Println(\"Create newNsqReader error: \", err)\n\t\treturn err\n\t}\n\n\tnsqReader := &NsqReader{\n\t\tchannelName: channelName,\n\t\trooms: map[*Room]bool{r: true},\n\t}\n\tr.nsqReaders[channelName] = nsqReader\n\n\tnsqConsumer.AddHandler(nsqReader)\n\n\tnsqErr := nsqConsumer.ConnectToNSQLookupd(config.AddrNsqlookupd)\n\tif nsqErr != nil {\n\t\tlog.Println(\"NSQ connection error: \", nsqErr)\n\t\treturn err\n\t}\n\tnsqReader.consumer = nsqConsumer\n\tlog.Printf(\"Subscribe to NSQ success to channel %s\", channelName)\n\n\treturn nil\n}",
"func (y *yahooSearch) startSearch(term string){\r\n\tfmt.Printf(\"Browser: %v; Search Engine: Yahoo; Search term: %v\\n\", y.browser, term)\r\n}",
"func main() {\n\n\tlogger.Println(\"Application is Starting\")\n\n\tlogSettings()\n\tmqsamputils.EnvSettings.LogSettings()\n\n\tnumConnections := mqsamputils.EnvSettings.GetConnectionCount()\n\tlogger.Printf(\"There are %d connections\", numConnections)\n\n\tfor i := 0; i < numConnections; i++ {\n\t\tqMgr, err := mqsamputils.CreateConnection(i)\n\t\tdefer qMgr.Disc()\n\t\tif err != nil {\n\t\t\tmqret := err.(*ibmmq.MQReturn)\n\t\t\tif mqret.MQRC == ibmmq.MQRC_Q_MGR_NOT_AVAILABLE {\n\t\t\t\tlogger.Println(\"Queue Manager not available, skipping this endpoint\")\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlogger.Fatalln(\"Unable to Establish Connection to server\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\n\t\tqObject, err := mqsamputils.OpenGetQueue(qMgr, mqsamputils.Get, i)\n\t\tif err != nil {\n\t\t\tlogger.Fatalln(\"Unable to Open Queue\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer qObject.Close(0)\n\n\t\tgetMessage(qObject)\n\n\t\t// Need to Close the Queue and Connection so can be reused for next iteration\n\t\tqObject.Close(0)\n\t\tqMgr.Disc()\n\n\t}\n\n\tlogger.Println(\"Application is Ending\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
init functions cannot be called directly; they are executed automatically when the program starts. pc[i] is the population count of i (stored as a byte). For example, if i = 27, then i in binary is 11011 and pc[i] = 100 (4 in base 10)
|
// init precomputes the population-count table: pc[i] holds the number of
// set bits in i. For example, 27 in binary is 11011, so pc[27] is 4.
// init cannot be called directly; Go runs it automatically at startup.
func init() {
	// Recurrence: popcount(i) = popcount(i>>1) + lowest bit of i.
	// pc[0] is already the zero value, so indexing can begin at 1.
	for i := 1; i < len(pc); i++ {
		pc[i] = pc[i>>1] + byte(i&1)
	}
}
|
[
"func UA_ServiceCounterDataType_init(p []UA_ServiceCounterDataType) {\n\tnoarch.Memset(p, byte(0), 8)\n}",
"func UA_ServerState_init(p []int) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func UA_MonitoringMode_init(p []int) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func (vm *VM) init() {\n\n\t// set program counter to beginning of the memory\n\tvm.pc = 0x200\n\n\t// init audiochan\n\tvm.audioCh = make(chan struct{})\n\n\t// TODO: load fontset\n\t// vm.loadFontset()\n\n\t// TODO: init keypad\n\n}",
"func (c *Channel) init(ctx context.Context, initBals *channel.Allocation, initData channel.Data) error {\n\treturn c.machine.Init(ctx, *initBals, initData)\n}",
"func initCpu() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tfor i := 0; i < len(cpu.code); i++ {\n\t\tcpu.code[i] = 0x00\n\t}\n\tfor i := 0; i < len(cpu.data); i++ {\n\t\tcpu.data[i] = 0x00\n\t}\n\tfor i := 0; i < len(cpu.regs); i++ {\n\t\tcpu.regs[i] = 0x00\n\t}\n\tfor i := 0; i < len(cpu.in); i++ {\n\t\tcpu.in[i] = 0x00\n\t}\n\tfor i := 0; i < len(cpu.out); i++ {\n\t\tcpu.out[i] = 0x00\n\t}\n\tcpu.pc = 0x0000\n\tcpu.pchold = 0x0000\n\tcpu.flagc = false\n\tcpu.flagz = true\n\tcpu.cycles = 0\n\tcpu.running = false\n\n\tholdCpu = cpu\n}",
"func UA_Byte_init(p []UA_Byte) {\n\tnoarch.Memset(p, byte(0), 1)\n}",
"func UA_RegisterServerRequest_init(p []UA_RegisterServerRequest) {\n\tnoarch.Memset(p, byte(0), 880)\n}",
"func (ic *Intcode) PC() (pc int64) {\n\tpc = ic.pc\n\treturn\n}",
"func UA_RedundancySupport_init(p []int) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func UA_FilterOperator_init(p []int) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func UA_DataChangeTrigger_init(p []int) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func UA_ServerOnNetwork_init(p []UA_ServerOnNetwork) {\n\tnoarch.Memset(p, byte(0), 56)\n}",
"func UA_Int32_init(p []UA_Int32) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func UA_DiagnosticInfo_init(p []UA_DiagnosticInfo) {\n\tnoarch.Memset(p, byte(0), 72)\n}",
"func UA_RegisteredServer_init(p []UA_RegisteredServer) {\n\tnoarch.Memset(p, byte(0), 104)\n}",
"func UA_ApplicationType_init(p []int) {\n\tnoarch.Memset(p, byte(0), 4)\n}",
"func UA_CallMethodRequest_init(p []UA_CallMethodRequest) {\n\tnoarch.Memset(p, byte(0), 256)\n}",
"func UA_StatusCode_init(p []UA_StatusCode) {\n\tnoarch.Memset(p, byte(0), 4)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
extrapolatedRate is a utility function for rate/increase/delta. It calculates the rate (allowing for counter resets if isCounter is true), extrapolates if the first/last sample is close to the boundary, and returns the result as either per-second (if isRate is true) or overall.
|
// extrapolatedRate is a utility function for rate/increase/delta. It
// calculates the rate over the range (compensating for counter resets when
// isCounter is true), extrapolates toward the range boundaries when the
// first/last sample is close to them, and returns the result either
// per-second (if isRate is true) or as an overall delta.
//
// Timestamps (sample.T, stepTime, selectRange, offset) are in milliseconds;
// the /1000 divisions below convert them to seconds.
// Assumes samples is non-empty (samples[0] is accessed unconditionally) and
// has at least two elements — TODO confirm callers guarantee this.
func extrapolatedRate(samples []sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (float64, *histogram.FloatHistogram) {
	var (
		rangeStart = stepTime - (selectRange + offset)
		rangeEnd = stepTime - offset
		resultValue float64
		resultHistogram *histogram.FloatHistogram
	)
	// Native-histogram series: delegate the rate computation to histogramRate.
	// Otherwise compute the float delta, adding back every pre-reset peak so
	// counter resets (value drops) do not produce negative increases.
	if samples[0].H != nil {
		resultHistogram = histogramRate(samples, isCounter)
	} else {
		resultValue = samples[len(samples)-1].F - samples[0].F
		if isCounter {
			var lastValue float64
			for _, sample := range samples {
				if sample.F < lastValue {
					resultValue += lastValue
				}
				lastValue = sample.F
			}
		}
	}
	// Duration between first/last Samples and boundary of range.
	durationToStart := float64(samples[0].T-rangeStart) / 1000
	durationToEnd := float64(rangeEnd-samples[len(samples)-1].T) / 1000
	sampledInterval := float64(samples[len(samples)-1].T-samples[0].T) / 1000
	averageDurationBetweenSamples := sampledInterval / float64(len(samples)-1)
	if isCounter && resultValue > 0 && samples[0].F >= 0 {
		// Counters cannot be negative. If we have any slope at
		// all (i.e. resultValue went up), we can extrapolate
		// the zero point of the counter. If the duration to the
		// zero point is shorter than the durationToStart, we
		// take the zero point as the start of the series,
		// thereby avoiding extrapolation to negative counter
		// values.
		durationToZero := sampledInterval * (samples[0].F / resultValue)
		if durationToZero < durationToStart {
			durationToStart = durationToZero
		}
	}
	// If the first/last Samples are close to the boundaries of the range,
	// extrapolate the result. This is as we expect that another sample
	// will exist given the spacing between Samples we've seen thus far,
	// with an allowance for noise.
	extrapolationThreshold := averageDurationBetweenSamples * 1.1
	extrapolateToInterval := sampledInterval
	if durationToStart < extrapolationThreshold {
		extrapolateToInterval += durationToStart
	} else {
		// Sample is far from the boundary: extrapolate only half the
		// average sample spacing instead of all the way to the edge.
		extrapolateToInterval += averageDurationBetweenSamples / 2
	}
	if durationToEnd < extrapolationThreshold {
		extrapolateToInterval += durationToEnd
	} else {
		extrapolateToInterval += averageDurationBetweenSamples / 2
	}
	// Scale the sampled delta up to the extrapolated interval; for rate,
	// additionally divide by the range length in seconds.
	factor := extrapolateToInterval / sampledInterval
	if isRate {
		factor /= float64(selectRange / 1000)
	}
	if resultHistogram == nil {
		resultValue *= factor
	} else {
		resultHistogram.Mul(factor)
	}
	return resultValue, resultHistogram
}
|
[
"func extendedRate(samples []sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) {\n\tvar (\n\t\trangeStart = stepTime - (selectRange + offset)\n\t\trangeEnd = stepTime - offset\n\t\tresultValue float64\n\t\tresultHistogram *histogram.FloatHistogram\n\t)\n\n\tif samples[0].H != nil {\n\t\t// TODO - support extended rate for histograms\n\t\tresultHistogram = histogramRate(samples, isCounter)\n\t\treturn resultValue, resultHistogram\n\t}\n\n\tsameVals := true\n\tfor i := range samples {\n\t\tif i > 0 && samples[i-1].F != samples[i].F {\n\t\t\tsameVals = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// This effectively injects a \"zero\" series for xincrease if we only have one sample.\n\t// Only do it for some time when the metric appears the first time.\n\tuntil := selectRange + metricAppearedTs\n\tif isCounter && !isRate && sameVals {\n\t\t// Make sure we are not at the end of the range.\n\t\tif stepTime-offset <= until {\n\t\t\treturn samples[0].F, nil\n\t\t}\n\t}\n\n\tsampledInterval := float64(samples[len(samples)-1].T - samples[0].T)\n\taverageDurationBetweenSamples := sampledInterval / float64(len(samples)-1)\n\n\tfirstPoint := 0\n\t// Only do this for not xincrease\n\tif !(isCounter && !isRate) {\n\t\t// If the point before the range is too far from rangeStart, drop it.\n\t\tif float64(rangeStart-samples[0].T) > averageDurationBetweenSamples {\n\t\t\tif len(samples) < 3 {\n\t\t\t\treturn resultValue, nil\n\t\t\t}\n\t\t\tfirstPoint = 1\n\t\t\tsampledInterval = float64(samples[len(samples)-1].T - samples[1].T)\n\t\t\taverageDurationBetweenSamples = sampledInterval / float64(len(samples)-2)\n\t\t}\n\t}\n\n\tvar (\n\t\tcounterCorrection float64\n\t\tlastValue float64\n\t)\n\tif isCounter {\n\t\tfor i := firstPoint; i < len(samples); i++ {\n\t\t\tsample := samples[i]\n\t\t\tif sample.F < lastValue {\n\t\t\t\tcounterCorrection += lastValue\n\t\t\t}\n\t\t\tlastValue = 
sample.F\n\t\t}\n\t}\n\tresultValue = samples[len(samples)-1].F - samples[firstPoint].F + counterCorrection\n\n\t// Duration between last sample and boundary of range.\n\tdurationToEnd := float64(rangeEnd - samples[len(samples)-1].T)\n\t// If the points cover the whole range (i.e. they start just before the\n\t// range start and end just before the range end) adjust the value from\n\t// the sampled range to the requested range.\n\t// Only do this for not xincrease.\n\tif !(isCounter && !isRate) {\n\t\tif samples[firstPoint].T <= rangeStart && durationToEnd < averageDurationBetweenSamples {\n\t\t\tadjustToRange := float64(selectRange / 1000)\n\t\t\tresultValue = resultValue * (adjustToRange / (sampledInterval / 1000))\n\t\t}\n\t}\n\n\tif isRate {\n\t\tresultValue = resultValue / float64(selectRange/1000)\n\t}\n\n\treturn resultValue, nil\n}",
"func (r *RateTracer) Rate() float64 {\n\treturn r.RateAt(time.Now())\n}",
"func RATE(nper int, pmt float64, pv float64, fv float64, due float64, guess float64) float64 {\n\trate := guess\n\ti := 0\n\tvar x0, x1, y, f float64\n\n\tx1 = rate\n\n\tlamda := func(rate float64) float64 {\n\t\tif math.Abs(rate) < Accuracy {\n\t\t\treturn pv*(1+float64(nper)*rate) + pmt*(1+rate*due)*float64(nper) + fv\n\t\t}\n\n\t\tf = math.Exp(float64(nper) * math.Log(1+rate))\n\t\treturn pv*f + pmt*(1/rate+due)*(f-1) + fv\n\t}\n\n\ty = lamda(rate)\n\n\ty0 := pv + pmt*float64(nper) + fv\n\ty1 := pv*f + pmt*(1/rate+due)*(f-1) + fv\n\n\t// find root by secant method\n\tfor (math.Abs(y0-y1) > Accuracy) && (i < MaxIterations) {\n\t\trate = (y1*x0 - y0*x1) / (y1 - y0)\n\t\tx0 = x1\n\t\tx1 = rate\n\n\t\ty = lamda(rate)\n\n\t\ty0 = y1\n\t\ty1 = y\n\t\ti++\n\t}\n\n\treturn rate\n}",
"func steppedRate(r, step uint64) uint64 {\n\tsteps := math.Round(float64(r) / float64(step))\n\tif steps == 0 {\n\t\treturn step\n\t}\n\treturn uint64(math.Round(steps * float64(step)))\n}",
"func (r *RateTracer) RateAt(instant time.Time) float64 {\n\treturn (metric.ReadCounter(r.Counter) - r.base) / instant.Sub(r.start).Seconds()\n}",
"func (rl *RateLimiter) Estimate() time.Duration {\n\treturn time.Duration((float64(rl.l.Rate.Period) * (1 + 0.5*rl.b)) / float64(rl.l.Rate.Limit))\n}",
"func (l *AIMDLimit) BackOffRatio() float64 {\n\tl.mu.RLock()\n\tdefer l.mu.RUnlock()\n\treturn l.backOffRatio\n}",
"func (src *StandardRateCounter) sampleCountAndUpdateRate(currentSampleTimeMs int64) {\n\t// Record newest up to date second sample time. Clear rate.\n\tsrc.lastSampleTimeMs = currentSampleTimeMs\n\n\t// Advance head and write values.\n\tsrc.headIndex = src.advance(src.headIndex)\n\tsrc.timestampsMs[src.headIndex] = currentSampleTimeMs\n\n\tsrc.lastCount = atomic.LoadInt64(&src.counter)\n\tsrc.counts[src.headIndex] = src.lastCount\n\n\t// Ensure tail is always ahead of head.\n\tif src.tailIndex == src.headIndex {\n\t\tsrc.tailIndex = src.advance(src.tailIndex)\n\t}\n\n\t// Advance the 'tail' to the newest sample which is at least windowTimeMs old.\n\tfor {\n\t\tnextWindowStart := src.advance(src.tailIndex)\n\t\tif nextWindowStart == src.headIndex ||\n\t\t\tsrc.timestampsMs[src.headIndex]-src.timestampsMs[nextWindowStart] < src.windowSizeMs {\n\t\t\tbreak\n\t\t}\n\t\tsrc.tailIndex = nextWindowStart\n\t}\n\n\ttimeDeltaMs := src.timestampsMs[src.headIndex] - src.timestampsMs[src.tailIndex]\n\tif timeDeltaMs == 0 {\n\t\tsrc.lastRate = 0.0\n\t} else {\n\t\tif timeDeltaMs > src.windowSizeMs {\n\t\t\ttimeDeltaMs = src.windowSizeMs\n\t\t}\n\n\t\tdeltaTimeSecs := timeDeltaMs / 1000.0\n\t\tdeltaCount := src.counts[src.headIndex] - src.counts[src.tailIndex]\n\t\tif deltaTimeSecs <= 0.0 {\n\t\t\tsrc.lastRate = 0\n\t\t} else {\n\t\t\tsrc.lastRate = float64(deltaCount) / float64(deltaTimeSecs)\n\t\t}\n\t}\n}",
"func perSecond(prev, curr Datapoint, flags FeatureFlags) Datapoint {\n\tif prev.TimeNanos >= curr.TimeNanos || math.IsNaN(prev.Value) || math.IsNaN(curr.Value) {\n\t\treturn emptyDatapoint\n\t}\n\tdiff := curr.Value - prev.Value\n\tif diff < 0 {\n\t\treturn emptyDatapoint\n\t}\n\trate := diff * float64(nanosPerSecond) / float64(curr.TimeNanos-prev.TimeNanos)\n\treturn Datapoint{TimeNanos: curr.TimeNanos, Value: rate}\n}",
"func growthRateIncrease(rate float64, max time.Duration) func(t *testing.T, i int, last, delay time.Duration) {\n\treturn func(t *testing.T, i int, last, delay time.Duration) {\n\t\tvar expected time.Duration\n\t\tif i == 0 {\n\t\t\tassert.Greater(t, delay, expected)\n\t\t\treturn\n\t\t}\n\t\t// the expectation is that we added the growth rate percentage from the last\n\t\t// amount to get the new amount.\n\t\texpected = last + time.Duration(float64(last)/100*rate)\n\n\t\t// if our expected value (after adding the growth rate) is greater\n\t\t// than the actual value, then we must have reached our upper bound limit\n\t\t// assert that the returned delay matches our expected max upper bound.\n\t\tif expected > delay {\n\t\t\tassert.Equal(t, max, delay)\n\t\t\treturn\n\t\t}\n\n\t\t// jitter is applied to the delay making the returned delay\n\t\t// anywhere from 75% up to 100% of the actual exponential growth value.\n\t\texpected -= time.Duration(float64(expected) / 100 * 0.75)\n\t\tassert.GreaterOrEqual(t, delay, expected)\n\t}\n}",
"func (r *RampRate) NegSlopeRate() uint16 {\n\treturn binary.BigEndian.Uint16(r[:2])\n}",
"func NewRate(fps float64, dropFrame bool) (Rate, error) {\n\trate := Rate{}\n\n\tfps = math.Round(fps*100) / 100\n\n\tif fps < 1 {\n\t\treturn rate, fmt.Errorf(\"rate must be at least 1 fps but got: %f\", fps)\n\t}\n\n\ttimeBase := math.Round(fps)\n\n\treturn Rate{\n\t\tfps: fps,\n\t\ttimeBase: timeBase,\n\t\tdropFrame: dropFrame,\n\t}, nil\n}",
"func (s *CappedSender) Rate(metric string, value float64, hostname string, tags []string) {\n\tcapValue, found := s.rateCaps[metric]\n\tif !found { // Metric not capped, skip capping system\n\t\ts.Sender.Rate(metric, value, hostname, tags)\n\t\treturn\n\t}\n\n\t// Previous value lookup\n\tsort.Strings(tags)\n\tcacheKeyParts := []string{rateCappingCacheKey, metric, hostname}\n\tcacheKeyParts = append(cacheKeyParts, tags...)\n\tcacheKey := cache.BuildAgentKey(cacheKeyParts...)\n\tprevious, found := s.getPoint(cacheKey)\n\n\tif !found {\n\t\t// First submit of the rate for that context\n\t\ts.storePoint(cacheKey, value, s.timestamp)\n\t\ts.Sender.Rate(metric, value, hostname, tags)\n\t\treturn\n\t}\n\n\ttimeDelta := s.timestamp.Sub(previous.time).Seconds()\n\tif timeDelta == 0 {\n\t\t// Let's avoid a divide by zero and pass through, the aggregator will handle it\n\t\ts.storePoint(cacheKey, value, s.timestamp)\n\t\ts.Sender.Rate(metric, value, hostname, tags)\n\t\treturn\n\t}\n\trate := (value - previous.value) / timeDelta\n\tif rate < capValue {\n\t\t// Under cap, transmit\n\t\ts.storePoint(cacheKey, value, s.timestamp)\n\t\ts.Sender.Rate(metric, value, hostname, tags)\n\t\treturn\n\t}\n\n\t// Over cap, store but don't transmit\n\tlog.Debugf(\"Dropped latest value %.0f (raw sample: %.0f) of metric %s as it was above the cap for this metric.\", rate, value, metric)\n\ts.storePoint(cacheKey, value, s.timestamp)\n}",
"func (er *ExchangeRates) GetRateRatio(base, dest string) (float64, error) {\n\n\t// Check if the currency rate is available on our database.\n\tbaseRate, isExist := er.rates[base]\n\tif !isExist {\n\t\treturn 0, fmt.Errorf(\"Unknown rate: %s\", base)\n\t}\n\tdestRate, isExist := er.rates[dest]\n\tif !isExist {\n\t\treturn 0, fmt.Errorf(\"Unknown rate: %s\", dest)\n\t}\n\treturn destRate / baseRate, nil\n\n}",
"func (q *statsQueue) Rate() (float64, float64) {\n\tfront, back := q.frontAndBack()\n\n\tif front == nil || back == nil {\n\t\treturn 0, 0\n\t}\n\n\tif time.Now().Sub(back.Time()) > time.Second {\n\t\tq.Clear()\n\t\treturn 0, 0\n\t}\n\n\tsampleDuration := back.Time().Sub(front.Time())\n\n\tpr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)\n\n\tbr := float64(q.PkgSize()) / float64(sampleDuration) * float64(time.Second)\n\n\treturn pr, br\n}",
"func (l *StoreLimit) Rate() float64 {\n\treturn l.bucket.Rate() / float64(l.regionInfluence)\n}",
"func (r *RampRate) PosSlopeRate() uint16 {\n\treturn binary.BigEndian.Uint16(r[2:])\n}",
"func (_Crowdsale *CrowdsaleCaller) Rate(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Crowdsale.contract.Call(opts, out, \"rate\")\n\treturn *ret0, err\n}",
"func (c CheckRateCalculator) RateLimiter() (Limiter, error) {\n\tvar rateOfChecks rate.Limit\n\tif c.MaxChecksPerSecond == -1 {\n\t\t// UNLIMITED POWER\n\t\trateOfChecks = rate.Inf\n\t} else if c.MaxChecksPerSecond == 0 {\n\t\t// Fetch the number of checkables (resource config scopes) in the database\n\t\tcheckableCount, err := c.CheckableCounter.CheckableCount()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Calculate the number of checks that need to be run per second in order\n\t\t// to check all the checkables within the resource checking interval\n\t\teverythingRate := float64(checkableCount) / c.ResourceCheckingInterval.Seconds()\n\n\t\trateOfChecks = rate.Limit(everythingRate)\n\t} else {\n\t\trateOfChecks = rate.Limit(c.MaxChecksPerSecond)\n\t}\n\n\treturn rate.NewLimiter(rateOfChecks, 1), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
extendedRate is a utility function for xrate/xincrease/xdelta. It calculates the rate (allowing for counter resets if isCounter is true), taking into account the last sample before the range start, and returns the result as either per-second (if isRate is true) or overall.
|
// extendedRate is a utility function for xrate/xincrease/xdelta. It
// calculates the rate over samples (correcting for counter resets if
// isCounter is true), taking into account the last sample before the range
// start, and returns the result as per-second (if isRate is true) or as the
// overall change. Histogram support is currently delegated to histogramRate.
func extendedRate(samples []sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) {
	var (
		// Boundaries of the evaluated window, in milliseconds.
		rangeStart      = stepTime - (selectRange + offset)
		rangeEnd        = stepTime - offset
		resultValue     float64
		resultHistogram *histogram.FloatHistogram
	)
	if samples[0].H != nil {
		// TODO - support extended rate for histograms
		resultHistogram = histogramRate(samples, isCounter)
		return resultValue, resultHistogram
	}
	// Detect whether every float sample in the window has the same value.
	sameVals := true
	for i := range samples {
		if i > 0 && samples[i-1].F != samples[i].F {
			sameVals = false
			break
		}
	}
	// This effectively injects a "zero" series for xincrease if we only have one sample.
	// Only do it for some time when the metric appears the first time.
	until := selectRange + metricAppearedTs
	if isCounter && !isRate && sameVals {
		// Make sure we are not at the end of the range.
		if stepTime-offset <= until {
			return samples[0].F, nil
		}
	}
	// Interval actually covered by the samples, and the mean spacing between them.
	sampledInterval := float64(samples[len(samples)-1].T - samples[0].T)
	averageDurationBetweenSamples := sampledInterval / float64(len(samples)-1)
	firstPoint := 0
	// Only do this for not xincrease
	if !(isCounter && !isRate) {
		// If the point before the range is too far from rangeStart, drop it.
		if float64(rangeStart-samples[0].T) > averageDurationBetweenSamples {
			if len(samples) < 3 {
				return resultValue, nil
			}
			firstPoint = 1
			// Recompute interval/spacing without the dropped leading point.
			sampledInterval = float64(samples[len(samples)-1].T - samples[1].T)
			averageDurationBetweenSamples = sampledInterval / float64(len(samples)-2)
		}
	}
	var (
		counterCorrection float64
		lastValue         float64
	)
	if isCounter {
		// Accumulate the value lost at each counter reset (a sample lower
		// than its predecessor) so the total increase is preserved.
		for i := firstPoint; i < len(samples); i++ {
			sample := samples[i]
			if sample.F < lastValue {
				counterCorrection += lastValue
			}
			lastValue = sample.F
		}
	}
	resultValue = samples[len(samples)-1].F - samples[firstPoint].F + counterCorrection
	// Duration between last sample and boundary of range.
	durationToEnd := float64(rangeEnd - samples[len(samples)-1].T)
	// If the points cover the whole range (i.e. they start just before the
	// range start and end just before the range end) adjust the value from
	// the sampled range to the requested range.
	// Only do this for not xincrease.
	if !(isCounter && !isRate) {
		if samples[firstPoint].T <= rangeStart && durationToEnd < averageDurationBetweenSamples {
			adjustToRange := float64(selectRange / 1000)
			resultValue = resultValue * (adjustToRange / (sampledInterval / 1000))
		}
	}
	if isRate {
		// Convert the overall change into a per-second rate.
		resultValue = resultValue / float64(selectRange/1000)
	}
	return resultValue, nil
}
|
[
"func extrapolatedRate(samples []sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (float64, *histogram.FloatHistogram) {\n\tvar (\n\t\trangeStart = stepTime - (selectRange + offset)\n\t\trangeEnd = stepTime - offset\n\t\tresultValue float64\n\t\tresultHistogram *histogram.FloatHistogram\n\t)\n\n\tif samples[0].H != nil {\n\t\tresultHistogram = histogramRate(samples, isCounter)\n\t} else {\n\t\tresultValue = samples[len(samples)-1].F - samples[0].F\n\t\tif isCounter {\n\t\t\tvar lastValue float64\n\t\t\tfor _, sample := range samples {\n\t\t\t\tif sample.F < lastValue {\n\t\t\t\t\tresultValue += lastValue\n\t\t\t\t}\n\t\t\t\tlastValue = sample.F\n\t\t\t}\n\t\t}\n\t}\n\n\t// Duration between first/last Samples and boundary of range.\n\tdurationToStart := float64(samples[0].T-rangeStart) / 1000\n\tdurationToEnd := float64(rangeEnd-samples[len(samples)-1].T) / 1000\n\n\tsampledInterval := float64(samples[len(samples)-1].T-samples[0].T) / 1000\n\taverageDurationBetweenSamples := sampledInterval / float64(len(samples)-1)\n\n\tif isCounter && resultValue > 0 && samples[0].F >= 0 {\n\t\t// Counters cannot be negative. If we have any slope at\n\t\t// all (i.e. resultValue went up), we can extrapolate\n\t\t// the zero point of the counter. If the duration to the\n\t\t// zero point is shorter than the durationToStart, we\n\t\t// take the zero point as the start of the series,\n\t\t// thereby avoiding extrapolation to negative counter\n\t\t// values.\n\t\tdurationToZero := sampledInterval * (samples[0].F / resultValue)\n\t\tif durationToZero < durationToStart {\n\t\t\tdurationToStart = durationToZero\n\t\t}\n\t}\n\n\t// If the first/last Samples are close to the boundaries of the range,\n\t// extrapolate the result. 
This is as we expect that another sample\n\t// will exist given the spacing between Samples we've seen thus far,\n\t// with an allowance for noise.\n\textrapolationThreshold := averageDurationBetweenSamples * 1.1\n\textrapolateToInterval := sampledInterval\n\n\tif durationToStart < extrapolationThreshold {\n\t\textrapolateToInterval += durationToStart\n\t} else {\n\t\textrapolateToInterval += averageDurationBetweenSamples / 2\n\t}\n\tif durationToEnd < extrapolationThreshold {\n\t\textrapolateToInterval += durationToEnd\n\t} else {\n\t\textrapolateToInterval += averageDurationBetweenSamples / 2\n\t}\n\tfactor := extrapolateToInterval / sampledInterval\n\tif isRate {\n\t\tfactor /= float64(selectRange / 1000)\n\t}\n\tif resultHistogram == nil {\n\t\tresultValue *= factor\n\t} else {\n\t\tresultHistogram.Mul(factor)\n\n\t}\n\n\treturn resultValue, resultHistogram\n}",
"func (src *StandardRateCounter) sampleCountAndUpdateRate(currentSampleTimeMs int64) {\n\t// Record newest up to date second sample time. Clear rate.\n\tsrc.lastSampleTimeMs = currentSampleTimeMs\n\n\t// Advance head and write values.\n\tsrc.headIndex = src.advance(src.headIndex)\n\tsrc.timestampsMs[src.headIndex] = currentSampleTimeMs\n\n\tsrc.lastCount = atomic.LoadInt64(&src.counter)\n\tsrc.counts[src.headIndex] = src.lastCount\n\n\t// Ensure tail is always ahead of head.\n\tif src.tailIndex == src.headIndex {\n\t\tsrc.tailIndex = src.advance(src.tailIndex)\n\t}\n\n\t// Advance the 'tail' to the newest sample which is at least windowTimeMs old.\n\tfor {\n\t\tnextWindowStart := src.advance(src.tailIndex)\n\t\tif nextWindowStart == src.headIndex ||\n\t\t\tsrc.timestampsMs[src.headIndex]-src.timestampsMs[nextWindowStart] < src.windowSizeMs {\n\t\t\tbreak\n\t\t}\n\t\tsrc.tailIndex = nextWindowStart\n\t}\n\n\ttimeDeltaMs := src.timestampsMs[src.headIndex] - src.timestampsMs[src.tailIndex]\n\tif timeDeltaMs == 0 {\n\t\tsrc.lastRate = 0.0\n\t} else {\n\t\tif timeDeltaMs > src.windowSizeMs {\n\t\t\ttimeDeltaMs = src.windowSizeMs\n\t\t}\n\n\t\tdeltaTimeSecs := timeDeltaMs / 1000.0\n\t\tdeltaCount := src.counts[src.headIndex] - src.counts[src.tailIndex]\n\t\tif deltaTimeSecs <= 0.0 {\n\t\t\tsrc.lastRate = 0\n\t\t} else {\n\t\t\tsrc.lastRate = float64(deltaCount) / float64(deltaTimeSecs)\n\t\t}\n\t}\n}",
"func (r *RateTracer) Rate() float64 {\n\treturn r.RateAt(time.Now())\n}",
"func (m *StandardMeterX) Rate2x() float64 {\n m.lock.RLock()\n rate2x := m.snapshot.rate2x\n m.lock.RUnlock()\n return rate2x\n}",
"func growthRateIncrease(rate float64, max time.Duration) func(t *testing.T, i int, last, delay time.Duration) {\n\treturn func(t *testing.T, i int, last, delay time.Duration) {\n\t\tvar expected time.Duration\n\t\tif i == 0 {\n\t\t\tassert.Greater(t, delay, expected)\n\t\t\treturn\n\t\t}\n\t\t// the expectation is that we added the growth rate percentage from the last\n\t\t// amount to get the new amount.\n\t\texpected = last + time.Duration(float64(last)/100*rate)\n\n\t\t// if our expected value (after adding the growth rate) is greater\n\t\t// than the actual value, then we must have reached our upper bound limit\n\t\t// assert that the returned delay matches our expected max upper bound.\n\t\tif expected > delay {\n\t\t\tassert.Equal(t, max, delay)\n\t\t\treturn\n\t\t}\n\n\t\t// jitter is applied to the delay making the returned delay\n\t\t// anywhere from 75% up to 100% of the actual exponential growth value.\n\t\texpected -= time.Duration(float64(expected) / 100 * 0.75)\n\t\tassert.GreaterOrEqual(t, delay, expected)\n\t}\n}",
"func steppedRate(r, step uint64) uint64 {\n\tsteps := math.Round(float64(r) / float64(step))\n\tif steps == 0 {\n\t\treturn step\n\t}\n\treturn uint64(math.Round(steps * float64(step)))\n}",
"func RATE(nper int, pmt float64, pv float64, fv float64, due float64, guess float64) float64 {\n\trate := guess\n\ti := 0\n\tvar x0, x1, y, f float64\n\n\tx1 = rate\n\n\tlamda := func(rate float64) float64 {\n\t\tif math.Abs(rate) < Accuracy {\n\t\t\treturn pv*(1+float64(nper)*rate) + pmt*(1+rate*due)*float64(nper) + fv\n\t\t}\n\n\t\tf = math.Exp(float64(nper) * math.Log(1+rate))\n\t\treturn pv*f + pmt*(1/rate+due)*(f-1) + fv\n\t}\n\n\ty = lamda(rate)\n\n\ty0 := pv + pmt*float64(nper) + fv\n\ty1 := pv*f + pmt*(1/rate+due)*(f-1) + fv\n\n\t// find root by secant method\n\tfor (math.Abs(y0-y1) > Accuracy) && (i < MaxIterations) {\n\t\trate = (y1*x0 - y0*x1) / (y1 - y0)\n\t\tx0 = x1\n\t\tx1 = rate\n\n\t\ty = lamda(rate)\n\n\t\ty0 = y1\n\t\ty1 = y\n\t\ti++\n\t}\n\n\treturn rate\n}",
"func Rate() (float64, error) {\n\tp, err := Ticker(\"usdt_cny\")\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, util.FuncName())\n\t}\n\n\treturn p.Last, nil\n}",
"func (dev *InputDevice) GetRepeatRate() *[2]uint {\n\trepeat_delay := new([2]uint)\n\tioctl(dev.File.Fd(), uintptr(EVIOCGREP), unsafe.Pointer(repeat_delay))\n\n\treturn repeat_delay\n}",
"func (q *QuotaCenter) getRealTimeRate(rateType internalpb.RateType) float64 {\n\tvar rate float64\n\tfor _, metric := range q.proxyMetrics {\n\t\tfor _, r := range metric.Rms {\n\t\t\tif r.Label == rateType.String() {\n\t\t\t\trate += r.Rate\n\t\t\t}\n\t\t}\n\t}\n\treturn rate\n}",
"func (m *StandardMeterX) Rate1x() float64 {\n m.lock.RLock()\n rate1x := m.snapshot.rate1x\n m.lock.RUnlock()\n return rate1x\n}",
"func (_Crowdsale *CrowdsaleCaller) Rate(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Crowdsale.contract.Call(opts, out, \"rate\")\n\treturn *ret0, err\n}",
"func NewRate(fps float64, dropFrame bool) (Rate, error) {\n\trate := Rate{}\n\n\tfps = math.Round(fps*100) / 100\n\n\tif fps < 1 {\n\t\treturn rate, fmt.Errorf(\"rate must be at least 1 fps but got: %f\", fps)\n\t}\n\n\ttimeBase := math.Round(fps)\n\n\treturn Rate{\n\t\tfps: fps,\n\t\ttimeBase: timeBase,\n\t\tdropFrame: dropFrame,\n\t}, nil\n}",
"func (q *statsQueue) Rate() (float64, float64) {\n\tfront, back := q.frontAndBack()\n\n\tif front == nil || back == nil {\n\t\treturn 0, 0\n\t}\n\n\tif time.Now().Sub(back.Time()) > time.Second {\n\t\tq.Clear()\n\t\treturn 0, 0\n\t}\n\n\tsampleDuration := back.Time().Sub(front.Time())\n\n\tpr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)\n\n\tbr := float64(q.PkgSize()) / float64(sampleDuration) * float64(time.Second)\n\n\treturn pr, br\n}",
"func (_Rated *RatedTransactor) Rate(opts *bind.TransactOpts, rating *big.Int) (*types.Transaction, error) {\n\treturn _Rated.contract.Transact(opts, \"rate\", rating)\n}",
"func (r *RateTracer) RateAt(instant time.Time) float64 {\n\treturn (metric.ReadCounter(r.Counter) - r.base) / instant.Sub(r.start).Seconds()\n}",
"func (m *MeterSnapshot) Rate2x() float64 { return m.rate2x }",
"func (tuo *TransactionUpdateOne) AddExchangeRate(f float64) *TransactionUpdateOne {\n\ttuo.mutation.AddExchangeRate(f)\n\treturn tuo\n}",
"func (d *FileBackedDevice) SampleRate(actionType DiskActionType, intervalMilliseconds uint64) uint64 {\n\tbytes := float64(d.diskActionTracker.Sample(actionType, intervalMilliseconds))\n\tseconds := float64(intervalMilliseconds) / float64(1000)\n\n\treturn uint64(bytes / seconds)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
histogramRate is a helper function for extrapolatedRate. It requires points[0] to be a histogram. It returns nil if any other Point in points is not a histogram.
|
// histogramRate is a helper function for extrapolatedRate. It requires
// points[0] to be a histogram. It returns nil if any other point in points
// is not a histogram, since a mixed float/histogram range cannot be rated.
func histogramRate(points []sample, isCounter bool) *histogram.FloatHistogram {
	prev := points[0].H // We already know that this is a histogram.
	last := points[len(points)-1].H
	if last == nil {
		return nil // Range contains a mix of histograms and floats.
	}
	minSchema := prev.Schema
	if last.Schema < minSchema {
		minSchema = last.Schema
	}
	// Mirrors the upstream Prometheus implementation:
	// https://github.com/prometheus/prometheus/blob/ccea61c7bf1e6bce2196ba8189a209945a204c5b/promql/functions.go#L183
	// First iteration to find out two things:
	// - What's the smallest relevant schema?
	// - Are all data points histograms? (If not, bail out with nil.)
	for _, currPoint := range points[1 : len(points)-1] {
		curr := currPoint.H
		if curr == nil {
			return nil // Range contains a mix of histograms and floats.
		}
		if !isCounter {
			continue
		}
		if curr.Schema < minSchema {
			minSchema = curr.Schema
		}
	}
	// Overall change = last - first, at the smallest common schema.
	h := last.CopyToSchema(minSchema)
	h.Sub(prev)
	if isCounter {
		// Second iteration to deal with counter resets.
		for _, currPoint := range points[1:] {
			curr := currPoint.H
			if curr.DetectReset(prev) {
				// Add back the value lost at the reset.
				h.Add(prev)
			}
			prev = curr
		}
	}
	h.CounterResetHint = histogram.GaugeType
	return h.Compact(0)
}
|
[
"func extrapolatedRate(samples []sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (float64, *histogram.FloatHistogram) {\n\tvar (\n\t\trangeStart = stepTime - (selectRange + offset)\n\t\trangeEnd = stepTime - offset\n\t\tresultValue float64\n\t\tresultHistogram *histogram.FloatHistogram\n\t)\n\n\tif samples[0].H != nil {\n\t\tresultHistogram = histogramRate(samples, isCounter)\n\t} else {\n\t\tresultValue = samples[len(samples)-1].F - samples[0].F\n\t\tif isCounter {\n\t\t\tvar lastValue float64\n\t\t\tfor _, sample := range samples {\n\t\t\t\tif sample.F < lastValue {\n\t\t\t\t\tresultValue += lastValue\n\t\t\t\t}\n\t\t\t\tlastValue = sample.F\n\t\t\t}\n\t\t}\n\t}\n\n\t// Duration between first/last Samples and boundary of range.\n\tdurationToStart := float64(samples[0].T-rangeStart) / 1000\n\tdurationToEnd := float64(rangeEnd-samples[len(samples)-1].T) / 1000\n\n\tsampledInterval := float64(samples[len(samples)-1].T-samples[0].T) / 1000\n\taverageDurationBetweenSamples := sampledInterval / float64(len(samples)-1)\n\n\tif isCounter && resultValue > 0 && samples[0].F >= 0 {\n\t\t// Counters cannot be negative. If we have any slope at\n\t\t// all (i.e. resultValue went up), we can extrapolate\n\t\t// the zero point of the counter. If the duration to the\n\t\t// zero point is shorter than the durationToStart, we\n\t\t// take the zero point as the start of the series,\n\t\t// thereby avoiding extrapolation to negative counter\n\t\t// values.\n\t\tdurationToZero := sampledInterval * (samples[0].F / resultValue)\n\t\tif durationToZero < durationToStart {\n\t\t\tdurationToStart = durationToZero\n\t\t}\n\t}\n\n\t// If the first/last Samples are close to the boundaries of the range,\n\t// extrapolate the result. 
This is as we expect that another sample\n\t// will exist given the spacing between Samples we've seen thus far,\n\t// with an allowance for noise.\n\textrapolationThreshold := averageDurationBetweenSamples * 1.1\n\textrapolateToInterval := sampledInterval\n\n\tif durationToStart < extrapolationThreshold {\n\t\textrapolateToInterval += durationToStart\n\t} else {\n\t\textrapolateToInterval += averageDurationBetweenSamples / 2\n\t}\n\tif durationToEnd < extrapolationThreshold {\n\t\textrapolateToInterval += durationToEnd\n\t} else {\n\t\textrapolateToInterval += averageDurationBetweenSamples / 2\n\t}\n\tfactor := extrapolateToInterval / sampledInterval\n\tif isRate {\n\t\tfactor /= float64(selectRange / 1000)\n\t}\n\tif resultHistogram == nil {\n\t\tresultValue *= factor\n\t} else {\n\t\tresultHistogram.Mul(factor)\n\n\t}\n\n\treturn resultValue, resultHistogram\n}",
"func (_m *Backend) Histogram(ctx context.Context, name string, value float64, tags []string, rate float64) error {\n\tret := _m.Called(ctx, name, value, tags, rate)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, float64, []string, float64) error); ok {\n\t\tr0 = rf(ctx, name, value, tags, rate)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func extendedRate(samples []sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) {\n\tvar (\n\t\trangeStart = stepTime - (selectRange + offset)\n\t\trangeEnd = stepTime - offset\n\t\tresultValue float64\n\t\tresultHistogram *histogram.FloatHistogram\n\t)\n\n\tif samples[0].H != nil {\n\t\t// TODO - support extended rate for histograms\n\t\tresultHistogram = histogramRate(samples, isCounter)\n\t\treturn resultValue, resultHistogram\n\t}\n\n\tsameVals := true\n\tfor i := range samples {\n\t\tif i > 0 && samples[i-1].F != samples[i].F {\n\t\t\tsameVals = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// This effectively injects a \"zero\" series for xincrease if we only have one sample.\n\t// Only do it for some time when the metric appears the first time.\n\tuntil := selectRange + metricAppearedTs\n\tif isCounter && !isRate && sameVals {\n\t\t// Make sure we are not at the end of the range.\n\t\tif stepTime-offset <= until {\n\t\t\treturn samples[0].F, nil\n\t\t}\n\t}\n\n\tsampledInterval := float64(samples[len(samples)-1].T - samples[0].T)\n\taverageDurationBetweenSamples := sampledInterval / float64(len(samples)-1)\n\n\tfirstPoint := 0\n\t// Only do this for not xincrease\n\tif !(isCounter && !isRate) {\n\t\t// If the point before the range is too far from rangeStart, drop it.\n\t\tif float64(rangeStart-samples[0].T) > averageDurationBetweenSamples {\n\t\t\tif len(samples) < 3 {\n\t\t\t\treturn resultValue, nil\n\t\t\t}\n\t\t\tfirstPoint = 1\n\t\t\tsampledInterval = float64(samples[len(samples)-1].T - samples[1].T)\n\t\t\taverageDurationBetweenSamples = sampledInterval / float64(len(samples)-2)\n\t\t}\n\t}\n\n\tvar (\n\t\tcounterCorrection float64\n\t\tlastValue float64\n\t)\n\tif isCounter {\n\t\tfor i := firstPoint; i < len(samples); i++ {\n\t\t\tsample := samples[i]\n\t\t\tif sample.F < lastValue {\n\t\t\t\tcounterCorrection += lastValue\n\t\t\t}\n\t\t\tlastValue = 
sample.F\n\t\t}\n\t}\n\tresultValue = samples[len(samples)-1].F - samples[firstPoint].F + counterCorrection\n\n\t// Duration between last sample and boundary of range.\n\tdurationToEnd := float64(rangeEnd - samples[len(samples)-1].T)\n\t// If the points cover the whole range (i.e. they start just before the\n\t// range start and end just before the range end) adjust the value from\n\t// the sampled range to the requested range.\n\t// Only do this for not xincrease.\n\tif !(isCounter && !isRate) {\n\t\tif samples[firstPoint].T <= rangeStart && durationToEnd < averageDurationBetweenSamples {\n\t\t\tadjustToRange := float64(selectRange / 1000)\n\t\t\tresultValue = resultValue * (adjustToRange / (sampledInterval / 1000))\n\t\t}\n\t}\n\n\tif isRate {\n\t\tresultValue = resultValue / float64(selectRange/1000)\n\t}\n\n\treturn resultValue, nil\n}",
"func (p *Protocol) GetProbationHistoricalRate(startEpoch int, epochCount int, delegateName string) (string, error) {\n\tif _, ok := p.indexer.Registry.Find(votings.ProtocolID); !ok {\n\t\treturn \"\", errors.New(\"votings protocol is unregistered\")\n\t}\n\tdb := p.indexer.Store.GetDB()\n\tappearingCount, err := p.getAppearingCount(db, startEpoch, epochCount, delegateName)\n\tif err != nil {\n\t\treturn \"0\", errors.New(\"get Appearing Count error\")\n\t}\n\tif appearingCount == 0 {\n\t\treturn \"0\", nil\n\t}\n\tprobationCount := uint64(0)\n\tfor i := startEpoch; i < startEpoch+epochCount; i++ {\n\t\taddress, err := p.getOperatorAddress(delegateName, i)\n\t\tswitch {\n\t\tcase errors.Cause(err) == indexprotocol.ErrNotExist:\n\t\t\tcontinue\n\t\tcase err != nil:\n\t\t\treturn \"0\", err\n\t\t}\n\t\texist, _ := queryprotocol.RowExists(db, fmt.Sprintf(selectProbationExist,\n\t\t\tvotings.ProbationListTableName, i, address))\n\t\tif exist {\n\t\t\tprobationCount++\n\t\t}\n\t}\n\trate := float64(probationCount) / float64(appearingCount)\n\treturn fmt.Sprintf(\"%0.2f\", rate), nil\n}",
"func (sit *seriesIterator) AtHistogram() (int64, *histogram.Histogram) {\n\tlog.Fatal(\"seriesIterator.AtHistogram not implemented\")\n\treturn 0, nil // @TODO\n}",
"func ExampleHistogram() {\n\th := &Histogram{\n\t\tBuckets: make([]int, 10),\n\t\tResolution: time.Millisecond,\n\t}\n\th.Record(16 * time.Millisecond)\n}",
"func recordDistributionPoint(pts []metricdata.Point, recorder recordFunc) error {\n\t// only use the most recent datapoint for now.\n\tpt := pts[len(pts)-1]\n\tval, ok := pt.Value.(*metricdata.Distribution)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%w: %v\", errBadPoint, pt.Value)\n\t}\n\tbucketCounts := make([]uint64, len(val.Buckets))\n\tfor i, bucket := range val.Buckets {\n\t\tif bucket.Count < 0 {\n\t\t\treturn fmt.Errorf(\"%w: bucket count may not be negative\", errBadPoint)\n\t\t}\n\t\tbucketCounts[i] = uint64(bucket.Count)\n\t}\n\tif val.Count < 0 {\n\t\treturn fmt.Errorf(\"%w: count may not be negative\", errBadPoint)\n\t}\n\treturn recorder(&ocDistAggregator{\n\t\tsum: number.NewFloat64Number(val.Sum),\n\t\tcount: uint64(val.Count),\n\t\tbuckets: aggregation.Buckets{\n\t\t\tBoundaries: val.BucketOptions.Bounds,\n\t\t\tCounts: bucketCounts,\n\t\t},\n\t}, pts[len(pts)-1].Time)\n}",
"func newHistogramMetricWithDataPoints(\n\tname string,\n\ttemporality pmetric.AggregationTemporality,\n\tnumBucketCountsForEachDataPoint []int,\n) pmetric.Metric {\n\tresult := newMetric(name, pmetric.MetricTypeHistogram)\n\taHistogram := result.Histogram()\n\taHistogram.SetAggregationTemporality(temporality)\n\taHistogram.DataPoints().EnsureCapacity(len(numBucketCountsForEachDataPoint))\n\tfor _, count := range numBucketCountsForEachDataPoint {\n\t\tpoint := aHistogram.DataPoints().AppendEmpty()\n\t\tpoint.BucketCounts().FromRaw(make([]uint64, count))\n\t\tpoint.ExplicitBounds().FromRaw(make([]float64, count-1))\n\t}\n\treturn result\n}",
"func (NilHistogramFloat64) Percentile(p float64) float64 { return 0.0 }",
"func histDataPointToSummary(dp pmetric.HistogramDataPoint) (float64, float64, float64) {\n\tbounds := dp.ExplicitBounds()\n\tcounts := dp.BucketCounts()\n\n\t// shortcut if min, max, and sum are provided\n\tif dp.HasMin() && dp.HasMax() && dp.HasSum() {\n\t\treturn dp.Min(), dp.Max(), dp.Sum()\n\t}\n\n\t// a single-bucket histogram is a special case\n\tif counts.Len() == 1 {\n\t\treturn estimateSingleBucketHistogram(dp)\n\t}\n\n\t// If any of min, max, sum is not provided in the data point,\n\t// loop through the buckets to estimate them.\n\t// All three values are estimated in order to avoid looping multiple times\n\t// or complicating the loop with branches. After the loop, estimates\n\t// will be overridden with any values provided by the data point.\n\tfoundNonEmptyBucket := false\n\tvar min, max, sum float64 = 0, 0, 0\n\n\t// Because we do not know the actual min, max, or sum, we estimate them based on non-empty buckets\n\tfor i := 0; i < counts.Len(); i++ {\n\t\t// empty bucket\n\t\tif counts.At(i) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// range for bucket counts[i] is bounds[i-1] to bounds[i]\n\n\t\t// min estimation\n\t\tif !foundNonEmptyBucket {\n\t\t\tfoundNonEmptyBucket = true\n\t\t\tif i == 0 {\n\t\t\t\t// if we're in the first bucket, the best estimate we can make for min is the upper bound\n\t\t\t\tmin = bounds.At(i)\n\t\t\t} else {\n\t\t\t\tmin = bounds.At(i - 1)\n\t\t\t}\n\t\t}\n\n\t\t// max estimation\n\t\tif i == counts.Len()-1 {\n\t\t\t// if we're in the last bucket, the best estimate we can make for max is the lower bound\n\t\t\tmax = bounds.At(i - 1)\n\t\t} else {\n\t\t\tmax = bounds.At(i)\n\t\t}\n\n\t\t// sum estimation\n\t\tswitch i {\n\t\tcase 0:\n\t\t\t// in the first bucket, estimate sum using the upper bound\n\t\t\tsum += float64(counts.At(i)) * bounds.At(i)\n\t\tcase counts.Len() - 1:\n\t\t\t// in the last bucket, estimate sum using the lower bound\n\t\t\tsum += float64(counts.At(i)) * bounds.At(i-1)\n\t\tdefault:\n\t\t\t// in any other 
bucket, estimate sum using the bucket midpoint\n\t\t\tsum += float64(counts.At(i)) * (bounds.At(i) + bounds.At(i-1)) / 2\n\t\t}\n\t}\n\n\t// Override estimates with any values provided by the data point\n\tif dp.HasMin() {\n\t\tmin = dp.Min()\n\t}\n\tif dp.HasMax() {\n\t\tmax = dp.Max()\n\t}\n\tif dp.HasSum() {\n\t\tsum = dp.Sum()\n\t}\n\n\t// Set min to average when higher than average. This can happen when most values are lower than first boundary (falling in first bucket).\n\t// Set max to average when lower than average. This can happen when most values are higher than last boundary (falling in last bucket).\n\t// dp.Count() will never be zero\n\tavg := sum / float64(dp.Count())\n\tif min > avg {\n\t\tmin = avg\n\t}\n\tif max < avg {\n\t\tmax = avg\n\t}\n\n\treturn min, max, sum\n}",
"func (q *statsQueue) Rate() (float64, float64) {\n\tfront, back := q.frontAndBack()\n\n\tif front == nil || back == nil {\n\t\treturn 0, 0\n\t}\n\n\tif time.Now().Sub(back.Time()) > time.Second {\n\t\tq.Clear()\n\t\treturn 0, 0\n\t}\n\n\tsampleDuration := back.Time().Sub(front.Time())\n\n\tpr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)\n\n\tbr := float64(q.PkgSize()) / float64(sampleDuration) * float64(time.Second)\n\n\treturn pr, br\n}",
"func (rt RateTable) RateAt(point math.Duration) Rate {\n\trate := Rate(0)\n\t// the nature of rate tables is that we want the smallest rate\n\t// for which point >= row.From. The obvious way would be to iterate\n\t// in reverse, and return the first time that the point >= the row's\n\t// From value. However, reverse iteration is tedious in go, so we\n\t// take a different tack instead.\n\t//\n\t// We iterate forward, but keep a cache of the row rates, so that the\n\t// currently active rate always trails behind the table by one row.\n\t// This means that the first time the point < row.From, we can return\n\t// the active rate.\n\tfor _, row := range rt {\n\t\tif point < row.From {\n\t\t\treturn rate\n\t\t}\n\t\trate = row.Rate\n\t}\n\treturn rate\n}",
"func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {\n\treturn b.it.AtFloatHistogram()\n}",
"func histogramLineValue(metric metrics.Histogram, metricName, endpoint, oldtags string, step, ts int64) []Point {\n\tdata := make([]Point, 0)\n\ttags := getTags(metricName, oldtags)\n\n\tvalues := make(map[string]interface{})\n\tps := metric.Percentiles([]float64{0.75, 0.95, 0.99})\n\tvalues[\"min\"] = metric.Min()\n\tvalues[\"max\"] = metric.Max()\n\tvalues[\"mean\"] = metric.Mean()\n\tvalues[\"75th\"] = ps[0]\n\tvalues[\"95th\"] = ps[1]\n\tvalues[\"99th\"] = ps[2]\n\tfor key, val := range values {\n\t\tc := newLineValue(endpoint, key, val, step, GAUGE, tags, ts)\n\t\tdata = append(data, c)\n\t}\n\n\treturn data\n}",
"func (r *Registry) Histogram(desc Desc, bounds []float64) HistogramFamily {\n\tfam, err := r.AddHistogram(desc, bounds)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn fam\n}",
"func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {\n\td.rates.Set(d.prefix+name, sampleRate)\n\treturn &Histogram{\n\t\tname: d.prefix + name,\n\t\tobs: d.histograms.Observe,\n\t}\n}",
"func (m *StandardMeterX) Rate1x() float64 {\n m.lock.RLock()\n rate1x := m.snapshot.rate1x\n m.lock.RUnlock()\n return rate1x\n}",
"func (a Chart_Histogram) Get(fieldName string) (value []SampleStream, found bool) {\n\tif a.AdditionalProperties != nil {\n\t\tvalue, found = a.AdditionalProperties[fieldName]\n\t}\n\treturn\n}",
"func binPoints(xys XYer, n int) (bins []HistogramBin, width float64) {\n\txmin, xmax := Range(XValues{xys})\n\tif n <= 0 {\n\t\tm := 0.0\n\t\tfor i := 0; i < xys.Len(); i++ {\n\t\t\t_, y := xys.XY(i)\n\t\t\tm += math.Max(y, 1.0)\n\t\t}\n\t\tn = int(math.Ceil(math.Sqrt(m)))\n\t}\n\tif n < 1 || xmax <= xmin {\n\t\tn = 1\n\t}\n\n\tbins = make([]HistogramBin, n)\n\n\tw := (xmax - xmin) / float64(n)\n\tif w == 0 {\n\t\tw = 1\n\t}\n\tfor i := range bins {\n\t\tbins[i].Min = xmin + float64(i)*w\n\t\tbins[i].Max = xmin + float64(i+1)*w\n\t}\n\n\tfor i := 0; i < xys.Len(); i++ {\n\t\tx, y := xys.XY(i)\n\t\tbin := int((x - xmin) / w)\n\t\tif x == xmax {\n\t\t\tbin = n - 1\n\t\t}\n\t\tif bin < 0 || bin >= n {\n\t\t\tpanic(fmt.Sprintf(\"%g, xmin=%g, xmax=%g, w=%g, bin=%d, n=%d\\n\",\n\t\t\t\tx, xmin, xmax, w, bin, n))\n\t\t}\n\t\tbins[bin].Weight += y\n\t}\n\treturn bins, w\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewWitnessVerifier creates a witness signature verifier from a public key.
|
// NewWitnessVerifier creates a witness signature verifier from a public key.
// It wraps a CT signature verifier constructed for the given key.
func NewWitnessVerifier(pk crypto.PublicKey) (*WitnessVerifier, error) {
	verifier, err := ct.NewSignatureVerifier(pk)
	if err != nil {
		return nil, fmt.Errorf("failed to create signature verifier: %v", err)
	}
	wv := &WitnessVerifier{SigVerifier: verifier}
	return wv, nil
}
|
[
"func NewVerifier(ks PublicKeySource, df ClaimsDecoderFunc, vf VerifyFunc) *Verifier {\n\treturn &Verifier{\n\t\tks: ks,\n\t\tdf: df,\n\t\tvf: vf,\n\t\tskewAllowance: int64(defaultSkewAllowance.Seconds()),\n\t}\n}",
"func NewVerifier(publicKey string) *Verifier {\n\tkey, err := readPublicKey(\"-----BEGIN PUBLIC KEY-----\\n\" + publicKey + \"\\n-----END PUBLIC KEY-----\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &Verifier{\n\t\tpublicKey: key,\n\t}\n}",
"func NewVerifier(keys ...[]byte) (*Verifier, error) {\n\tpubKeys := map[string]*jose.JSONWebKey{}\n\tfor i := range keys {\n\t\tpub, err := LoadJSONWebKey(keys[i], true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, dupKeyID := pubKeys[pub.KeyID]; dupKeyID {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrDuplicateKeyID, pub.KeyID)\n\t\t}\n\t\tpubKeys[pub.KeyID] = pub\n\t}\n\treturn &Verifier{\n\t\tkeys: pubKeys,\n\t}, nil\n}",
"func NewVerifier(h *keyset.Handle) (tink.Verifier, error) {\n\treturn NewVerifierWithKeyManager(h, nil /*keyManager*/)\n}",
"func NewVerifier(basename string) (v *Verifier, err error) {\n\tbuf, err := ioutil.ReadFile(basename + \".pub\")\n\tif err != nil {\n\t\treturn\n\t}\n\tpub := &ecdsa.PublicKey{\n\t\tCurve: curveEll,\n\t}\n\tpub.x, pub.y = elliptic.Unmarshal(curveEll, buf)\n\tjws, err := jose.LoadPublicKey(pub)\n\tif err != nil {\n\t\treturn\n\t}\n}",
"func NewVRFVerifier(pubkey *bls.PublicKey) vrf.PublicKey {\n\treturn &PublicKey{pubkey}\n}",
"func NewVerifier(alg jwa.SignatureAlgorithm) (Verifier, error) {\n\tf, ok := verifierDB[alg]\n\tif ok {\n\t\treturn f.Create()\n\t}\n\treturn nil, errors.Errorf(`unsupported signature algorithm \"%s\"`, alg)\n}",
"func NewProofVerifier(opts ...OptionFunc) (*ProofVerifier, error) {\n\toptions, err := applyOpts(opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinner, err := postrs.NewVerifier(options.powFlags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ProofVerifier{inner}, nil\n}",
"func CreatePublicKeyVerifier(rootPublicKey ed25519.PublicKey) func([]byte, []byte) bool {\n\treturn func(publicKey, proof []byte) bool {\n\t\t// ed25519.Verify panics if len(publicKey) is not PublicKeySize. We need to avoid that\n\t\tif len(publicKey) != 32 {\n\t\t\treturn false\n\t\t}\n\t\treturn ed25519.Verify(rootPublicKey, publicKey, proof)\n\t}\n}",
"func NewVerifier(t testing.TB) *Verifier {\n\tmock := &Verifier{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func newAddressFromPublicKeyHash(scheme Scheme, pubKeyHash crypto.Digest) (WavesAddress, error) {\n\tvar body [wavesAddressBodySize]byte\n\tcopy(body[:], pubKeyHash[:])\n\treturn newWavesAddress(scheme, body)\n}",
"func New(alg jwa.SignatureAlgorithm) (Verifier, error) {\n\tswitch alg {\n\tcase jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:\n\t\treturn newRSA(alg)\n\tcase jwa.HS256, jwa.HS384, jwa.HS512:\n\t\treturn newHMAC(alg)\n\tdefault:\n\t\treturn nil, errors.Errorf(`unsupported signature algorithm: %#v`, alg)\n\t}\n}",
"func New(alg jwa.SignatureAlgorithm) (Verifier, error) {\n\tswitch alg {\n\tcase jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:\n\t\treturn newRSA(alg)\n\tcase jwa.ES256, jwa.ES384, jwa.ES512:\n\t\treturn newECDSA(alg)\n\tcase jwa.HS256, jwa.HS384, jwa.HS512:\n\t\treturn newHMAC(alg)\n\tdefault:\n\t\treturn nil, fmt.Errorf(`unsupported signature algorithm: %s`, alg)\n\t}\n}",
"func NewSignatureVerifierMock(t minimock.Tester) *SignatureVerifierMock {\n\tm := &SignatureVerifierMock{t: t}\n\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.IsDigestMethodSupportedMock = mSignatureVerifierMockIsDigestMethodSupported{mock: m}\n\tm.IsSignMethodSupportedMock = mSignatureVerifierMockIsSignMethodSupported{mock: m}\n\tm.IsSignOfSignatureMethodSupportedMock = mSignatureVerifierMockIsSignOfSignatureMethodSupported{mock: m}\n\tm.IsValidDataSignatureMock = mSignatureVerifierMockIsValidDataSignature{mock: m}\n\tm.IsValidDigestSignatureMock = mSignatureVerifierMockIsValidDigestSignature{mock: m}\n\n\treturn m\n}",
"func NewPreservationVerifier(context *common.Context, workItemID int64, ingestObject *service.IngestObject) *PreservationVerifier {\n\treturn &PreservationVerifier{\n\t\tBase: Base{\n\t\t\tContext: context,\n\t\t\tIngestObject: ingestObject,\n\t\t\tWorkItemID: workItemID,\n\t\t},\n\t}\n}",
"func NewVerifierWithKeyManager(h *keyset.Handle, km registry.KeyManager) (tink.Verifier, error) {\n\tps, err := h.PrimitivesWithKeyManager(km)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"verifier_factory: cannot obtain primitive set: %s\", err)\n\t}\n\treturn newWrappedVerifier(ps)\n}",
"func newPublicKeyMinimumLengthValidator(length int) publicKeyMinimumLengthValidator {\n\treturn publicKeyMinimumLengthValidator{\n\t\tlength: length,\n\t}\n}",
"func NewSigner(key *Key) *Signer {\n\treturn &Signer{\n\t\tkey: key,\n\t\tConfig: &packet.Config{\n\t\t\tDefaultHash: crypto.SHA256,\n\t\t},\n\t}\n}",
"func NewSigner(key []byte) (*Signer, error) {\n\tpriv, err := LoadJSONWebKey(key, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\talg := jose.SignatureAlgorithm(priv.Algorithm)\n\tsigner, err := jose.NewSigner(jose.SigningKey{Algorithm: alg, Key: priv}, nil)\n\tp := &Signer{\n\t\tBuilder: jwt.Signed(signer),\n\t}\n\treturn p, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
VerifySignature finds and verifies this witness' signature on a cosigned STH. This may mean that there are other witness signatures that remain unverified, so future implementations may want to take in multiple signature verifiers, as is done in the Note package.
|
func (wv WitnessVerifier) VerifySignature(sth api.CosignedSTH) error {
if len(sth.WitnessSigs) == 0 {
return errors.New("no witness signature present in the STH")
}
sigData, err := tls.Marshal(sth.SignedTreeHead)
if err != nil {
return fmt.Errorf("failed to marshal internal STH: %v", err)
}
for _, sig := range sth.WitnessSigs {
// If we find a signature that verifies then we're okay.
if err := wv.SigVerifier.VerifySignature(sigData, tls.DigitallySigned(sig)); err == nil {
return nil
}
}
return errors.New("failed to verify any signature for this witness")
}
|
[
"func VerifySignature(id *did.Identifier, data []byte, ldSignature []byte) error {\n\t// Decode signature document\n\tsignature := &did.SignatureLD{}\n\tif err := json.Unmarshal(ldSignature, signature); err != nil {\n\t\treturn errors.New(\"invalid signature document\")\n\t}\n\n\t// Retrieve key\n\tkey := id.Key(signature.Creator)\n\tif key == nil {\n\t\treturn errors.New(\"invalid key identifier\")\n\t}\n\n\t// Hash original signed data\n\tinput := sha3.Sum256(data)\n\n\t// Verify signature\n\tif !key.VerifySignatureLD(input[:], signature) {\n\t\treturn errors.New(\"invalid signature\")\n\t}\n\n\t// All good!\n\treturn nil\n}",
"func (s *Syncer) verifyTSpendSignature(msgTx *wire.MsgTx, signature, pubKey []byte) error {\n\t// Calculate signature hash.\n\tsigHash, err := txscript.CalcSignatureHash(nil,\n\t\ttxscript.SigHashAll, msgTx, 0, nil)\n\tif err != nil {\n\t\treturn errors.Errorf(\"CalcSignatureHash: %w\", err)\n\t}\n\n\t// Lift Signature from bytes.\n\tsig, err := schnorr.ParseSignature(signature)\n\tif err != nil {\n\t\treturn errors.Errorf(\"ParseSignature: %w\", err)\n\t}\n\n\t// Lift public PI key from bytes.\n\tpk, err := schnorr.ParsePubKey(pubKey)\n\tif err != nil {\n\t\treturn errors.Errorf(\"ParsePubKey: %w\", err)\n\t}\n\n\t// Verify transaction was properly signed.\n\tif !sig.Verify(sigHash, pk) {\n\t\treturn errors.Errorf(\"Verify failed\")\n\t}\n\n\treturn nil\n}",
"func (c *Client) VerifySignature(body []byte, headers http.Header) error {\n\t// Algorithm from https://api.slack.com/docs/verifying-requests-from-slack\n\n\texpectedSignature := headers.Get(\"X-Slack-Signature\")\n\tif expectedSignature == \"\" {\n\t\treturn fmt.Errorf(\"X-Slack-Signature missing\")\n\t}\n\texpectedSignatureBytes, err := hex.DecodeString(strings.TrimPrefix(expectedSignature, \"v0=\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"X-Slack-Signature is not a valid hex string\")\n\t}\n\n\t// Step 2\n\ttsHeader := headers.Get(\"X-Slack-Request-Timestamp\")\n\tif tsHeader == \"\" {\n\t\treturn fmt.Errorf(\"X-Slack-Request-Timestamp header missing\")\n\t}\n\ttsInt, err := strconv.ParseInt(tsHeader, 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't parse timestamp %q: %v\", tsHeader, err)\n\t}\n\tts := time.Unix(tsInt, 0)\n\tnow := time.Now()\n\tdiff := now.Sub(ts)\n\tif math.Abs(diff.Minutes()) > 5 {\n\t\treturn fmt.Errorf(\"clock difference %s too high\", diff)\n\t}\n\n\t// Step 3\n\tsigBase := append([]byte(\"v0:\"+tsHeader+\":\"), body...)\n\th := hmac.New(sha256.New, []byte(c.Config.SigningSecret))\n\t_, _ = h.Write(sigBase)\n\tourSignature := h.Sum(nil)\n\tif !hmac.Equal(ourSignature, expectedSignatureBytes) {\n\t\treturn fmt.Errorf(\"signature mismatch\")\n\t}\n\treturn nil\n}",
"func (dkgs *DKGKeyShareImpl) VerifySignature(msg string, sig *bls.Sign) bool {\n\treturn sig.Verify(dkgs.pi, msg)\n}",
"func (msg *PlainMessageReader) VerifySignature() (err error) {\n\tif !msg.readAll {\n\t\treturn errors.New(\"gopenpgp: can't verify the signature until the message reader has been read entirely\")\n\t}\n\tif msg.verifyKeyRing != nil {\n\t\tprocessSignatureExpiration(msg.details, msg.verifyTime)\n\t\terr = verifyDetailsSignature(msg.details, msg.verifyKeyRing, msg.verificationContext)\n\t} else {\n\t\terr = errors.New(\"gopenpgp: no verify keyring was provided before decryption\")\n\t}\n\treturn\n}",
"func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {\n\tif !pk.CanSign() {\n\t\treturn errors.InvalidArgumentError(\"public key cannot generate signatures\")\n\t}\n\tif sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {\n\t\tsig.AddMetadataToHashSuffix()\n\t}\n\tsigned.Write(sig.HashSuffix)\n\thashBytes := signed.Sum(nil)\n\tif sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {\n\t\treturn errors.SignatureError(\"hash tag doesn't match\")\n\t}\n\n\tif pk.PubKeyAlgo != sig.PubKeyAlgo {\n\t\treturn errors.InvalidArgumentError(\"public key and signature use different algorithms\")\n\t}\n\n\tswitch pk.PubKeyAlgo {\n\tcase PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:\n\t\trsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)\n\t\terr = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes()))\n\t\tif err != nil {\n\t\t\treturn errors.SignatureError(\"RSA verification failure\")\n\t\t}\n\t\treturn nil\n\tcase PubKeyAlgoDSA:\n\t\tdsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)\n\t\t// Need to truncate hashBytes to match FIPS 186-3 section 4.6.\n\t\tsubgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8\n\t\tif len(hashBytes) > subgroupSize {\n\t\t\thashBytes = hashBytes[:subgroupSize]\n\t\t}\n\t\tif !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) {\n\t\t\treturn errors.SignatureError(\"DSA verification failure\")\n\t\t}\n\t\treturn nil\n\tcase PubKeyAlgoECDSA:\n\t\tecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)\n\t\tif !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) {\n\t\t\treturn errors.SignatureError(\"ECDSA verification failure\")\n\t\t}\n\t\treturn nil\n\tcase PubKeyAlgoEdDSA:\n\t\teddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey)\n\t\tif !eddsa.Verify(eddsaPublicKey, hashBytes, 
sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) {\n\t\t\treturn errors.SignatureError(\"EdDSA verification failure\")\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.SignatureError(\"Unsupported public key algorithm used in signature\")\n\t}\n}",
"func Signature(signatureVerifier jws.Verifier) Verifier {\n\treturn VerifierFunc(func(token *Token) error {\n\t\terr := token.VerifySignature(signatureVerifier)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%w: %s\", ErrVerificationFailed, err)\n\t\t}\n\t\treturn nil\n\t})\n}",
"func (m *ModuleCredential) VerifySignature(_, _ []byte) bool {\n\treturn false\n}",
"func CheckSignature(envelope *tokenserver.MachineTokenEnvelope, certs *signing.PublicCertificates) error {\n\treturn certs.CheckSignature(envelope.KeyId, envelope.TokenBody, envelope.RsaSha256)\n}",
"func (h SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signature.VerifyOption) error {\n\tvar digest []byte\n\tvar signerOpts crypto.SignerOpts = h.hashFunc\n\n\tfor _, opt := range opts {\n\t\topt.ApplyDigest(&digest)\n\t\topt.ApplyCryptoSignerOpts(&signerOpts)\n\t}\n\n\tdigest, hf, err := signature.ComputeDigestForVerifying(message, signerOpts.HashFunc(), hvSupportedHashFuncs, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsigBytes, err := io.ReadAll(sig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reading signature: %w\", err)\n\t}\n\n\treturn h.client.verify(sigBytes, digest, hf, opts...)\n}",
"func (f *FileMetaData) VerifySignature(signature []byte) bool {\n\tif f.FileDecryptor == nil {\n\t\tpanic(\"decryption not set propertly, cannot verify signature\")\n\t}\n\n\tserializer := thrift.NewThriftSerializer()\n\tdata, _ := serializer.Write(context.Background(), f.FileMetaData)\n\tnonce := signature[:encryption.NonceLength]\n\ttag := signature[encryption.NonceLength : encryption.NonceLength+encryption.GcmTagLength]\n\n\tkey := f.FileDecryptor.GetFooterKey()\n\taad := encryption.CreateFooterAad(f.FileDecryptor.FileAad())\n\n\tenc := encryption.NewAesEncryptor(f.FileDecryptor.Algorithm(), true)\n\tvar buf bytes.Buffer\n\tbuf.Grow(enc.CiphertextSizeDelta() + len(data))\n\tencryptedLen := enc.SignedFooterEncrypt(&buf, data, []byte(key), []byte(aad), nonce)\n\treturn bytes.Equal(buf.Bytes()[encryptedLen-encryption.GcmTagLength:], tag)\n}",
"func (s Signature) Verify(l *Link) error {\n\tpayload, err := l.Search(s.Payload)\n\tif err != nil {\n\t\treturn errors.Wrap(err, ErrBadJMESPATHQuery)\n\t}\n\tif payload == nil {\n\t\treturn errors.New(ErrEmptyJMESPATHResult)\n\t}\n\n\tpayloadBytes, err := cj.Marshal(payload)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif err := signatures.Verify(&signatures.Signature{\n\t\tAI: s.Type,\n\t\tPublicKey: []byte(s.PublicKey),\n\t\tMessage: payloadBytes,\n\t\tSignature: []byte(s.Signature),\n\t}); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}",
"func (bc *bls12Crypto) VerifyThresholdSignature(signature consensus.ThresholdSignature, hash consensus.Hash) bool {\n\tsig, ok := signature.(*AggregateSignature)\n\tif !ok {\n\t\treturn false\n\t}\n\tpubKeys := make([]*PublicKey, 0)\n\tsig.participants.ForEach(func(id hotstuff.ID) {\n\t\treplica, ok := bc.mods.Configuration().Replica(id)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tpubKeys = append(pubKeys, replica.PublicKey().(*PublicKey))\n\t})\n\tps, err := bls12.NewG2().HashToCurve(hash[:], domain)\n\tif err != nil {\n\t\tbc.mods.Logger().Error(err)\n\t\treturn false\n\t}\n\tif len(pubKeys) < bc.mods.Configuration().QuorumSize() {\n\t\treturn false\n\t}\n\tengine := bls12.NewEngine()\n\tengine.AddPairInv(&bls12.G1One, &sig.sig)\n\tfor _, pub := range pubKeys {\n\t\tengine.AddPair(pub.p, ps)\n\t}\n\treturn engine.Result().IsOne()\n}",
"func (s *MockSignature) Verify(message string, sig []byte) (bool, error) {\n\treturn true, nil\n}",
"func VerifyRequestSignature(xml string, publicCert string) error {\n\treturn verify(xml, publicCert, xmlRequestID)\n}",
"func (g *GHClient) ValidateSignature(receivedHash []string, bodyBuffer []byte) error {\n\thash := hmac.New(sha1.New, []byte(g.GitHubSecret))\n\tif _, err := hash.Write(bodyBuffer); err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot compute the HMAC for request: %s\\n\", err)\n\t\treturn errors.New(msg)\n\t}\n\n\texpectedHash := hex.EncodeToString(hash.Sum(nil))\n\tif receivedHash[1] != expectedHash {\n\t\tmsg := fmt.Sprintf(\"Expected Hash does not match the received hash: %s\\n\", expectedHash)\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}",
"func verifySignature(signed, signature io.Reader) bool {\n\tentities, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(MustAsset(\"pubkey.gpg.asc\")))\n\tif err != nil {\n\t\tlog.Fatalf(\"%s\", err) // Shouldn't happen\n\t}\n\tsigner, err := openpgp.CheckArmoredDetachedSignature(entities, signed, signature)\n\tif err != nil {\n\t\tlog.Error(\"Bad signature: %s\", err)\n\t\treturn false\n\t}\n\tlog.Notice(\"Good signature from %s\", signer.Identities[identity].UserId.Email)\n\treturn true\n}",
"func VerifySlackSignature(rawBody []byte, request *http.Request) bool {\n\ttimestamp := request.Header.Get(SlackTimestampHeader)\n\tslackSignature := []byte(request.Header.Get(SlackSignatureHeader))\n\treturn VerifySigningSignature(timestamp, rawBody, slackSignature)\n}",
"func SignVerify(sig []byte, m []byte, ctx string, pk []byte) bool {\n\tCheckSize(sig, SignBytes, \"sign sig\")\n\tCheckCtx(ctx, SignContextBytes)\n\tCheckSize(pk, SignPublicKeyBytes, \"sign pk\")\n\tmlen := len(m)\n\tcCtx := []byte(ctx)\n\n\t// Returns 0 on success\n\texit := int(C.hydro_sign_verify(\n\t\t(*C.uint8_t)(&sig[0]),\n\t\tunsafe.Pointer(&m[0]),\n\t\t(C.size_t)(mlen),\n\t\t(*C.char)(unsafe.Pointer(&cCtx[0])),\n\t\t(*C.uint8_t)(&pk[0])))\n\n\treturn bool(exit == 0)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AssetAndInfo loads and returns the asset and asset info for the given name. It returns an error if the asset could not be found or could not be loaded.
|
func AssetAndInfo(name string) ([]byte, os.FileInfo, error) {
a, ok := _bindata[filepath.ToSlash(name)]
if !ok {
return nil, nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist}
}
a.once.Do(func() {
fr := flate.NewReader(strings.NewReader(a.data))
var buf bytes.Buffer
if _, a.err = io.Copy(&buf, fr); a.err != nil {
return
}
if a.err = fr.Close(); a.err == nil {
a.bytes = buf.Bytes()
}
})
if a.err != nil {
return nil, nil, &os.PathError{Op: "read", Path: name, Err: a.err}
}
return a.bytes, a, nil
}
|
[
"func AssetInfo(name string) (os.FileInfo, error) {\n\treturn nil, errNoAssets\n}",
"func GetAssetByName(c *gin.Context) {\n\tnameID := c.Param(assetNameParam)\n\tasset, getErr := assetService.GetAssetByName(nameID)\n\tif getErr != nil {\n\t\tc.JSON(getErr.Status, getErr)\n\t} else {\n\t\tc.JSON(http.StatusOK, asset)\n\t}\n}",
"func FindAsset(path string) *Asset {\n\tfilename := TransformAssetPath(path)\n\tdata, err := ioutil.ReadFile(filename)\n\n\t// No asset found.\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// Creates and returns the asset\n\tvar asset *Asset = new(Asset)\n\tasset.Route = path\n\tasset.Filename = filename\n\tasset.Data = data\n\n\t// Sets the last modified date if readable.\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to read information on the file '%s'.\", filename)\n\t\treturn nil\n\t}\n\n\tstats, err := file.Stat()\n\tif err == nil {\n\t\tasset.LastModified = stats.ModTime().Format(DATE_FORMAT)\n\t} else {\n\t\tasset.LastModified = time.Now().Format(DATE_FORMAT)\n\t}\n\n\treturn asset\n}",
"func Asset(string) ([]byte, error) {\n\treturn nil, errNoAssets\n}",
"func AssetRead(asset *Asset, buf unsafe.Pointer, count uint32) int32 {\n\tcasset, _ := (*C.AAsset)(unsafe.Pointer(asset)), cgoAllocsUnknown\n\tcbuf, _ := buf, cgoAllocsUnknown\n\tccount, _ := (C.size_t)(count), cgoAllocsUnknown\n\t__ret := C.AAsset_read(casset, cbuf, ccount)\n\t__v := (int32)(__ret)\n\treturn __v\n}",
"func Open(name string) (File, error) {\n\treturn asset.Open(name)\n}",
"func RenderAsset(name string, data interface{}) (render.Render, error) {\n\td, err := Asset(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt, err := template.New(name).Parse(string(d))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &render.HTML{Template: t, Data: data, Name: name}, nil\n}",
"func (c *Client) GetAsset(tag string) (Asset, error) {\n\treqURL := c.config.URL + \"/api/asset/\" + tag\n\treq, err := http.NewRequest(\"GET\", reqURL, nil)\n\tif err != nil {\n\t\treturn Asset{}, err\n\t}\n\treq.SetBasicAuth(c.config.User, c.config.Password)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn Asset{}, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn Asset{}, errored.Errorf(\"failed to read response body. Error: %s\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn Asset{}, errored.Errorf(\"status code %d unexpected. Response body: %q\",\n\t\t\tresp.StatusCode, body)\n\t}\n\n\tlogrus.Debugf(\"response: %s\", body)\n\tcollinsResp := &struct {\n\t\tData struct {\n\t\t\tAsset Asset `json:\"ASSET\"`\n\t\t} `json:\"data\"`\n\t}{}\n\tif err := json.Unmarshal(body, collinsResp); err != nil {\n\t\treturn Asset{}, errored.Errorf(\"failed to unmarshal response. Error: %s\", err)\n\t}\n\n\tlogrus.Debugf(\"collins asset: %+v\", collinsResp.Data.Asset)\n\treturn collinsResp.Data.Asset, nil\n}",
"func getAsset(path string) ([]byte, error) {\n\tif opts.resourcePath != \"\" && fileExists(filepath.Join(opts.resourcePath, path)) {\n\t\treturn ioutil.ReadFile(filepath.Join(opts.resourcePath, path))\n\t}\n\n\treturn assetBox.Find(path)\n}",
"func (conn *MapConn) GetAsset(name string, asset *skydb.Asset) error {\n\tpanic(\"not implemented\")\n}",
"func (m *Client) GetAsset(symbolOrSlug string, options *GetAssetOptions) (*GetAssetResp, error) {\n\t// default options\n\topts := &GetAssetOptions{\n\t\tFields: nil,\n\t}\n\n\tif options != nil {\n\t\tif err := copier.CopyWithOption(opts, options, copier.Option{IgnoreEmpty: true}); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not copy options: %w\", err)\n\t\t}\n\t}\n\tquery := map[string][]string{}\n\n\tif opts.Fields != nil {\n\t\tquery[\"fields\"] = opts.Fields\n\t}\n\n\tresp, err := m.request(http.MethodGet, fmt.Sprintf(\"/api/v1/assets/%s\", symbolOrSlug), nil, query)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not make request: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"server returned Status %d instead of 200\", resp.StatusCode)\n\t}\n\n\tvar assetResp GetAssetResp\n\tif err := json.NewDecoder(resp.Body).Decode(&assetResp); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not unmarshal json from response body: %w\", err)\n\t}\n\n\treturn &assetResp, nil\n\n}",
"func (m *MockDb) GetAsset(assetID string) (*data.AssetInfo, error) {\n\tif !m.ShouldStatusBeCreated {\n\t\treturn &data.AssetInfo{\n\t\t\tName: assetID,\n\t\t\tURL: constants.MockURL,\n\t\t\tUploadStatus: constants.AssetStatusUploaded,\n\t\t}, m.Err\n\t}\n\n\treturn &data.AssetInfo{\n\t\tName: assetID,\n\t\tURL: constants.MockURL,\n\t\tUploadStatus: constants.AssetStatusCreated,\n\t}, m.Err\n}",
"func ValidateAsset(\n\tctx context.Context,\n\tasset string,\n) (*mint.AssetResource, error) {\n\ta, err := mint.AssetResourceFromName(ctx, asset)\n\tif err != nil {\n\t\treturn nil, errors.Trace(errors.NewUserErrorf(err,\n\t\t\t400, \"asset_invalid\",\n\t\t\t\"The asset you provided is invalid: %s.\",\n\t\t\tasset,\n\t\t))\n\t}\n\n\treturn a, nil\n}",
"func (s *storeImpl) Load(a asset.Asset) (asset.Asset, error) {\n\tfoundOnDisk, err := s.load(a, \"\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load asset\")\n\t}\n\n\tif foundOnDisk.source == unfetched {\n\t\treturn nil, nil\n\t}\n\n\treturn s.assets[reflect.TypeOf(a)].asset, nil\n}",
"func (a *Asset) Name() string {\n\treturn a.name\n}",
"func (f *FileGroup) loadAsset(filename string) ([]byte, error) {\n\t// Check internal\n\tstoredAsset := f.assetDirectory[filename]\n\tif storedAsset != \"\" {\n\t\treturn DecompressHexString(storedAsset)\n\t}\n\n\t// Get caller directory\n\t_, file, _, _ := runtime.Caller(2)\n\tcallerDir := filepath.Dir(file)\n\n\t// Calculate full path\n\tfullFilePath := filepath.Join(callerDir, f.baseDirectory, filename)\n\n\treturn ioutil.ReadFile(fullFilePath)\n}",
"func (c *Context) createAsset(absPath string, info os.FileInfo) (*Asset, error) {\n\trawContent, err := c.loadAssetContent(absPath)\n\tif err != nil {\n\t\t/*fmt.Printf(\"failed to load asset content for %q\\n\", absPath)*/\n\t\treturn nil, err\n\t}\n\tcontent, dependencies := extractDependencies(rawContent)\n\n\tfor i, dep := range dependencies {\n\t\tif path.Ext(dep) == \"\" {\n\t\t\text := strings.Split(path.Base(absPath), \".\")[1]\n\t\t\tdependencies[i] = fmt.Sprintf(\"%s.%s\", dep, ext)\n\t\t}\n\t}\n\n\treturn &Asset{info, content, dependencies}, nil\n}",
"func (e *ExternalAssetService) Get(id int) (*ExternalAsset, *Response, error) {\n\tendpoint := fmt.Sprintf(\"/assets/external/%d?depth=complete\", id)\n\texternalAsset := &ExternalAsset{}\n\tresp, err := e.client.getRequestDecode(endpoint, externalAsset)\n\treturn externalAsset, resp, err\n}",
"func LoadAsset(env Environment) (err error) {\n\tvar path string\n\tswitch env {\n\tcase DTEnvironment:\n\t\tpath = assetDTPath\n\tcase QAEnvironment:\n\t\tpath = assetQAPath\n\tdefault:\n\t\treturn errors.New(\"not support environment\")\n\t}\n\tAssetClient, err = NewClient(\"Asset\", path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func GetAssetByID(c *gin.Context) {\n\tnameID := c.Param(assetIDParam)\n\tasset, getErr := assetService.GetAssetByID(nameID)\n\tif getErr != nil {\n\t\tc.JSON(getErr.Status, getErr)\n\t} else {\n\t\tc.JSON(http.StatusOK, asset)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AssetNames returns the names of the assets.
|
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
|
[
"func (c *Converter) Assets() []Asset {\n\tlist := make([]Asset, 0, len(c.assets))\n\tfor _, a := range c.assets {\n\t\tlist = append(list, a)\n\t}\n\tsort.Sort(byName(list))\n\treturn list\n}",
"func Names() []string {\n\tnmMap := map[string]string{}\n\t// Get filesystem\n\tstatikFS, err := fs.New()\n\tif err != nil {\n\t\treturn []string{}\n\t}\n\t//Get precure names in files\n\tfor _, path := range getPaths(statikFS) {\n\t\tfor _, nm := range getNames(statikFS, path) {\n\t\t\tif len(nm) > 0 && !strings.HasSuffix(nm, \")\") {\n\t\t\t\tnmMap[nm] = nm\n\t\t\t}\n\t\t}\n\t}\n\t//Output name list\n\tnames := []string{}\n\tfor k, _ := range nmMap {\n\t\tnames = append(names, k)\n\t}\n\treturn names\n}",
"func (a *Assets) List() ([]*Asset, error) {\n\treq, err := http.NewRequest(\"GET\", a.BuildURL(a.themeBaseURL(), \"assets\"), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar assets []*Asset\n\tif err := a.RequestAndDecode(req, \"assets\", &assets); err != nil {\n\t\treturn nil, err\n\t}\n\treturn assets, nil\n}",
"func (c *Client) GetJobAssetFileNames(ctx context.Context, jobID string) ([]string, error) {\n\trequest, err := createListAssetsRequest(ctx, c.URL, c.Username, c.AccessKey, jobID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn doListAssetsRequest(c.HTTPClient, request)\n}",
"func (a *Asset) Name() string {\n\treturn a.name\n}",
"func (d *DefaultImplBundle) Names() []string {\n\tvar a []string\n\tfor _, tmpl := range d.t.Templates() {\n\t\ta = append(a, tmpl.Name())\n\t}\n\tsort.Strings(a)\n\treturn a\n}",
"func (o ProjectFeedOutput) AssetTypes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ProjectFeed) pulumi.StringArrayOutput { return v.AssetTypes }).(pulumi.StringArrayOutput)\n}",
"func Names() []string {\n\tvar names []string\n\n\tfor _, f := range Faces() {\n\t\tnames = utils.AppendIfMissing(names, f.Name)\n\t}\n\n\treturn names\n}",
"func (accs Accounts) Names() (names []string) {\n\tnames = make([]string, 0, len(accs))\n\n\tfor name := range accs {\n\t\tnames = append(names, name)\n\t}\n\n\tsort.Strings(names)\n\n\treturn\n}",
"func (r *RPMs) Names() []string {\n\tvar names []string\n\tfor _, rpm := range *r {\n\t\tnames = append(names, rpm.Name())\n\t}\n\n\treturn names\n}",
"func names() []string {\n\tonce.Do(load)\n\tnames, _ := nameCache.Load().([]string)\n\tif names == nil {\n\t\tnames = make([]string, 0, len(modMap))\n\t\tfor name := range modMap {\n\t\t\tnames = append(names, name)\n\t\t}\n\t\tsort.Strings(names)\n\t\tnameCache.Store(names)\n\t}\n\treturn names\n}",
"func (d *DeploymentsUpdated) Names() (names []string) {\n\tfor name := range *d {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}",
"func (o *Objects) Names() (s sort.StringSlice) {\n\tfor _, obj := range o.src {\n\t\ts = append(s, obj.name)\n\t}\n\tsort.Sort(s)\n\treturn s\n}",
"func (f *Files) Names() []string {\n\tvar names []string\n\tfor _, ff := range *f {\n\t\tnames = append(names, ff.Name())\n\t}\n\n\treturn names\n}",
"func (a Attributes) Names() []string {\n\tnames := make([]string, len(a))\n\ti := 0\n\tfor n := range a {\n\t\tnames[i] = n\n\t\ti++\n\t}\n\treturn names\n}",
"func (x *XferPipe) Names() []string {\n\tvar names = make([]string, x.Len())\n\tif x.Len() == 0 {\n\t\treturn names\n\t}\n\tfor i, filter := range x.filters {\n\t\tnames[i] = filter.Name()\n\t}\n\treturn names\n}",
"func (colibri *Colibri) ListNames() []string {\n\tnames := make([]string, len(colibri.cache))\n\n\ti := 0\n\tfor _, flower := range colibri.cache {\n\t\tnames[i] = flower.Container.Name\n\t\ti++\n\t}\n\n\treturn names\n}",
"func (t *Template) Names() []string {\n\tkeys := make([]string, len(t.templates))\n\tfor k := range t.templates {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}",
"func (d *Downloader) FileNames() []string {\n\tnames := make([]string, len(d.items))\n\tfor c, i := range d.items {\n\t\tnames[c] = i.name\n\t}\n\treturn names\n}",
"func (c *DeployomentCollection) GetNames() []string {\n\tvar names []string\n\tfor _, item := range c.Items {\n\t\tnames = append(names, item.Name)\n\t}\n\treturn names\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RestoreAsset restores an asset under the given directory
|
func RestoreAsset(dir, name string) error {
return restore.Asset(dir, name, AssetAndInfo)
}
|
[
"func (s *Snapshot) Restore(ctx context.Context, a *ARC) error {\n\tcur, err := NewSnapshot(ctx, a)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := s.checkUsable(ctx, a, cur); err != nil {\n\t\treturn err\n\t}\n\tif err := s.restorePackages(ctx, a, cur); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (r Restorer) Restore() error {\n\tvar stashFiles []string\n\terr := filepath.Walk(r.path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.HasSuffix(path, failpointStashFileSuffix) {\n\t\t\tstashFiles = append(stashFiles, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, filePath := range stashFiles {\n\t\toriginFileName := filePath[:len(filePath)-len(failpointStashFileSuffix)]\n\t\tif err := os.Remove(originFileName); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Rename(filePath, originFileName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *Snapshot) Restore(filename string) error {\n\tval, ok := s.table[filename]\n\tif !ok {\n\t\treturn errors.Errorf(\"snapshot of path %s not found\", filename)\n\t}\n\tif err := ioutil.WriteFile(filename, val.content, val.mode); err != nil {\n\t\treturn errors.Wrap(err, \"failed to restore file content\")\n\t}\n\t// Always set the permission again; necessary when the file exist already when calling WriteFile.\n\tif err := os.Chmod(filename, val.mode); err != nil {\n\t\treturn errors.Wrap(err, \"failed to restore file permission\")\n\t}\n\tif err := os.Chown(filename, val.uid, val.gid); err != nil {\n\t\treturn errors.Wrap(err, \"failed to restore file ownership\")\n\t}\n\treturn nil\n}",
"func (c *Client) UnarchiveAsset(asset *Asset) (unarchived *Asset, err error) {\n\tif asset == nil {\n\t\terr = fmt.Errorf(\"UnarchiveAsset failed. Asset cannot be nil!\")\n\t\treturn\n\t}\n\n\tif err = asset.Validate(); err != nil {\n\t\treturn\n\t}\n\n\tc.rl.Wait()\n\n\tunarchived = new(Asset)\n\tcontentfulError := new(Error)\n\tpath := fmt.Sprintf(\"spaces/%v/assets/%v/archived\", asset.Space.ID, asset.System.ID)\n\t_, err = c.sling.New().\n\t\tDelete(path).\n\t\tReceive(unarchived, contentfulError)\n\n\treturn unarchived, handleError(err, contentfulError)\n}",
"func (a *Asset) Unpack(reader io.Reader) error {\n\ta.resetDigest()\n\n\ttee := io.TeeReader(reader, a.digest.Hash())\n\n\tif a.virtual {\n\t\tif err := a.checkVirtualSymlink(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := os.Create(a.path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer f.Close()\n\n\t\tif _, err := io.Copy(f, tee); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := checkDir(a.path, ErrInvalidAsset); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// FIXME there's probably a double-unarchive bug here.\n\t\terr := archive.Unpack(tee, a.path, &archive.TarOptions{NoLchown: os.Geteuid() != 0})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func restoreBackup(spec volumeSpec) error {\n\tvar source = spec.BackupURL\n\tglog.V(4).Infof(\"Restoring `%s` from backup\", spec.Name)\n\tif source == \"\" {\n\t\tglog.V(4).Infof(\"Backup url not found. Skipping.\")\n\t\treturn nil\n\t}\n\tglog.V(4).Infof(\"Downloading `%s`.\", source)\n\tbackupFile, err := getBackupFile(source)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error, while downloading: %q\", err)\n\t\treturn err\n\t}\n\tdefer os.Remove(backupFile.fileName)\n\n\textract, err := getExtractor(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglog.V(4).Infof(\"Extracting backup\")\n\terr = extract(backupFile, spec.Path)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error, while extraction: %q\", err)\n\t\treturn err\n\t}\n\n\tif err := updatePermissions(spec.Path); err != nil {\n\t\tglog.V(4).Infof(\"Error, while chcon: %q\", err)\n\t\treturn err\n\t}\n\n\tglog.V(4).Infof(\"Restoring complete\")\n\treturn nil\n}",
"func (r *Repo) restoreDir(d files.Dir, destination string, buffer []byte) error {\n\tfor _, childFile := range d.Files {\n\t\tpkg.Log.Debugf(\"Restoring file %s in %s\", childFile.Name, destination)\n\t\tif err := utils.CopyFile(r.getPathInRepo(childFile), filepath.Join(destination, childFile.Name), buffer); err != nil {\n\t\t\tif pkg.OmitErrors {\n\t\t\t\tpkg.Log.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, childDir := range d.Dirs {\n\t\tpkg.Log.Debugf(\"Restoring directory %s in %s\", childDir.Name, destination)\n\t\tchildPath := filepath.Join(destination, childDir.Name)\n\t\tif err := os.Mkdir(childPath, 0700); err != nil {\n\t\t\tif pkg.OmitErrors {\n\t\t\t\tpkg.Log.Errorf(\"Error restoring folder \\\"%s\\\": %s\\n\", childPath, err.Error())\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := r.restoreDir(childDir, childPath, buffer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *Catalog) Restore(path string) error {\n\tvar records []*Record\n\n\tf, err := os.OpenFile(path, os.O_RDONLY, 0640)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = gob.NewDecoder(f).Decode(&records)\n\tif err == io.EOF {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tfor _, r := range records {\n\t\tc.Insert(r)\n\t}\n\n\treturn nil\n}",
"func mustRestore(baseDir string, assets map[string][]byte, mappings map[string]string) {\n\tfor basename, data := range assets {\n\t\tif mappings != nil {\n\t\t\treplacement := mappings[basename]\n\t\t\tif replacement != \"\" {\n\t\t\t\tbasename = replacement\n\t\t\t}\n\t\t}\n\t\tfilename := path.Join(baseDir, basename)\n\t\tdirname := path.Dir(filename)\n\t\tif err := os.MkdirAll(dirname, os.ModePerm); err != nil {\n\t\t\tlog.Fatalf(\"Failed to create asset dir %s: %v\", dirname, err)\n\t\t}\n\n\t\tif err := ioutil.WriteFile(filename, data, os.ModePerm); err != nil {\n\t\t\tlog.Fatalf(\"Failed to write asset %s: %v\", filename, err)\n\t\t}\n\t}\n}",
"func Restore(ctx context.Context, build cib.Service, analyzed llb.State, cache llb.RunOption) llb.State {\n\t// Execute restorer\n\t// See https://github.com/buildpacks/spec/blob/main/platform.md#restorer\n\treturn analyzed.Run(\n\t\tllb.Args([]string{\"/cnb/lifecycle/restorer\"}),\n\t\tllb.WithCustomName(\"Restore\"),\n\t\tcache,\n\t).Root()\n}",
"func (e *extension) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error {\n\tfns := e.forEach(func(ctx context.Context, ext *extensionsv1alpha1.Extension, extType string, providerConfig *runtime.RawExtension, _ time.Duration) error {\n\t\treturn extensions.RestoreExtensionWithDeployFunction(\n\t\t\tctx,\n\t\t\te.client,\n\t\t\tshootState,\n\t\t\textensionsv1alpha1.ExtensionResource,\n\t\t\tfunc(ctx context.Context, operationAnnotation string) (extensionsv1alpha1.Object, error) {\n\t\t\t\treturn e.deploy(ctx, ext, extType, providerConfig, operationAnnotation)\n\t\t\t},\n\t\t)\n\t})\n\n\treturn flow.Parallel(fns...)(ctx)\n}",
"func (e *extension) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error {\n\tfns := e.forEach(func(ctx context.Context, extension extensionsv1alpha1.Extension, _ time.Duration) error {\n\t\tdeployer := &deployer{e.client, extension}\n\n\t\treturn common.RestoreExtensionWithDeployFunction(\n\t\t\tctx,\n\t\t\te.client,\n\t\t\tshootState,\n\t\t\textensionsv1alpha1.ExtensionResource,\n\t\t\te.values.Namespace,\n\t\t\tdeployer.deploy,\n\t\t)\n\t})\n\n\treturn flow.Parallel(fns...)(ctx)\n}",
"func decryptAndRestore(source string, destination string) {\n\n\tfileName := extractName(source)\n\tif fileName != \".meta\" {\n\t\tdecrypted := decryptFile(source)\n\t\twriteToDisk(destination+fileName, decrypted)\n\t}\n}",
"func (d *zfs) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {\n\tsnapVol := NewVolume(d, d.name, vol.volType, vol.contentType, fmt.Sprintf(\"%s/%s\", vol.name, snapshotName), vol.config, vol.poolConfig)\n\n\t// Get the list of snapshots.\n\tentries, err := d.getDatasets(d.dataset(vol, false))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check if more recent snapshots exist.\n\tidx := -1\n\tsnapshots := []string{}\n\tfor i, entry := range entries {\n\t\tif entry == fmt.Sprintf(\"@snapshot-%s\", snapshotName) {\n\t\t\t// Located the current snapshot.\n\t\t\tidx = i\n\t\t\tcontinue\n\t\t} else if idx < 0 {\n\t\t\t// Skip any previous snapshot.\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(entry, \"@snapshot-\") {\n\t\t\t// Located a normal snapshot following ours.\n\t\t\tsnapshots = append(snapshots, strings.TrimPrefix(entry, \"@snapshot-\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(entry, \"@\") {\n\t\t\t// Located an internal snapshot.\n\t\t\treturn fmt.Errorf(\"Snapshot %q cannot be restored due to subsequent internal snapshot(s) (from a copy)\", snapshotName)\n\t\t}\n\t}\n\n\t// Check if snapshot removal is allowed.\n\tif len(snapshots) > 0 {\n\t\tif shared.IsFalseOrEmpty(vol.ExpandedConfig(\"zfs.remove_snapshots\")) {\n\t\t\treturn fmt.Errorf(\"Snapshot %q cannot be restored due to subsequent snapshot(s). 
Set zfs.remove_snapshots to override\", snapshotName)\n\t\t}\n\n\t\t// Setup custom error to tell the backend what to delete.\n\t\terr := ErrDeleteSnapshots{}\n\t\terr.Snapshots = snapshots\n\t\treturn err\n\t}\n\n\t// Restore the snapshot.\n\t_, err = shared.RunCommand(\"zfs\", \"rollback\", d.dataset(snapVol, false))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vol.contentType == ContentTypeFS && d.isBlockBacked(vol) && renegerateFilesystemUUIDNeeded(vol.ConfigBlockFilesystem()) {\n\t\t_, err = d.activateVolume(vol)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer func() { _, _ = d.deactivateVolume(vol) }()\n\n\t\tvolPath, err := d.GetVolumeDiskPath(vol)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\td.logger.Debug(\"Regenerating filesystem UUID\", logger.Ctx{\"dev\": volPath, \"fs\": vol.ConfigBlockFilesystem()})\n\t\terr = regenerateFilesystemUUID(vol.ConfigBlockFilesystem(), volPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// For VM images, restore the associated filesystem dataset too.\n\tif vol.IsVMBlock() {\n\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\terr := d.RestoreVolume(fsVol, snapshotName, op)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (service *AssetsService) Unarchive(spaceID string, asset *Asset) error {\n\tpath := fmt.Sprintf(\"/spaces/%s/assets/%s/archived\", spaceID, asset.Sys.ID)\n\tmethod := \"DELETE\"\n\n\treq, err := service.c.newRequest(method, path, nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tversion := strconv.Itoa(asset.Sys.Version)\n\treq.Header.Set(\"X-Contentful-Version\", version)\n\n\treturn service.c.do(req, asset)\n}",
"func (s *Sandbox) Restore() error {\n\tss, _, err := s.newStore.FromDisk(s.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.loadState(ss)\n\ts.loadHypervisor(ss.HypervisorState)\n\ts.loadDevices(ss.Devices)\n\ts.loadAgent(ss.AgentState)\n\ts.loadNetwork(ss.Network)\n\treturn nil\n}",
"func RestoreGitFolder(repositoryFullPath string, sourceRepositoryName string) error {\n\n\tif !helper.IsDirExists(repositoryFullPath + \"/source/\" + sourceRepositoryName + \"/.git\") {\n\t\terr := helper.CreateNewDir(repositoryFullPath + \"/source/\" + sourceRepositoryName + \"/.git\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr := helper.CopyDirContent(repositoryFullPath+\"/tmp/.git\", repositoryFullPath+\"/source/\"+sourceRepositoryName+\"/.git\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *Container) Restore() error {\n\t_, css, err := c.sandbox.newStore.FromDisk(c.sandbox.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcs, ok := css[c.id]\n\tif !ok {\n\t\treturn errContainerPersistNotExist\n\t}\n\n\tc.loadContState(cs)\n\tc.loadContDevices(cs)\n\tc.loadContProcess(cs)\n\tc.loadContMounts(cs)\n\treturn nil\n}",
"func (a *actuator) Restore(ctx context.Context, log logr.Logger, infrastructure *extensionsv1alpha1.Infrastructure, cluster *extensionscontroller.Cluster) error {\n\tflowState, err := a.getStateFromInfraStatus(infrastructure)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif flowState != nil {\n\t\treturn a.reconcileWithFlow(ctx, log, infrastructure, flowState)\n\t}\n\tif a.shouldUseFlow(infrastructure, cluster) {\n\t\tflowState, err = a.migrateFromTerraformerState(ctx, log, infrastructure)\n\t\tif err != nil {\n\t\t\treturn util.DetermineError(err, helper.KnownCodes)\n\t\t}\n\t\treturn a.reconcileWithFlow(ctx, log, infrastructure, flowState)\n\t}\n\treturn a.restoreWithTerraformer(ctx, log, infrastructure)\n}",
"func downloadAsset(uri string) (localfile string, err error) {\n\tbasename := path.Base(uri)\n\tfileExt := path.Ext(basename)\n\n\t// We'll be appending 65 chars to create a local file name for the asset,\n\t// this 60-char limit prevents creating a file name longer than 255 chars. We\n\t// could allow a few more characters until 255 but 60 sounds like a sane\n\t// limit.\n\tif len(basename) > 60 {\n\t\tbasename = basename[:60]\n\t}\n\n\tlocalfile = assetsDirectory + fmt.Sprintf(\"%s.%x\", basename, sha256.Sum256([]byte(uri)))\n\n\tif !fileExists(localfile) {\n\t\tvar body io.Reader\n\t\tvar res *http.Response\n\n\t\tif res, err = http.Get(uri); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif res.StatusCode != http.StatusOK {\n\t\t\treturn \"\", fmt.Errorf(\"Expecting 200 OK, got: %s\", res.Status)\n\t\t}\n\n\t\tdefer res.Body.Close()\n\n\t\tvar fp *os.File\n\n\t\tif fp, err = os.Create(localfile); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tdefer fp.Close()\n\n\t\tif fileExt == \".bz2\" {\n\t\t\tbody = bzip2.NewReader(res.Body)\n\t\t} else {\n\t\t\tbody = res.Body\n\t\t}\n\n\t\tif _, err = io.Copy(fp, body); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t}\n\n\treturn localfile, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
placeholders returns a string with count ? placeholders joined with commas.
|
func placeholders(cql *bytes.Buffer, count int) {
if count < 1 {
return
}
for i := 0; i < count-1; i++ {
cql.WriteByte('?')
cql.WriteByte(',')
}
cql.WriteByte('?')
}
|
[
"func replacePlaceholders(input string) string {\n\tif input == \"\" {\n\t\treturn input\n\t}\n\n\tvar sb strings.Builder\n\tpCount := 1\n\tvar i int\n\tfor {\n\t\ti = strings.IndexRune(input, '?')\n\t\tif i == -1 {\n\t\t\tsb.WriteString(input)\n\t\t\tbreak\n\t\t} else {\n\t\t\t// Found a ?\n\t\t\tsb.WriteString(input[0:i])\n\t\t\tsb.WriteString(\"@p\")\n\t\t\tsb.WriteString(strconv.Itoa(pCount))\n\t\t\tpCount++\n\t\t\tif i == len(input)-1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tinput = input[i+1:]\n\t\t}\n\t}\n\n\treturn sb.String()\n}",
"func replacePlaceholders(query string) string {\n\tbuf := &strings.Builder{}\n\ti := 0\n\tfor pos := strings.Index(query, \"?\"); pos >= 0; pos = strings.Index(query, \"?\") {\n\t\ti++\n\t\tbuf.WriteString(query[:pos] + \"$\" + strconv.Itoa(i))\n\t\tquery = query[pos+1:]\n\t}\n\tbuf.WriteString(query)\n\treturn buf.String()\n}",
"func writePlaceholders(buf *bytes.Buffer, length int, join string, offset int) {\n\tfor i := 0; i < length; i++ {\n\t\tif i > 0 {\n\t\t\tbuf.WriteString(join)\n\t\t}\n\t\twritePlaceholder(buf, i+offset)\n\t}\n}",
"func joinPlaceholders(length int, join string, offset int) string {\n\tbuf := bufPool.Get()\n\tdefer bufPool.Put(buf)\n\n\tfor i := 0; i < length; i++ {\n\t\tif i > 0 {\n\t\t\tbuf.WriteString(join)\n\t\t}\n\t\twritePlaceholder(buf, i+offset)\n\t}\n\treturn buf.String()\n}",
"func GenerateNamedPlaceholders(fields []string) string {\n\tvar placeholders string\n\tfor _, field := range fields {\n\t\tplaceholders += fmt.Sprintf(\":%s, \", field)\n\t}\n\treturn strings.TrimSuffix(placeholders, \", \")\n}",
"func Interpolate(sql string, vals []interface{}) (string, error) {\n\t// Get the number of arguments to add to this query\n\tmaxVals := len(vals)\n\n\t// If our query is blank and has no args return early\n\t// Args with a blank query is an error\n\tif sql == \"\" {\n\t\tif maxVals != 0 {\n\t\t\treturn \"\", ErrArgumentMismatch\n\t\t}\n\t\treturn \"\", nil\n\t}\n\n\t// If we have no args and the query has no place holders return early\n\t// No args for a query with place holders is an error\n\tif len(vals) == 0 {\n\t\tfor _, c := range sql {\n\t\t\tif c == '?' {\n\t\t\t\treturn \"\", ErrArgumentMismatch\n\t\t\t}\n\t\t}\n\t\treturn sql, nil\n\t}\n\n\t// Iterate over each rune in the sql string and replace with the next arg if it's a place holder\n\tcurVal := 0\n\tbuf := bytes.Buffer{}\n\n\tfor _, r := range sql {\n\t\tif r != '?' {\n\t\t\tbuf.WriteRune(r)\n\t\t} else if r == '?' && curVal < maxVals {\n\t\t\tv := vals[curVal]\n\n\t\t\tvaluer, ok := v.(driver.Valuer)\n\t\t\tif ok {\n\t\t\t\tval, err := valuer.Value()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\tv = val\n\t\t\t}\n\n\t\t\tvalueOfV := reflect.ValueOf(v)\n\t\t\tkindOfV := valueOfV.Kind()\n\n\t\t\tif v == nil {\n\t\t\t\tbuf.WriteString(\"NULL\")\n\t\t\t} else if isInt(kindOfV) {\n\t\t\t\tvar ival = valueOfV.Int()\n\n\t\t\t\tbuf.WriteString(strconv.FormatInt(ival, 10))\n\t\t\t} else if isUint(kindOfV) {\n\t\t\t\tvar uival = valueOfV.Uint()\n\n\t\t\t\tbuf.WriteString(strconv.FormatUint(uival, 10))\n\t\t\t} else if kindOfV == reflect.String {\n\t\t\t\tvar str = valueOfV.String()\n\n\t\t\t\tif !utf8.ValidString(str) {\n\t\t\t\t\treturn \"\", ErrNotUTF8\n\t\t\t\t}\n\n\t\t\t\tbuf.WriteString(escapeAndQuoteString(str))\n\t\t\t} else if isFloat(kindOfV) {\n\t\t\t\tvar fval = valueOfV.Float()\n\n\t\t\t\tbuf.WriteString(strconv.FormatFloat(fval, 'f', -1, 64))\n\t\t\t} else if kindOfV == reflect.Bool {\n\t\t\t\tvar bval = valueOfV.Bool()\n\n\t\t\t\tif bval 
{\n\t\t\t\t\tbuf.WriteRune('1')\n\t\t\t\t} else {\n\t\t\t\t\tbuf.WriteRune('0')\n\t\t\t\t}\n\t\t\t} else if kindOfV == reflect.Struct {\n\t\t\t\tif typeOfV := valueOfV.Type(); typeOfV == typeOfTime {\n\t\t\t\t\tt := valueOfV.Interface().(time.Time)\n\t\t\t\t\tbuf.WriteString(escapeAndQuoteString(t.UTC().Format(timeFormat)))\n\t\t\t\t} else {\n\t\t\t\t\treturn \"\", ErrInvalidValue\n\t\t\t\t}\n\t\t\t} else if kindOfV == reflect.Slice {\n\t\t\t\ttypeOfV := reflect.TypeOf(v)\n\t\t\t\tsubtype := typeOfV.Elem()\n\t\t\t\tkindOfSubtype := subtype.Kind()\n\n\t\t\t\tsliceLen := valueOfV.Len()\n\t\t\t\tstringSlice := make([]string, 0, sliceLen)\n\n\t\t\t\tif sliceLen == 0 {\n\t\t\t\t\treturn \"\", ErrInvalidSliceLength\n\t\t\t\t} else if isInt(kindOfSubtype) {\n\t\t\t\t\tfor i := 0; i < sliceLen; i++ {\n\t\t\t\t\t\tvar ival = valueOfV.Index(i).Int()\n\t\t\t\t\t\tstringSlice = append(stringSlice, strconv.FormatInt(ival, 10))\n\t\t\t\t\t}\n\t\t\t\t} else if isUint(kindOfSubtype) {\n\t\t\t\t\tfor i := 0; i < sliceLen; i++ {\n\t\t\t\t\t\tvar uival = valueOfV.Index(i).Uint()\n\t\t\t\t\t\tstringSlice = append(stringSlice, strconv.FormatUint(uival, 10))\n\t\t\t\t\t}\n\t\t\t\t} else if kindOfSubtype == reflect.String {\n\t\t\t\t\tfor i := 0; i < sliceLen; i++ {\n\t\t\t\t\t\tvar str = valueOfV.Index(i).String()\n\t\t\t\t\t\tif !utf8.ValidString(str) {\n\t\t\t\t\t\t\treturn \"\", ErrNotUTF8\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstringSlice = append(stringSlice, escapeAndQuoteString(str))\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treturn \"\", ErrInvalidSliceValue\n\t\t\t\t}\n\t\t\t\tbuf.WriteRune('(')\n\t\t\t\tbuf.WriteString(strings.Join(stringSlice, \",\"))\n\t\t\t\tbuf.WriteRune(')')\n\t\t\t} else {\n\t\t\t\treturn \"\", ErrInvalidValue\n\t\t\t}\n\n\t\t\tcurVal++\n\t\t} else {\n\t\t\treturn \"\", ErrArgumentMismatch\n\t\t}\n\t}\n\n\tif curVal != maxVals {\n\t\treturn \"\", ErrArgumentMismatch\n\t}\n\n\treturn buf.String(), nil\n}",
"func (d *sqlDialect) placeHolderSQL(b sb.SQLBuilder, i interface{}) {\n\tb.WriteRunes(d.dialectOptions.PlaceHolderRune)\n\tif d.dialectOptions.IncludePlaceholderNum {\n\t\tb.WriteStrings(strconv.FormatInt(int64(b.CurrentArgPosition()), 10))\n\t}\n\tb.WriteArg(i)\n}",
"func questionToDollarPlaceholders(buf *strings.Builder, query string) {\n\ti := 0\n\tfor {\n\t\tp := strings.Index(query, \"?\")\n\t\tif p < 0 {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteString(query[:p])\n\t\tif len(query[p:]) > 1 && query[p:p+2] == \"??\" {\n\t\t\tbuf.WriteString(\"?\")\n\t\t\tquery = query[p+2:]\n\t\t} else {\n\t\t\ti++\n\t\t\tbuf.WriteString(\"$\" + strconv.Itoa(i))\n\t\t\tquery = query[p+1:]\n\t\t}\n\t}\n\tbuf.WriteString(query)\n}",
"func BuildMultiColumnsPlaceholderStrWithKey(strs []string, key string) string {\n\tif len(strs) == 0 {\n\t\treturn \"\"\n\t}\n\tvar holderStrSlice []string\n\tfor _, rawparam := range strs {\n\t\tbuffer := bytes.NewBufferString(\"\")\n\t\tbuffer.WriteByte('(')\n\t\tsplittedParam := strings.Split(rawparam, key)\n\t\tvar tmpHolders []string\n\t\tfor range splittedParam {\n\t\t\ttmpHolders = append(tmpHolders, \"?\")\n\t\t}\n\t\tbuffer.WriteString(strings.Join(tmpHolders, \",\"))\n\t\tbuffer.WriteByte(')')\n\t\tholderStrSlice = append(holderStrSlice, buffer.String())\n\t}\n\tholderStr := strings.Join(holderStrSlice, \",\")\n\treturn holderStr\n}",
"func (s ShortLinkSQL) composeParamList(numParams int) string {\n\tparams := make([]string, 0, numParams)\n\tfor i := 0; i < numParams; i++ {\n\t\tparams = append(params, fmt.Sprintf(\"$%d\", i+1))\n\t}\n\n\tparameterStr := strings.Join(params, \", \")\n\treturn parameterStr\n}",
"func formatString(numKeysAndValues int) string {\n\tvar sb strings.Builder\n\n\tsb.WriteString(\"%s\")\n\n\tif numKeysAndValues > 0 {\n\t\tsb.WriteString(\", \")\n\t}\n\n\tfor i := 0; i < numKeysAndValues/2; i++ {\n\t\tif i > 0 {\n\t\t\tsb.WriteString(\", \")\n\t\t}\n\n\t\tsb.WriteString(\"%v=%v\")\n\t}\n\n\treturn sb.String()\n}",
"func resolvePlaceholders(tokenInfo m.TokenPayload) (resolvedPlaceholder ResolvedPlaceholder) {\n\n\tvar selectField string\n\tvar placeholderValues []interface{}\n\n\tif len(tokenInfo.Placeholders) == 0 {\n\t\t// No placeholders.\n\t\tselectField = \"text\"\n\t} else {\n\t\t// There is a collection of placeholders in the request\n\t\tsqlSelectPlaceholder := \"\"\n\t\t// Iterate over the placeholders:\n\t\t// Concatenate the final sql request with bind variables\n\t\t// Keep collecting the values that will be applied to the query\n\t\tfor i, ph := range tokenInfo.Placeholders {\n\t\t\tif i == 0 {\n\t\t\t\tsqlSelectPlaceholder = \"replace (text, ?, ?)\"\n\t\t\t\tplaceholderValues = append(placeholderValues, \"{\"+ph.Name+\"}\", ph.Value)\n\t\t\t} else {\n\t\t\t\tsqlSelectPlaceholder = \"replace(\" + sqlSelectPlaceholder + \", ?, ?)\"\n\t\t\t\tplaceholderValues = append(placeholderValues, \"{\"+ph.Name+\"}\", ph.Value)\n\t\t\t}\n\t\t}\n\t\tselectField = sqlSelectPlaceholder + \" as text\"\n\t}\n\n\treturn ResolvedPlaceholder{selectField, placeholderValues}\n}",
"func questionInterpolate(query string, args ...interface{}) string {\n\tbuf := &strings.Builder{}\n\t// i is the position of the ? in the query\n\tfor i := strings.Index(query, \"?\"); i >= 0 && len(args) > 0; i = strings.Index(query, \"?\") {\n\t\tbuf.WriteString(query[:i])\n\t\tif len(query[i:]) > 1 && query[i:i+2] == \"??\" {\n\t\t\tbuf.WriteString(\"?\")\n\t\t\tquery = query[i+2:]\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteString(interpolateSQLValue(args[0]))\n\t\tquery = query[i+1:]\n\t\targs = args[1:]\n\t}\n\tbuf.WriteString(query)\n\treturn buf.String()\n}",
"func (s *Session) ReplacePlaceholders(query string) (string, error) {\n\tvar format sq.PlaceholderFormat = sq.Question\n\n\tif s.DB.DriverName() == \"postgres\" {\n\t\tformat = sq.Dollar\n\t}\n\treturn format.ReplacePlaceholders(query)\n}",
"func formatters(n int) string {\n\tf := []string{}\n\tfor i := 0; i < n; i++ {\n\t\tf = append(f, \"%#v\")\n\t}\n\treturn strings.Join(f, \", \")\n}",
"func (idx PlaceholderIdx) String() string {\n\treturn fmt.Sprintf(\"$%d\", idx+1)\n}",
"func fillPlaceholders(value string, opts *ApiOptions) (string, error) {\n\tvar err error\n\treturn placeholderRE.ReplaceAllStringFunc(value, func(m string) string {\n\t\tvar name string\n\t\tif m[0] == ':' {\n\t\t\tname = m[1:]\n\t\t} else {\n\t\t\tname = m[1 : len(m)-1]\n\t\t}\n\n\t\tswitch name {\n\t\tcase \"owner\":\n\t\t\tif baseRepo, e := opts.BaseRepo(); e == nil {\n\t\t\t\treturn baseRepo.RepoOwner()\n\t\t\t} else {\n\t\t\t\terr = e\n\t\t\t}\n\t\tcase \"repo\":\n\t\t\tif baseRepo, e := opts.BaseRepo(); e == nil {\n\t\t\t\treturn baseRepo.RepoName()\n\t\t\t} else {\n\t\t\t\terr = e\n\t\t\t}\n\t\tcase \"branch\":\n\t\t\tif os.Getenv(\"GH_REPO\") != \"\" {\n\t\t\t\terr = errors.New(\"unable to determine an appropriate value for the 'branch' placeholder\")\n\t\t\t\treturn m\n\t\t\t}\n\n\t\t\tif branch, e := opts.Branch(); e == nil {\n\t\t\t\treturn branch\n\t\t\t} else {\n\t\t\t\terr = e\n\t\t\t}\n\t\t}\n\t\treturn m\n\t}), err\n}",
"func (format *Format) GetPlaceholders() map[string]Placeholder {\n\treturn format.placeholders\n}",
"func fillPlaceholders(givenFlags, requiredFlags []string) ([]string, error) {\n\tvar ret []string\n\tfor _, required := range requiredFlags {\n\t\tif !IsPlaceholder(required) {\n\t\t\tret = append(ret, required)\n\t\t\tcontinue\n\t\t}\n\n\t\tnewRequest := strings.Replace(required, \"$optional(\", \"$(\", 1)\n\n\t\t// look for a match\n\t\tfor _, given := range givenFlags {\n\t\t\tkey := newRequest[2 : len(newRequest)-1]\n\t\t\tif res, match := TryMatch(key, given); match {\n\t\t\t\tret = append(ret, res)\n\t\t\t\tgoto Continue\n\t\t\t}\n\t\t}\n\n\t\tif strings.Contains(required, \"$optional(\") {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, errors.New(fmt.Sprintf(\"placeholder definition \\\"%s\\\" unfilled in \", required) + \"%s\")\n\n\tContinue:\n\t\tcontinue\n\t}\n\treturn ret, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FindMessages to list the messages
|
// FindMessages returns every row of the messages table as a slice of
// entity.Message. Unlike the previous version, it propagates connection,
// query, scan, and iteration errors to the caller instead of only printing
// them (which previously could also dereference a nil DB after a failed
// connect).
func FindMessages() ([]entity.Message, error) {
	DB, errDB := db.GetDB()
	if errDB != nil {
		return nil, fmt.Errorf("connecting to db: %w", errDB)
	}
	// Close the connection as soon as this function returns, on every path.
	defer DB.Close()

	results, queryErr := DB.Query("SELECT * FROM messages")
	if queryErr != nil {
		return nil, fmt.Errorf("querying messages: %w", queryErr)
	}
	// Rows hold a connection until closed; release them on every path.
	defer results.Close()

	var messages []entity.Message
	for results.Next() {
		var (
			ID     int
			Text   string
			Sender string
		)
		if err := results.Scan(&ID, &Text, &Sender); err != nil {
			return nil, fmt.Errorf("scanning message row: %w", err)
		}
		messages = append(messages, entity.Message{ID: ID, Text: Text, Sender: Sender})
	}
	// rows.Next() returning false may hide an iteration error; surface it.
	if err := results.Err(); err != nil {
		return nil, fmt.Errorf("iterating message rows: %w", err)
	}
	return messages, nil
}
|
[
"func (p *MessageService) MessagesFind(message core.Message) []core.Message {\n\tif message.SenderId != \"\" {\n\t\treturn p.db.FindMessages(bson.D{{\"SenderId\", message.SenderId}})\n\t} else if message.ReceiverId != \"\" {\n\t\treturn p.db.FindMessages(bson.D{{\"ReceiverId\", message.ReceiverId}})\n\t}\n\t// return p.db.FindMessages(bson.D{{}}) ----When logic for group messages is introduced, might reintroduce\n\t//new message find filter cases\n\treturn []core.Message{}\n}",
"func ListMessages(criteria *tat.MessageCriteria, username string, topic tat.Topic) ([]tat.Message, error) {\n\tc, errc := buildMessageCriteria(criteria)\n\tif errc != nil {\n\t\treturn []tat.Message{}, errc\n\t}\n\n\tvar messages []tat.Message\n\tvar err error\n\n\t//set Default criteria.TreeView as notree\n\tif criteria.TreeView == \"\" {\n\t\tcriteria.TreeView = tat.TreeViewNoTree\n\t}\n\n\tvar isInCache bool\n\n\tmessages, isInCache, err = messageListFromCache(criteria, &topic)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while Find All Messages %s from cache\", err)\n\t}\n\n\tif isInCache {\n\t\treturn messages, err\n\t}\n\n\tif criteria.Limit > 500 {\n\t\tlog.Warnf(\"ListMessages: criteriaLimitWarn: criteria with limit more than 500 (%d), username:%s topic:%s criteria:%s\",\n\t\t\tcriteria.Limit, username, topic.Topic, criteria.GetURL())\n\t} else if criteria.Limit > 50 {\n\t\tlog.Warnf(\"ListMessages: criteriaLimitNotice: criteria with limit more than 50 (%d), username:%s topic:%s criteria:%s\",\n\t\t\tcriteria.Limit, username, topic.Topic, criteria.GetURL())\n\t}\n\n\terr = store.GetCMessages(topic.Collection).Find(c).\n\t\tSort(criteria.SortBy).\n\t\tSkip(criteria.Skip).\n\t\tLimit(criteria.Limit).\n\t\tAll(&messages)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while Find All Messages by username:%s with criterias:%s on topic:%s, err:%s\", username, criteria.GetURL(), topic.Topic, err)\n\t\treturn messages, err\n\t}\n\n\tif len(messages) == 0 {\n\t\tcacheMessageList(criteria, &topic, messages)\n\t\treturn messages, nil\n\t}\n\n\tif criteria.TreeView == tat.TreeViewOneTree || criteria.TreeView == tat.TreeViewFullTree {\n\t\tmessages, err = initTree(messages, criteria, username, topic)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error while Find All Messages (getTree) by username:%s with criterias:%s on topic:%s, err:%s\", username, criteria.GetURL(), topic.Topic, err)\n\t\t}\n\t}\n\tif criteria.TreeView == tat.TreeViewOneTree {\n\t\tmessages, err = 
OneTreeMessages(messages, 1, criteria, username, topic)\n\t} else if criteria.TreeView == tat.TreeViewFullTree {\n\t\tmessages, err = FullTreeMessages(messages, 1, criteria, username, topic)\n\t}\n\tif err != nil {\n\t\treturn messages, err\n\t}\n\n\tif criteria.TreeView == tat.TreeViewOneTree &&\n\t\t(criteria.LimitMinNbReplies != \"\" || criteria.LimitMaxNbReplies != \"\") {\n\t\treturn filterNbReplies(messages, criteria)\n\t}\n\n\tcacheMessageList(criteria, &topic, messages)\n\n\treturn messages, err\n}",
"func (c *Client) SearchMessages(ctx context.Context, request *SearchMessagesRequest) (*FoundMessages, error) {\n\tvar result FoundMessages\n\n\tif err := c.rpc.Invoke(ctx, request, &result); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}",
"func (s *ChatServer) SearchMessages(ctx context.Context, req *chat.SearchMessagesRequest) (*chat.MessagesResponse, error) {\n\treturn nil, nil\n}",
"func (maildir *Maildir) List(start, limit int) ([]data.Message, error) {\n\tlog.Println(\"Listing messages in\", maildir.Path)\n\tmessages := make([]data.Message, 0)\n\n\tdir, err := os.Open(maildir.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer dir.Close()\n\n\tn, err := dir.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Slice(n, func(i, j int) bool {\n\t\treturn !n[i].ModTime().Before(n[j].ModTime())\n\t})\n\n\tif start < 0 {\n\t\tstart = 0\n\t}\n\n\tif len(n) < start+limit {\n\t\tlimit = len(n) - start\n\t}\n\n\tif len(n) > start && start > 0 {\n\t\tn = n[start:]\n\t}\n\n\tif len(n) > limit {\n\t\tn = n[:limit]\n\t}\n\n\tfor _, fileinfo := range n {\n\t\tb, err := ioutil.ReadFile(filepath.Join(maildir.Path, fileinfo.Name()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmsg := data.FromBytes(b)\n\t\tm := msg.Parse(maildir.Hostname)\n\t\tm.ID = data.MessageID(fileinfo.Name())\n\t\tm.Created = fileinfo.ModTime()\n\t\tmessages = append(messages, *m)\n\t}\n\n\tlog.Printf(\"Found %d messages\", len(messages))\n\treturn messages, nil\n}",
"func (maildir *Maildir) Search(kind, query string, start, limit int) ([]data.Message, int, error) {\n\tquery = strings.ToLower(query)\n\tvar filteredMessages = make([]data.Message, 0)\n\n\tvar matched int\n\n\terr := filepath.Walk(maildir.Path, func(path string, info os.FileInfo, err error) error {\n\t\tif limit > 0 && len(filteredMessages) >= limit {\n\t\t\treturn errors.New(\"reached limit\")\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tmsg, err := maildir.Load(info.Name())\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn nil\n\t\t}\n\n\t\tswitch kind {\n\t\tcase \"to\":\n\t\t\tfor _, t := range msg.To {\n\t\t\t\tif strings.Contains(strings.ToLower(t.Mailbox+\"@\"+t.Domain), query) {\n\t\t\t\t\tif start > matched {\n\t\t\t\t\t\tmatched++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tfilteredMessages = append(filteredMessages, *msg)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"from\":\n\t\t\tif strings.Contains(strings.ToLower(msg.From.Mailbox+\"@\"+msg.From.Domain), query) {\n\t\t\t\tif start > matched {\n\t\t\t\t\tmatched++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfilteredMessages = append(filteredMessages, *msg)\n\t\t\t}\n\t\tcase \"containing\":\n\t\t\tif strings.Contains(strings.ToLower(msg.Raw.Data), query) {\n\t\t\t\tif start > matched {\n\t\t\t\t\tmatched++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfilteredMessages = append(filteredMessages, *msg)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn filteredMessages, len(filteredMessages), nil\n}",
"func (c *Converter) findNestedMessages(curPkg *ProtoPackage, msgDesc *descriptor.DescriptorProto) (map[*descriptor.DescriptorProto]string, error) {\n\n\t// Get a list of all nested messages, and how often they occur:\n\tnestedMessages := make(map[*descriptor.DescriptorProto]string)\n\tif err := c.recursiveFindNestedMessages(curPkg, msgDesc, msgDesc.GetName(), nestedMessages); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Now filter them:\n\tresult := make(map[*descriptor.DescriptorProto]string)\n\tfor message, messageName := range nestedMessages {\n\t\tif !message.GetOptions().GetMapEntry() && !strings.HasPrefix(messageName, \".google.protobuf.\") {\n\t\t\tresult[message] = strings.TrimLeft(messageName, \".\")\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func (messer *MessageService ) AliesMessages( theid , myid string , offset int ) ([]*entity.Message ){\n\tmessages , er:= messer.MessageRepo.AliesMessages(theid , myid , offset)\n\tif er != nil {\n\t\treturn nil \n\t}\n\treturn messages\n}",
"func ListMessages(conf config.Config) {\n\n\tmessages, err := slack.GetMessages(conf)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \" + err.Error())\n\t\treturn\n\t}\n\n\tlastDay := \"\"\n\tfor i := len(messages) - 1; i >= 0; i-- {\n\t\tmess := messages[i]\n\t\tcurrDay := mess.GetDateString()\n\n\t\tif lastDay != currDay {\n\t\t\tlastDay = currDay\n\t\t\tfmt.Println(\"\\n\" + currDay)\n\t\t}\n\t\t//text := strings.Replace(mess.Text, \"\\n\", \" \\n\", -1)\n\t\tfmt.Println(\" \" + mess.GetTimeString() + \": \" + mess.Text)\n\t}\n}",
"func (lc *repo) GetAllMessages() []model.Message {\n\treturn lc.data\n}",
"func (MessageUsecase *MessageUsecaseImpl) GetAll() (model.Messages, error) {\n\tgolog.Info(\"Enter Get All Message Usecase :\")\n\tMessages, err := MessageUsecase.MessageRepository.FindAll()\n\tgolog.Info(\"finish !\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Messages, nil\n}",
"func (client *Client) SearchSecretMessages(request *SearchSecretMessagesRequest) (*FoundMessages, error) {\n\t// Unlock receive function at the end of this function to mark received event as processed\n\tdefer client.Unlock(\"SearchSecretMessages\")\n\tresult, err := client.Send(Request{\n\t\tmeta: meta{\n\t\t\tType: \"searchSecretMessages\",\n\t\t},\n\t\tData: map[string]interface{}{\n\t\t\t\"chat_id\": request.ChatID,\n\t\t\t\"query\": request.Query,\n\t\t\t\"from_search_id\": request.FromSearchID,\n\t\t\t\"limit\": request.Limit,\n\t\t\t\"filter\": request.Filter,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif result.Type == \"error\" {\n\t\treturn nil, buildResponseError(result.Data)\n\t}\n\n\treturn UnmarshalFoundMessages(result.Data)\n}",
"func retrieveMessages(lastNMsg int, patient, doctor string, es *esStruct) ([]msg, error) {\n\tvar r map[string]interface{}\n\tvar buf bytes.Buffer\n\tvar esMsgs []msg\n\n\t// prepare the query\n\t// - bring only the last N messages (const lastNMsg)\n\t// - ordered by date (only the earliest messages)\n\t// - matching patient with doctor\n\tquery := map[string]interface{}{\n\t\t\"size\": lastNMsg,\n\t\t\"sort\": map[string]interface{}{\n\t\t\t\"date\": map[string]interface{}{\n\t\t\t\t\"order\": \"desc\",\n\t\t\t},\n\t\t},\n\t\t\"query\": map[string]interface{}{\n\t\t\t\"bool\": map[string]interface{}{\n\t\t\t\t\"must\": []map[string]interface{}{\n\t\t\t\t\t{\"match\": map[string]interface{}{\"patient\": patient}},\n\t\t\t\t\t{\"match\": map[string]interface{}{\"doctor\": doctor}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := json.NewEncoder(&buf).Encode(query); err != nil {\n\t\treturn nil, fmt.Errorf(\"[ERROR] Error encoding query: %s\", err)\n\t}\n\tres, err := search(es, buf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"[ERROR] Error getting response: %s\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif err := json.NewDecoder(res.Body).Decode(&r); err != nil {\n\t\treturn nil, fmt.Errorf(\"[ERROR] Error parsing the response body: %s\", err)\n\t}\n\n\t// return the history of messages only if they could be found on elasticsearch\n\tif res.StatusCode != 404 {\n\t\tfor _, hit := range r[\"hits\"].(map[string]interface{})[\"hits\"].([]interface{}) {\n\n\t\t\t// convert it to time.Time because of msg.When field type\n\t\t\tmsgDate, _ := time.Parse(\"2006-01-02T15:04:05Z\", hit.(map[string]interface{})[\"_source\"].(map[string]interface{})[\"date\"].(string))\n\t\t\taux := msg{\n\t\t\t\tMessage: hit.(map[string]interface{})[\"_source\"].(map[string]interface{})[\"msg\"].(string),\n\t\t\t\tWhen: msgDate,\n\t\t\t\tName: hit.(map[string]interface{})[\"_source\"].(map[string]interface{})[\"sentBy\"].(string),\n\t\t\t\tDoctor: 
hit.(map[string]interface{})[\"_source\"].(map[string]interface{})[\"doctor\"].(string),\n\t\t\t\tPatient: hit.(map[string]interface{})[\"_source\"].(map[string]interface{})[\"patient\"].(string),\n\t\t\t}\n\n\t\t\t// store the elasticsearch results in a slice because the dates are in the wrong order\n\t\t\tesMsgs = append(esMsgs, aux)\n\t\t}\n\t\treturn esMsgs, nil\n\t} else {\n\t\treturn nil, nil\n\t}\n}",
"func (c *Client) SearchSecretMessages(ctx context.Context, request *SearchSecretMessagesRequest) (*FoundMessages, error) {\n\tvar result FoundMessages\n\n\tif err := c.rpc.Invoke(ctx, request, &result); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}",
"func (r *ChannelResource) Messages(ctx context.Context, query string, limit int) ([]Message, error) {\n\tif query == \"\" {\n\t\treturn nil, errors.New(\"empty query\")\n\t}\n\n\tq := url.Values{}\n\tswitch query[0] {\n\tcase '>':\n\t\tq.Add(\"after\", query[1:])\n\tcase '<':\n\t\tq.Add(\"before\", query[1:])\n\tcase '~':\n\t\tq.Add(\"around\", query[1:])\n\tdefault:\n\t\treturn nil, errors.New(\"lll-formatted query: prefix the message ID with '>' (after), '<' (before) or '~' (around)\")\n\t}\n\n\tif limit > 0 {\n\t\tif limit > 100 {\n\t\t\tlimit = 100\n\t\t}\n\t\tq.Set(\"limit\", strconv.Itoa(limit))\n\t}\n\n\te := endpoint.GetChannelMessages(r.channelID, q.Encode())\n\tresp, err := r.client.doReq(ctx, e, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, apiError(resp)\n\t}\n\n\tvar msgs []Message\n\tif err = json.NewDecoder(resp.Body).Decode(&msgs); err != nil {\n\t\treturn nil, err\n\t}\n\treturn msgs, nil\n}",
"func (client *Client) SearchCallMessages(request *SearchCallMessagesRequest) (*Messages, error) {\n\t// Unlock receive function at the end of this function to mark received event as processed\n\tdefer client.Unlock(\"SearchCallMessages\")\n\tresult, err := client.Send(Request{\n\t\tmeta: meta{\n\t\t\tType: \"searchCallMessages\",\n\t\t},\n\t\tData: map[string]interface{}{\n\t\t\t\"from_message_id\": request.FromMessageID,\n\t\t\t\"limit\": request.Limit,\n\t\t\t\"only_missed\": request.OnlyMissed,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif result.Type == \"error\" {\n\t\treturn nil, buildResponseError(result.Data)\n\t}\n\n\treturn UnmarshalMessages(result.Data)\n}",
"func GetAllMessages() []*Message {\n\tmessages := make([]*Message, 0)\n\terr := GetDB().Table(\"messages\").Find(&messages).Error\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn messages\n}",
"func (c channelQueryBuilder) GetMessages(filter *GetMessagesParams) (messages []*Message, err error) {\n\t// discord values\n\tconst filterLimit = 100\n\tconst filterDefault = 50\n\n\tif err = filter.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif filter.Limit == 0 {\n\t\tfilter.Limit = filterDefault\n\t\t// we hardcode it here in case discord goes dumb and decided to randomly change it.\n\t\t// This avoids that the bot do not experience a new, random, behaviour on API changes\n\t}\n\n\tif filter.Limit <= filterLimit {\n\t\treturn c.getMessages(filter)\n\t}\n\n\tlatestSnowflake := func(msgs []*Message) (latest Snowflake) {\n\t\tfor i := range msgs {\n\t\t\t// if msgs[i].ID.Date().After(latest.Date()) {\n\t\t\tif msgs[i].ID > latest {\n\t\t\t\tlatest = msgs[i].ID\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tearliestSnowflake := func(msgs []*Message) (earliest Snowflake) {\n\t\tfor i := range msgs {\n\t\t\t// if msgs[i].ID.Date().Before(earliest.Date()) {\n\t\t\tif msgs[i].ID < earliest {\n\t\t\t\tearliest = msgs[i].ID\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\t// scenario#1: filter.Around is not 0 AND filter.Limit is above 100\n\t// divide the limit by half and use .Before and .After tags on each quotient limit.\n\t// Use the .After on potential remainder.\n\t// Note! 
This method can be used recursively\n\tif !filter.Around.IsZero() {\n\t\tbeforeParams := *filter\n\t\tbeforeParams.Before = beforeParams.Around\n\t\tbeforeParams.Around = 0\n\t\tbeforeParams.Limit = filter.Limit / 2\n\t\tbefores, err := c.GetMessages(&beforeParams)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmessages = append(messages, befores...)\n\n\t\tafterParams := *filter\n\t\tafterParams.After = afterParams.Around\n\t\tafterParams.Around = 0\n\t\tafterParams.Limit = filter.Limit / 2\n\t\tafters, err := c.GetMessages(&afterParams)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmessages = append(messages, afters...)\n\n\t\t// filter.Around includes the given ID, so should .Before and .After iterations do as well\n\t\tif msg, _ := c.Message(filter.Around).WithContext(c.ctx).Get(); msg != nil {\n\t\t\t// assumption: error here can be caused by the message ID not actually being a real message\n\t\t\t// and that it was used to get messages in the vicinity. Therefore the err is ignored.\n\t\t\t// TODO: const discord errors.\n\t\t\tmessages = append(messages, msg)\n\t\t}\n\t} else {\n\t\t// scenario#3: filter.After or filter.Before is set.\n\t\t// note that none might be set, which will cause filter.Before to be set after the first 100 messages.\n\t\t//\n\t\tfor {\n\t\t\tif filter.Limit <= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tf := *filter\n\t\t\tif f.Limit > 100 {\n\t\t\t\tf.Limit = 100\n\t\t\t}\n\t\t\tfilter.Limit -= f.Limit\n\t\t\tmsgs, err := c.getMessages(&f)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tmessages = append(messages, msgs...)\n\t\t\tif !filter.After.IsZero() {\n\t\t\t\tfilter.After = latestSnowflake(msgs)\n\t\t\t} else {\n\t\t\t\t// no snowflake or filter.Before\n\t\t\t\tfilter.Before = earliestSnowflake(msgs)\n\t\t\t}\n\t\t}\n\t}\n\n\t// duplicates should not exist as we use snowflakes to fetch unique segments in time\n\treturn messages, nil\n}",
"func GetMessagesByGroup(c echo.Context) error {\n\t// Get user token authenticate\n\t//user := c.Get(\"user\").(*jwt.Token)\n\t//claims := user.Claims.(*utilities.Claim)\n\t//currentUser := claims.User\n\n\t// Get data request\n\trequest := utilities.Request{}\n\tif err := c.Bind(&request); err != nil {\n\t\treturn err\n\t}\n\n\t// get connection\n\tDB := provider.GetConnection()\n\tdefer DB.Close()\n\n\t// Pagination calculate\n\toffset := request.Validate()\n\n\t// Check the number of matches\n\tcounter := utilities.Counter{}\n\n\t// Query chatMessage scroll\n\tchatMessages := make([]chatMessage, 0)\n\tif err := DB.Raw(\"SELECT group_messages.id, group_messages.body, group_messages.body_type, group_messages.file_path, group_messages.created_at, group_messages.creator_id FROM group_messages \"+\n\t\t\"INNER JOIN group_message_recipients ON group_messages.id = group_message_recipients.message_id \"+\n\t\t\"WHERE group_message_recipients.recipient_group_id = ? \"+\n\t\t\"GROUP BY group_messages.id, group_messages.body, group_messages.body_type, group_messages.created_at, group_messages.created_at \"+\n\t\t\"ORDER BY group_messages.created_at DESC \"+\n\t\t\" OFFSET ? LIMIT ?\", request.GroupID, offset, request.Limit).Scan(&chatMessages).Error; err != nil {\n\t\treturn c.JSON(http.StatusOK, utilities.Response{Message: fmt.Sprintf(\"%s\", err)})\n\t}\n\tDB.Raw(\"SELECT count(*) FROM group_messages \"+\n\t\t\"INNER JOIN group_message_recipients ON group_messages.id = group_message_recipients.message_id \"+\n\t\t\"WHERE group_message_recipients.recipient_group_id = ? 
\"+\n\t\t\"GROUP BY group_messages.id\", request.GroupID).Scan(&counter)\n\n\t// find user creator info\n\tfor i := range chatMessages {\n\t\tuserShots := make([]userShort, 0)\n\t\tDB.Raw(\"SELECT * FROM users WHERE id = ?\", chatMessages[i].CreatorID).Scan(&userShots)\n\t\tif len(userShots) == 0 {\n\t\t\treturn c.JSON(http.StatusOK, utilities.Response{Message: fmt.Sprintf(\"Usuario no encontrado\")})\n\t\t}\n\t\tchatMessages[i].Creator = userShots[0]\n\t}\n\n\t// Validate scroll\n\tvar hasMore = false\n\tif request.CurrentPage < 10 {\n\t\tif request.Limit*request.CurrentPage < counter.Count {\n\t\t\thasMore = true\n\t\t}\n\t}\n\n\t// Return response data scroll reverse\n\treturn c.JSON(http.StatusOK, utilities.ResponseScroll{\n\t\tSuccess: true,\n\t\tData: chatMessages,\n\t\tHasMore: hasMore,\n\t\tCurrentPage: request.CurrentPage,\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PageSize sets the optional parameter "pageSize": The maximum number of Projects to return in the response. The server can return fewer projects than requested. If unspecified, server picks an appropriate default. Note: pagination is not yet supported; the server ignores this field.
|
func (c *ProjectsListCall) PageSize(pageSize int64) *ProjectsListCall {
c.opt_["pageSize"] = pageSize
return c
}
|
[
"func (c *projectsRESTClient) ListProjects(ctx context.Context, req *resourcemanagerpb.ListProjectsRequest, opts ...gax.CallOption) *ProjectIterator {\n\tit := &ProjectIterator{}\n\treq = proto.Clone(req).(*resourcemanagerpb.ListProjectsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*resourcemanagerpb.Project, string, error) {\n\t\tresp := &resourcemanagerpb.ListProjectsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v3/projects\")\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tparams.Add(\"parent\", fmt.Sprintf(\"%v\", req.GetParent()))\n\t\tif req.GetShowDeleted() {\n\t\t\tparams.Add(\"showDeleted\", fmt.Sprintf(\"%v\", req.GetShowDeleted()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetProjects(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}",
"func (r *AppsRequest) SetPageSize(pageSize int) {\n r.PageSize = pageSize\n}",
"func (c *ProjectsLocationsReusableConfigsListCall) PageSize(pageSize int64) *ProjectsLocationsReusableConfigsListCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}",
"func (o *GetProgramsParams) SetPageSize(pageSize *int32) {\n\to.PageSize = pageSize\n}",
"func (c *AppsSearchCall) PageSize(pageSize int64) *AppsSearchCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}",
"func (s *API) ListProjects(req *ListProjectsRequest, opts ...scw.RequestOption) (*ListProjectsResponse, error) {\n\tvar err error\n\n\tif req.OrganizationID == \"\" {\n\t\tdefaultOrganizationID, _ := s.client.GetDefaultOrganizationID()\n\t\treq.OrganizationID = defaultOrganizationID\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"project_ids\", req.ProjectIDs)\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/account/v2/projects\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListProjectsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (c *ProjectsLocationsApisVersionsSpecsListRevisionsCall) PageSize(pageSize int64) *ProjectsLocationsApisVersionsSpecsListRevisionsCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}",
"func (ctrl OrganizationController) GetProjects(c *gin.Context) {\n\torganization := c.MustGet(cOrganization).(common.Organization)\n\tvar projects []common.Project\n\tif err := db.Projects().Find(bson.M{\"organization_id\": organization.ID}).All(&projects); err != nil {\n\t\tAbortWithError(LogFields{Context: c, Status: http.StatusGatewayTimeout,\n\t\t\tMessage: \"Error while getting organization projects\",\n\t\t\tLog: logrus.Fields{\"Organization ID\": organization.ID.Hex(), \"Error\": err.Error()},\n\t\t})\n\t\treturn\n\t}\n\tfor i, v := range projects {\n\t\tmetadata.ProjectMetadata(&v)\n\t\tprojects[i] = v\n\t}\n\n\tcount := len(projects)\n\tpgi := util.NewPagination(c, count)\n\tif pgi.HasPage() {\n\t\tAbortWithError(LogFields{Context: c, Status: http.StatusNotFound,\n\t\t\tMessage: \"#\" + strconv.Itoa(pgi.Page()) + \" page contains no results.\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, common.Response{\n\t\tCount: count,\n\t\tNext: pgi.NextPage(),\n\t\tPrevious: pgi.PreviousPage(),\n\t\tData: projects[pgi.Skip():pgi.End()],\n\t})\n}",
"func (a *QcApiService) ListProjects(ctx _context.Context) ([]Project, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []Project\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/projects.json\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Api-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []Project\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 401 {\n\t\t\tvar v string\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 404 {\n\t\t\tvar v string\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func (c *ProjectsLocationsApisDeploymentsListRevisionsCall) PageSize(pageSize int64) *ProjectsLocationsApisDeploymentsListRevisionsCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}",
"func (a *TtsApiService) Projects(ctx context.Context) (ProjectsCollection, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload ProjectsCollection\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/projects\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Api-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif 
localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}",
"func (c *projectsRESTClient) SearchProjects(ctx context.Context, req *resourcemanagerpb.SearchProjectsRequest, opts ...gax.CallOption) *ProjectIterator {\n\tit := &ProjectIterator{}\n\treq = proto.Clone(req).(*resourcemanagerpb.SearchProjectsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*resourcemanagerpb.Project, string, error) {\n\t\tresp := &resourcemanagerpb.SearchProjectsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v3/projects:search\")\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetQuery() != \"\" {\n\t\t\tparams.Add(\"query\", fmt.Sprintf(\"%v\", req.GetQuery()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = 
googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetProjects(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}",
"func (r GetProjectsRequest) WithPagination(page, size int) PaginatedRequest {\n\tr.pagination = newPagination(page, size)\n\treturn r\n}",
"func (codeEngine *CodeEngineV2) NewProjectsPager(options *ListProjectsOptions) (pager *ProjectsPager, err error) {\n\tif options.Start != nil && *options.Start != \"\" {\n\t\terr = fmt.Errorf(\"the 'options.Start' field should not be set\")\n\t\treturn\n\t}\n\n\tvar optionsCopy ListProjectsOptions = *options\n\tpager = &ProjectsPager{\n\t\thasNext: true,\n\t\toptions: &optionsCopy,\n\t\tclient: codeEngine,\n\t}\n\treturn\n}",
"func getListProjectsParam(r *http.Request) (*apistructs.ProjectListRequest, error) {\n\t// 获取企业Id\n\torgIDStr := r.Header.Get(httputil.OrgHeader)\n\tif orgIDStr == \"\" {\n\t\torgIDStr = r.URL.Query().Get(\"orgId\")\n\t\tif orgIDStr == \"\" {\n\t\t\treturn nil, errors.Errorf(\"invalid param, orgId is empty\")\n\t\t}\n\t}\n\torgID, err := strconv.ParseInt(orgIDStr, 10, 64)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid param, orgId is invalid\")\n\t}\n\n\t// 按项目名称搜索\n\tkeyword := r.URL.Query().Get(\"q\")\n\n\t// 获取pageSize\n\tpageSizeStr := r.URL.Query().Get(\"pageSize\")\n\tif pageSizeStr == \"\" {\n\t\tpageSizeStr = \"20\"\n\t}\n\tpageSize, err := strconv.Atoi(pageSizeStr)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid param, pageSize is invalid\")\n\t}\n\t// 获取pageNo\n\tpageNoStr := r.URL.Query().Get(\"pageNo\")\n\tif pageNoStr == \"\" {\n\t\tpageNoStr = \"1\"\n\t}\n\tpageNo, err := strconv.Atoi(pageNoStr)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid param, pageNo is invalid\")\n\t}\n\t// 获取isPublic\n\tvar isPublic bool\n\tisPublicStr := r.URL.Query().Get(\"is_public\")\n\tif isPublicStr == \"true\" {\n\t\tisPublic = true\n\t}\n\tvar asc bool\n\tascStr := r.URL.Query().Get(\"asc\")\n\tif ascStr == \"true\" {\n\t\tasc = true\n\t}\n\torderBy := r.URL.Query().Get(\"orderBy\")\n\n\treturn &apistructs.ProjectListRequest{\n\t\tOrgID: uint64(orgID),\n\t\tQuery: keyword,\n\t\tName: r.URL.Query().Get(\"name\"),\n\t\tPageNo: pageNo,\n\t\tPageSize: pageSize,\n\t\tOrderBy: orderBy,\n\t\tAsc: asc,\n\t\tIsPublic: isPublic,\n\t}, nil\n}",
"func GetProjects(c *gin.Context) {\n\ts := services.NewProjectService(daos.NewProjectDAO())\n\tif projects, err := s.FindAll(); err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tlog.Println(err)\n\t} else {\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"projects\": projects,\n\t\t})\n\t}\n}",
"func (op Operations) ListAllProject(filter string) (*ProjectListResponse, error) {\n\tentities := make([]*Project, 0)\n\n\tresp, err := op.ListProject(&DSMetadata{\n\t\tFilter: &filter,\n\t\tKind: utils.StringPtr(\"project\"),\n\t\tLength: utils.Int64Ptr(itemsPerPage),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttotalEntities := utils.Int64Value(resp.Metadata.TotalMatches)\n\tremaining := totalEntities\n\toffset := utils.Int64Value(resp.Metadata.Offset)\n\n\tif totalEntities > itemsPerPage {\n\t\tfor hasNext(&remaining) {\n\t\t\tresp, err = op.ListProject(&DSMetadata{\n\t\t\t\tFilter: &filter,\n\t\t\t\tKind: utils.StringPtr(\"project\"),\n\t\t\t\tLength: utils.Int64Ptr(itemsPerPage),\n\t\t\t\tOffset: utils.Int64Ptr(offset),\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tentities = append(entities, resp.Entities...)\n\n\t\t\toffset += itemsPerPage\n\t\t\tlog.Printf(\"[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\\n\", totalEntities, remaining, offset, len(entities))\n\t\t}\n\n\t\tresp.Entities = entities\n\t}\n\n\treturn resp, nil\n}",
"func (c *ProjectsLocationsApisDeploymentsListCall) PageSize(pageSize int64) *ProjectsLocationsApisDeploymentsListCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}",
"func (r *Request) PageSize(size int) *Request {\n\tif size > 0 && size <= 100 {\n\t\tr.pageSize = size\n\t} else {\n\t\tr.pageSize = 50\n\t}\n\treturn r\n}",
"func (c *CustomersPolicySchemasListCall) PageSize(pageSize int64) *CustomersPolicySchemasListCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestIamPermissions: Tests the specified permissions against the IAM access control policy for the specified project.
|
func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall {
c := &ProjectsTestIamPermissionsCall{s: r.s, opt_: make(map[string]interface{})}
c.resource = resource
c.testiampermissionsrequest = testiampermissionsrequest
return c
}
|
[
"func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {\n\tmd := metadata.Pairs(\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"resource\", url.QueryEscape(req.GetResource())))\n\tctx = insertMetadata(ctx, c.xGoogMetadata, md)\n\topts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...)\n\tvar resp *iampb.TestIamPermissionsResponse\n\terr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tvar err error\n\t\tresp, err = c.iamClient.TestIamPermissions(ctx, req, settings.GRPC...)\n\t\treturn err\n\t}, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func (m *MockImages) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *ga.TestPermissionsRequest) (*ga.TestPermissionsResponse, error) {\n\tif m.TestIamPermissionsHook != nil {\n\t\treturn m.TestIamPermissionsHook(ctx, key, arg0, m)\n\t}\n\treturn nil, fmt.Errorf(\"TestIamPermissionsHook must be set\")\n}",
"func (c *Client) TestBucketIamPermissions(ctx context.Context, req *storagepb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {\n\tif _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {\n\t\tcctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)\n\t\tdefer cancel()\n\t\tctx = cctx\n\t}\n\tctx = insertMetadata(ctx, c.xGoogMetadata)\n\topts = append(c.CallOptions.TestBucketIamPermissions[0:len(c.CallOptions.TestBucketIamPermissions):len(c.CallOptions.TestBucketIamPermissions)], opts...)\n\tvar resp *iampb.TestIamPermissionsResponse\n\terr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tvar err error\n\t\tresp, err = c.client.TestBucketIamPermissions(ctx, req, settings.GRPC...)\n\t\treturn err\n\t}, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {\n\tctx = insertMetadata(ctx, c.xGoogMetadata)\n\topts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...)\n\tvar resp *iampb.TestIamPermissionsResponse\n\terr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tvar err error\n\t\tresp, err = c.databaseAdminClient.TestIamPermissions(ctx, req, settings.GRPC...)\n\t\treturn err\n\t}, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func (m *MockAlphaRouters) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *alpha.TestPermissionsRequest) (*alpha.TestPermissionsResponse, error) {\n\tif m.TestIamPermissionsHook != nil {\n\t\treturn m.TestIamPermissionsHook(ctx, key, arg0, m)\n\t}\n\treturn nil, fmt.Errorf(\"TestIamPermissionsHook must be set\")\n}",
"func (g *GCEAlphaRegionNetworkFirewallPolicies) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *alpha.TestPermissionsRequest) (*alpha.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEAlphaRegionNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEAlphaRegionNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"alpha\", \"RegionNetworkFirewallPolicies\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"alpha\"),\n\t\tService: \"RegionNetworkFirewallPolicies\",\n\t}\n\tklog.V(5).Infof(\"GCEAlphaRegionNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEAlphaRegionNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.Alpha.RegionNetworkFirewallPolicies.TestIamPermissions(projectID, key.Region, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEAlphaRegionNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (g *GCEImages) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *ga.TestPermissionsRequest) (*ga.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEImages.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEImages.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"ga\", \"Images\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"ga\"),\n\t\tService: \"Images\",\n\t}\n\tklog.V(5).Infof(\"GCEImages.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEImages.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.GA.Images.TestIamPermissions(projectID, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEImages.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (g *GCEAlphaNetworkFirewallPolicies) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *alpha.TestPermissionsRequest) (*alpha.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEAlphaNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEAlphaNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"alpha\", \"NetworkFirewallPolicies\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"alpha\"),\n\t\tService: \"NetworkFirewallPolicies\",\n\t}\n\tklog.V(5).Infof(\"GCEAlphaNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEAlphaNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.Alpha.NetworkFirewallPolicies.TestIamPermissions(projectID, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEAlphaNetworkFirewallPolicies.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (g *GCEAlphaRouters) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *alpha.TestPermissionsRequest) (*alpha.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEAlphaRouters.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEAlphaRouters.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"alpha\", \"Routers\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"alpha\"),\n\t\tService: \"Routers\",\n\t}\n\tklog.V(5).Infof(\"GCEAlphaRouters.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEAlphaRouters.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.Alpha.Routers.TestIamPermissions(projectID, key.Region, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEAlphaRouters.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (c *Client) TestObjectIamPermissions(ctx context.Context, req *storagepb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {\n\tif _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {\n\t\tcctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)\n\t\tdefer cancel()\n\t\tctx = cctx\n\t}\n\tctx = insertMetadata(ctx, c.xGoogMetadata)\n\topts = append(c.CallOptions.TestObjectIamPermissions[0:len(c.CallOptions.TestObjectIamPermissions):len(c.CallOptions.TestObjectIamPermissions)], opts...)\n\tvar resp *iampb.TestIamPermissionsResponse\n\terr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tvar err error\n\t\tresp, err = c.client.TestObjectIamPermissions(ctx, req, settings.GRPC...)\n\t\treturn err\n\t}, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func (g *GCEAlphaImages) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *alpha.TestPermissionsRequest) (*alpha.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEAlphaImages.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEAlphaImages.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"alpha\", \"Images\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"alpha\"),\n\t\tService: \"Images\",\n\t}\n\tklog.V(5).Infof(\"GCEAlphaImages.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEAlphaImages.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.Alpha.Images.TestIamPermissions(projectID, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEAlphaImages.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (g *GCEBetaRouters) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *beta.TestPermissionsRequest) (*beta.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEBetaRouters.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEBetaRouters.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"beta\", \"Routers\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"beta\"),\n\t\tService: \"Routers\",\n\t}\n\tklog.V(5).Infof(\"GCEBetaRouters.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEBetaRouters.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.Beta.Routers.TestIamPermissions(projectID, key.Region, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEBetaRouters.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (g *GCEBetaImages) TestIamPermissions(ctx context.Context, key *meta.Key, arg0 *beta.TestPermissionsRequest) (*beta.TestPermissionsResponse, error) {\n\tklog.V(5).Infof(\"GCEBetaImages.TestIamPermissions(%v, %v, ...): called\", ctx, key)\n\n\tif !key.Valid() {\n\t\tklog.V(2).Infof(\"GCEBetaImages.TestIamPermissions(%v, %v, ...): key is invalid (%#v)\", ctx, key, key)\n\t\treturn nil, fmt.Errorf(\"invalid GCE key (%+v)\", key)\n\t}\n\tprojectID := g.s.ProjectRouter.ProjectID(ctx, \"beta\", \"Images\")\n\trk := &RateLimitKey{\n\t\tProjectID: projectID,\n\t\tOperation: \"TestIamPermissions\",\n\t\tVersion: meta.Version(\"beta\"),\n\t\tService: \"Images\",\n\t}\n\tklog.V(5).Infof(\"GCEBetaImages.TestIamPermissions(%v, %v, ...): projectID = %v, rk = %+v\", ctx, key, projectID, rk)\n\n\tif err := g.s.RateLimiter.Accept(ctx, rk); err != nil {\n\t\tklog.V(4).Infof(\"GCEBetaImages.TestIamPermissions(%v, %v, ...): RateLimiter error: %v\", ctx, key, err)\n\t\treturn nil, err\n\t}\n\tcall := g.s.Beta.Images.TestIamPermissions(projectID, key.Name, arg0)\n\tcall.Context(ctx)\n\tv, err := call.Do()\n\tklog.V(4).Infof(\"GCEBetaImages.TestIamPermissions(%v, %v, ...) = %+v, %v\", ctx, key, v, err)\n\treturn v, err\n}",
"func (m *MockEnterpriseDB) BitbucketProjectPermissions() database.BitbucketProjectPermissionsStore {\n\tr0 := m.BitbucketProjectPermissionsFunc.nextHook()()\n\tm.BitbucketProjectPermissionsFunc.appendCall(EnterpriseDBBitbucketProjectPermissionsFuncCall{r0})\n\treturn r0\n}",
"func (m *MockClient) GetIamPolicy(projectName string) (*cloudresourcemanager.Policy, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIamPolicy\", projectName)\n\tret0, _ := ret[0].(*cloudresourcemanager.Policy)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func TestAccProjectIamMember_multiple(t *testing.T) {\n\t// Multiple fine-grained resources\n\tacctest.SkipIfVcr(t)\n\tt.Parallel()\n\n\torg := envvar.GetTestOrgFromEnv(t)\n\tacctest.SkipIfEnvNotSet(t, \"GOOGLE_ORG\")\n\n\tpid := fmt.Sprintf(\"tf-test-%d\", acctest.RandInt(t))\n\tresourceName := \"google_project_iam_member.acceptance\"\n\tresourceName2 := \"google_project_iam_member.multiple\"\n\trole := \"roles/compute.instanceAdmin\"\n\tmember := \"user:[email protected]\"\n\tmember2 := \"user:[email protected]\"\n\n\tacctest.VcrTest(t, resource.TestCase{\n\t\tPreCheck: func() { acctest.AccTestPreCheck(t) },\n\t\tProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),\n\t\tSteps: []resource.TestStep{\n\t\t\t// Create a new project\n\t\t\t{\n\t\t\t\tConfig: testAccProject_create(pid, org),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccProjectExistingPolicy(t, pid),\n\t\t\t\t),\n\t\t\t},\n\t\t\t// Apply an IAM binding\n\t\t\t{\n\t\t\t\tConfig: testAccProjectAssociateMemberBasic(pid, org, role, member),\n\t\t\t},\n\t\t\tprojectIamMemberImportStep(resourceName, pid, role, member),\n\n\t\t\t// Apply another IAM binding\n\t\t\t{\n\t\t\t\tConfig: testAccProjectAssociateMemberMultiple(pid, org, role, member, role, member2),\n\t\t\t},\n\t\t\tprojectIamMemberImportStep(resourceName, pid, role, member),\n\t\t\tprojectIamMemberImportStep(resourceName2, pid, role, member2),\n\t\t},\n\t})\n}",
"func (c *FakeProjectIamPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ProjectIamPolicy, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(projectiampoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.ProjectIamPolicy{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.ProjectIamPolicy), err\n}",
"func TestAppProject_ValidPolicyRules(t *testing.T) {\n\tp := newTestProject()\n\terr := p.ValidateProject()\n\tassert.NoError(t, err)\n\tgoodPolicies := []string{\n\t\t\"p,proj:my-proj:my-role,applications,get,my-proj/*,allow\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/*, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/*, deny\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/*-foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/foo-*, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/*-*, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, get, my-proj/*.*, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, *, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, create, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, update, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, sync, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, delete, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, action/*, my-proj/foo, allow\",\n\t\t\"p, proj:my-proj:my-role, applications, action/apps/Deployment/restart, my-proj/foo, allow\",\n\t}\n\tfor _, good := range goodPolicies {\n\t\tp.Spec.Roles[0].Policies = []string{good}\n\t\terr = p.ValidateProject()\n\t\tassert.NoError(t, err)\n\t}\n}",
"func (c *IamClient) QueryTestablePermissions(ctx context.Context, req *adminpb.QueryTestablePermissionsRequest, opts ...gax.CallOption) (*adminpb.QueryTestablePermissionsResponse, error) {\n\tctx = insertMetadata(ctx, c.xGoogMetadata)\n\topts = append(c.CallOptions.QueryTestablePermissions[0:len(c.CallOptions.QueryTestablePermissions):len(c.CallOptions.QueryTestablePermissions)], opts...)\n\tvar resp *adminpb.QueryTestablePermissionsResponse\n\terr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tvar err error\n\t\tresp, err = c.iamClient.QueryTestablePermissions(ctx, req, settings.GRPC...)\n\t\treturn err\n\t}, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Undelete: Restores the project identified by the specified `project_id` (for example, `myproject123`). You can only use this method for a project that has a lifecycle state of [DELETE_REQUESTED] [google.cloudresourcemanager.projects.v1beta1.LifecycleState.DELETE_RE QUESTED]. After deletion starts, as indicated by a lifecycle state of [DELETE_IN_PROGRESS] [google.cloudresourcemanager.projects.v1beta1.LifecycleState.DELETE_IN _PROGRESS], the project cannot be restored. The caller must have modify permissions for this project.
|
func (r *ProjectsService) Undelete(projectId string) *ProjectsUndeleteCall {
c := &ProjectsUndeleteCall{s: r.s, opt_: make(map[string]interface{})}
c.projectId = projectId
return c
}
|
[
"func (c *ProjectsClient) UndeleteProject(ctx context.Context, req *resourcemanagerpb.UndeleteProjectRequest, opts ...gax.CallOption) (*UndeleteProjectOperation, error) {\n\treturn c.internalClient.UndeleteProject(ctx, req, opts...)\n}",
"func (c *projectsRESTClient) UndeleteProjectOperation(name string) *UndeleteProjectOperation {\n\toverride := fmt.Sprintf(\"/v3/%s\", name)\n\treturn &UndeleteProjectOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),\n\t\tpollPath: override,\n\t}\n}",
"func DeleteProject(path string, id uint32) error {\n\t// Unset the project from the path.\n\terr := SetProject(path, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Unset the quota on the project.\n\terr = SetProjectQuota(path, id, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *projectsGRPCClient) UndeleteProjectOperation(name string) *UndeleteProjectOperation {\n\treturn &UndeleteProjectOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),\n\t}\n}",
"func (c *Cache) DelProject(shortName string) error {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tif err := c.cat.DelProject(shortName); err != nil {\n\t\treturn err\n\t}\n\tc.uncache(shortName)\n\treturn nil\n}",
"func DeleteProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tpid := dbms.ReadProjectId(ps.Get(\"project\"))\n\tuid := ps.GetInt(\"authId\")\n\tp := model.InitedProject(pid)\n\tif p.CreatorId != uid {\n\t\tbase.ForbidErr(w, BelongErr)\n\t\treturn\n\t}\n\tif len(p.GetMissions()) > 0 {\n\t\tbase.ForbidErr(w, MissionRemainErr)\n\t\treturn\n\t}\n\tp.Remove()\n\tmakeBaseResp(w, r)\n}",
"func (b *boltDB) RemoveProject(id int) error {\n\tdb, err := bolt.Open(b.dbLocation, 0644, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\treturn db.Update(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket(projectsBucket)\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"bucket %q not found\", projectsBucket)\n\t\t}\n\n\t\terr := bucket.Delete(itob(id))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}",
"func (a *adapter) PurgeProject(ctx context.Context, projectID string) error {\n\t_, err := a.db.ExecContext(ctx, \"UPDATE chef_authn_tokens SET project_ids=array_remove(project_ids, $1)\", projectID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *API) DeleteProject(req *DeleteProjectRequest, opts ...scw.RequestOption) error {\n\tvar err error\n\n\tif req.ProjectID == \"\" {\n\t\tdefaultProjectID, _ := s.client.GetDefaultProjectID()\n\t\treq.ProjectID = defaultProjectID\n\t}\n\n\tif fmt.Sprint(req.ProjectID) == \"\" {\n\t\treturn errors.New(\"field ProjectID cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"DELETE\",\n\t\tPath: \"/account/v2/projects/\" + fmt.Sprint(req.ProjectID) + \"\",\n\t\tHeaders: http.Header{},\n\t}\n\n\terr = s.client.Do(scwReq, nil, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func DeleteProject(db database.Database, logger log.Logger, username, projectID string) error {\n\terr := db.RemoveProject(projectID)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\n\taccount, err := db.GetAccountByUsername(username)\n\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\n\taccount.ProjectIds = removeIDFromList(projectID, account.ProjectIds)\n\terr = db.UpdateAccount(account)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (cli *CLI) ProjectDelete(profiles map[string]string) (string, error) {\n\tcommands := []string{\"project\", \"delete\", \"--yes\"}\n\n\tif len(profiles) > 0 {\n\t\tcommands = append(commands, \"--env-profiles\")\n\t\tenvProfiles := []string{}\n\t\tfor env, profile := range profiles {\n\t\t\tenvProfiles = append(envProfiles, fmt.Sprintf(\"%s=%s\", env, profile))\n\t\t}\n\t\tcommands = append(commands, strings.Join(envProfiles, \",\"))\n\t}\n\treturn cli.exec(\n\t\texec.Command(cli.path, commands...))\n}",
"func (db *ConcreteDatastore) DeleteProject(ProjectId int64) error {\n\trequest := `DELETE FROM Project \n\tWHERE project_id=?`\n\tif _, err := db.Exec(request, ProjectId); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func DeleteProject(c echo.Context) error {\n\tlogger := config.GetLogger\n\tdefer logger().Sync()\n\tprojectID := strings.TrimSpace(c.Param(\"projectId\"))\n\n\tif projectID == \"\" {\n\t\treturn c.JSON(http.StatusBadRequest, domain.ConstraintViolation(\"Bad request. Missing mandatory request value projectId\"))\n\t}\n\n\terr := domain.GetProjectDeleteUsecase().Execute(projectID)\n\tif err != nil {\n\t\tlogger().Errorf(\"An error occurred while trying to Delete the Project: %s\", err.Error())\n\t\treturn c.JSON(err.(domain.IdentifiableError).GetCode(), err)\n\t}\n\n\treturn c.JSON(http.StatusOK, \"\")\n}",
"func DeleteProject(p string) error {\n\tstore.DB.Close()\n\treturn configure.DeleteDatabase(filepath.Join(configure.TracyPath, p))\n}",
"func DeleteProject(p string) error {\n\tstore.DB.Close()\n\treturn configure.DeleteDatabase(filepath.Join(configure.Current.TracyPath, p+\".db\"))\n}",
"func (r *ProjectsWebAppsService) Undelete(nameid string, undeletewebapprequest *UndeleteWebAppRequest) *ProjectsWebAppsUndeleteCall {\n\tc := &ProjectsWebAppsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\tc.undeletewebapprequest = undeletewebapprequest\n\treturn c\n}",
"func (r *ProjectsIosAppsService) Undelete(nameid string, undeleteiosapprequest *UndeleteIosAppRequest) *ProjectsIosAppsUndeleteCall {\n\tc := &ProjectsIosAppsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\tc.undeleteiosapprequest = undeleteiosapprequest\n\treturn c\n}",
"func (r *ProjectsAndroidAppsService) Undelete(nameid string, undeleteandroidapprequest *UndeleteAndroidAppRequest) *ProjectsAndroidAppsUndeleteCall {\n\tc := &ProjectsAndroidAppsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\tc.undeleteandroidapprequest = undeleteandroidapprequest\n\treturn c\n}",
"func (p *PrivilegedProjectProvider) DeleteUnsecured(projectInternalName string) error {\n\texistingProject, err := p.clientPrivileged.Get(projectInternalName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\texistingProject.Status.Phase = kubermaticapiv1.ProjectTerminating\n\tif _, err := p.clientPrivileged.Update(existingProject); err != nil {\n\t\treturn err\n\t}\n\n\treturn p.clientPrivileged.Delete(projectInternalName, &metav1.DeleteOptions{})\n}",
"func ProjectDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tvar err error\n\n\tappID := vars[\"appId\"]\n\n\tcomposeData, err := utils.GetComposeProject(appID)\n\tif err == nil {\n\t\tproj, err := utils.GetProject(composeData, appID)\n\n\t\tif err == nil {\n\t\t\tlog.Info(\"Delete Project : \", appID)\n proj.Down()\n\t\t proj.Delete()\n err = utils.RemoveComposeProject(appID)\n \n if err == nil {\n w.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n w.WriteHeader(http.StatusOK)\n if err := json.NewEncoder(w).Encode(jsonErr{Code: http.StatusOK, Text: \"OK\"}); err != nil {\n panic(err)\n }\n return\n }\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusNotFound)\n\tif err := json.NewEncoder(w).Encode(jsonErr{Code: http.StatusNotFound, Text: err.Error()}); err != nil {\n\t\tpanic(err)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RunInteractive runs the command in interactive mode and returns the output, and error. It takes command as array of strings, and a function `tester` that contains steps to run the test as an argument. The command is executed as a separate process, the environment of which is controlled via the `env` argument. The initial value of the subprocess environment is a copy of the environment of the current process. If `env` is not `nil`, it will be appended to the end of the subprocess environment. If there are duplicate environment keys, only the last value in the slice for each duplicate key is used.
|
func RunInteractive(command []string, env []string, tester Tester) (string, error) {
fmt.Fprintln(GinkgoWriter, "running command", command, "with env", env)
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
opts := termtest.Options{
CmdName: command[0],
Args: command[1:],
WorkDirectory: wd,
RetainWorkDir: true,
ExtraOpts: []expect.ConsoleOpt{},
}
if env != nil {
opts.Environment = append(os.Environ(), env...)
}
cp, err := termtest.New(opts)
if err != nil {
log.Fatal(err)
}
defer cp.Close()
buf := new(bytes.Buffer)
ctx := InteractiveContext{
Command: command,
buffer: buf,
StopCommand: func() {
_ = cp.Signal(os.Kill)
},
cp: cp,
}
tester(ctx)
_, err = cp.ExpectExitCode(0)
return buf.String(), err
}
|
[
"func (e TknRunner) RunInteractiveTests(t *testing.T, ops *Prompt) *expect.Console {\n\tt.Helper()\n\n\t// Multiplex output to a buffer as well for the raw bytes.\n\tbuf := new(bytes.Buffer)\n\tc, state, err := helper.NewVT10XConsole(goexpect.WithStdout(buf))\n\tassert.NilError(t, err)\n\tdefer c.Close()\n\n\tif e.namespace != \"\" {\n\t\tops.CmdArgs = append(ops.CmdArgs, \"--namespace\", e.namespace)\n\t}\n\n\tcmd := exec.Command(e.path, ops.CmdArgs[0:len(ops.CmdArgs)]...) //nolint:gosec\n\tcmd.Stdin = c.Tty()\n\tcmd.Stdout = c.Tty()\n\tcmd.Stderr = c.Tty()\n\n\tassert.NilError(t, cmd.Start())\n\n\tdonec := make(chan struct{})\n\tgo func() {\n\t\tdefer close(donec)\n\t\tif err := ops.Procedure(c); err != nil {\n\t\t\tt.Logf(\"procedure failed: %v\", err)\n\t\t}\n\t}()\n\n\t// Close the slave end of the pty, and read the remaining bytes from the master end.\n\t_ = c.Tty().Close()\n\t<-donec\n\n\t// Dump the terminal's screen.\n\tt.Logf(\"\\n%s\", goexpect.StripTrailingEmptyLines(state.String()))\n\n\tassert.NilError(t, cmd.Wait())\n\n\treturn c\n}",
"func RunCMDWithInteractive(command string, args ...string) error {\n\tcmd := exec.Command(command, args...)\n\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\n\treturn cmd.Run()\n}",
"func ShellInteractiveCommand(cmdStr []string) error {\n\tlog.Print(\"[VERBOSE] \" + cmdStr[0])\n\n\t//\n\t// Block SIGINT, at least.\n\t// Otherwise Ctrl-C meant for gdb would kill newt.\n\t//\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tsignal.Notify(c, syscall.SIGTERM)\n\tgo func() {\n\t\t<-c\n\t}()\n\n\t// Transfer stdin, stdout, and stderr to the new process\n\t// and also set target directory for the shell to start in.\n\tpa := os.ProcAttr{\n\t\tFiles: []*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t}\n\n\t// Start up a new shell.\n\tproc, err := os.StartProcess(cmdStr[0], cmdStr, &pa)\n\tif err != nil {\n\t\tsignal.Stop(c)\n\t\treturn NewNewtError(err.Error())\n\t}\n\n\t// Release and exit\n\t_, err = proc.Wait()\n\tif err != nil {\n\t\tsignal.Stop(c)\n\t\treturn NewNewtError(err.Error())\n\t}\n\tsignal.Stop(c)\n\treturn nil\n}",
"func ExecShellInteractive(command string) {\n\tcmd := exec.Command(\"sh\", \"-c\", command)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlogging.LogErrorf(\"Cannot run shell command. Error: %s \", err.Error())\n\t\tos.Exit(1)\n\t}\n}",
"func TestRunAttachStdin(t *testing.T) {\n\n\tstdin, stdinPipe := io.Pipe()\n\tstdout, stdoutPipe := io.Pipe()\n\n\tcli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)\n\tdefer cleanup(globalRuntime)\n\n\tch := make(chan struct{})\n\tgo func() {\n\t\tdefer close(ch)\n\t\tcli.CmdRun(\"-i\", \"-a\", \"stdin\", unitTestImageID, \"sh\", \"-c\", \"echo hello && cat\")\n\t}()\n\n\t// Send input to the command, close stdin\n\tsetTimeout(t, \"Write timed out\", 10*time.Second, func() {\n\t\tif _, err := stdinPipe.Write([]byte(\"hi there\\n\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := stdinPipe.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\tcontainer := globalRuntime.List()[0]\n\n\t// Check output\n\tsetTimeout(t, \"Reading command output time out\", 10*time.Second, func() {\n\t\tcmdOutput, err := bufio.NewReader(stdout).ReadString('\\n')\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif cmdOutput != container.ShortID()+\"\\n\" {\n\t\t\tt.Fatalf(\"Wrong output: should be '%s', not '%s'\\n\", container.ShortID()+\"\\n\", cmdOutput)\n\t\t}\n\t})\n\n\t// wait for CmdRun to return\n\tsetTimeout(t, \"Waiting for CmdRun timed out\", 5*time.Second, func() {\n\t\t// Unblock hijack end\n\t\tstdout.Read([]byte{})\n\t\t<-ch\n\t})\n\n\tsetTimeout(t, \"Waiting for command to exit timed out\", 5*time.Second, func() {\n\t\tcontainer.Wait()\n\t})\n\n\t// Check logs\n\tif cmdLogs, err := container.ReadLog(\"stdout\"); err != nil {\n\t\tt.Fatal(err)\n\t} else {\n\t\tif output, err := ioutil.ReadAll(cmdLogs); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else {\n\t\t\texpectedLog := \"hello\\nhi there\\n\"\n\t\t\tif string(output) != expectedLog {\n\t\t\t\tt.Fatalf(\"Unexpected logs: should be '%s', not '%s'\\n\", expectedLog, output)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *GitCommand) PrepareInteractiveRebaseCommand(baseSha string, todo string, overrideEditor bool) (*exec.Cmd, error) {\n\tex := c.OSCommand.GetLazygitPath()\n\n\tdebug := \"FALSE\"\n\tif c.OSCommand.Config.GetDebug() {\n\t\tdebug = \"TRUE\"\n\t}\n\n\tsplitCmd := str.ToArgv(fmt.Sprintf(\"git rebase --interactive --autostash --keep-empty --rebase-merges %s\", baseSha))\n\n\tcmd := c.OSCommand.command(splitCmd[0], splitCmd[1:]...)\n\n\tgitSequenceEditor := ex\n\tif todo == \"\" {\n\t\tgitSequenceEditor = \"true\"\n\t}\n\n\tcmd.Env = os.Environ()\n\tcmd.Env = append(\n\t\tcmd.Env,\n\t\t\"LAZYGIT_CLIENT_COMMAND=INTERACTIVE_REBASE\",\n\t\t\"LAZYGIT_REBASE_TODO=\"+todo,\n\t\t\"DEBUG=\"+debug,\n\t\t\"LANG=en_US.UTF-8\", // Force using EN as language\n\t\t\"LC_ALL=en_US.UTF-8\", // Force using EN as language\n\t\t\"GIT_SEQUENCE_EDITOR=\"+gitSequenceEditor,\n\t)\n\n\tif overrideEditor {\n\t\tcmd.Env = append(cmd.Env, \"EDITOR=\"+ex)\n\t}\n\n\treturn cmd, nil\n}",
"func ExecCommandIntoContainerWithEnv(ctx context.Context, container string, user string, cmd []string, env []string) (string, error) {\n\tdockerClient := getDockerClient()\n\tdefer dockerClient.Close()\n\n\tdetach := false\n\ttty := false\n\n\tcontainerName := container\n\n\tlog.WithFields(log.Fields{\n\t\t\"container\": containerName,\n\t\t\"command\": cmd,\n\t\t\"detach\": detach,\n\t\t\"env\": env,\n\t\t\"tty\": tty,\n\t}).Trace(\"Creating command to be executed in container\")\n\n\tresponse, err := dockerClient.ContainerExecCreate(\n\t\tctx, containerName, types.ExecConfig{\n\t\t\tUser: user,\n\t\t\tTty: tty,\n\t\t\tAttachStdin: false,\n\t\t\tAttachStderr: true,\n\t\t\tAttachStdout: true,\n\t\t\tDetach: detach,\n\t\t\tCmd: cmd,\n\t\t\tEnv: env,\n\t\t})\n\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"container\": containerName,\n\t\t\t\"command\": cmd,\n\t\t\t\"env\": env,\n\t\t\t\"error\": err,\n\t\t\t\"detach\": detach,\n\t\t\t\"tty\": tty,\n\t\t}).Warn(\"Could not create command in container\")\n\t\treturn \"\", err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"container\": containerName,\n\t\t\"command\": cmd,\n\t\t\"detach\": detach,\n\t\t\"env\": env,\n\t\t\"tty\": tty,\n\t}).Trace(\"Command to be executed in container created\")\n\n\tresp, err := dockerClient.ContainerExecAttach(ctx, response.ID, types.ExecStartCheck{\n\t\tDetach: detach,\n\t\tTty: tty,\n\t})\n\tdefer resp.Close()\n\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"container\": containerName,\n\t\t\t\"command\": cmd,\n\t\t\t\"detach\": detach,\n\t\t\t\"env\": env,\n\t\t\t\"error\": err,\n\t\t\t\"tty\": tty,\n\t\t}).Error(\"Could not execute command in container\")\n\t\treturn \"\", err\n\t}\n\n\t// see https://stackoverflow.com/a/57132902\n\tvar execRes execResult\n\n\t// read the output\n\tvar outBuf, errBuf bytes.Buffer\n\toutputDone := make(chan error)\n\n\tgo func() {\n\t\t// StdCopy demultiplexes the stream into two buffers\n\t\t_, err = 
stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)\n\t\toutputDone <- err\n\t}()\n\n\tselect {\n\tcase err := <-outputDone:\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbreak\n\n\tcase <-ctx.Done():\n\t\treturn \"\", ctx.Err()\n\t}\n\n\tstdout, err := ioutil.ReadAll(&outBuf)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"container\": containerName,\n\t\t\t\"command\": cmd,\n\t\t\t\"detach\": detach,\n\t\t\t\"env\": env,\n\t\t\t\"error\": err,\n\t\t\t\"tty\": tty,\n\t\t}).Error(\"Could not parse stdout from container\")\n\t\treturn \"\", err\n\t}\n\tstderr, err := ioutil.ReadAll(&errBuf)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"container\": containerName,\n\t\t\t\"command\": cmd,\n\t\t\t\"detach\": detach,\n\t\t\t\"env\": env,\n\t\t\t\"error\": err,\n\t\t\t\"tty\": tty,\n\t\t}).Error(\"Could not parse stderr from container\")\n\t\treturn \"\", err\n\t}\n\n\texecRes.ExitCode = 0\n\texecRes.StdOut = string(stdout)\n\texecRes.StdErr = string(stderr)\n\n\t// remove '\\n' from the response\n\treturn strings.ReplaceAll(execRes.StdOut, \"\\n\", \"\"), nil\n}",
"func (drv Driver) RunCommand(args []string) error {\n\treturn drv.SSHClient.RunCommand(args)\n}",
"func (m *nativeGitClient) runCredentialedCmd(args ...string) error {\n\tcloser, environ, err := m.creds.Environ()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = closer.Close() }()\n\n\t// If a basic auth header is explicitly set, tell Git to send it to the\n\t// server to force use of basic auth instead of negotiating the auth scheme\n\tfor _, e := range environ {\n\t\tif strings.HasPrefix(e, fmt.Sprintf(\"%s=\", forceBasicAuthHeaderEnv)) {\n\t\t\targs = append([]string{\"--config-env\", fmt.Sprintf(\"http.extraHeader=%s\", forceBasicAuthHeaderEnv)}, args...)\n\t\t}\n\t}\n\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Env = append(cmd.Env, environ...)\n\t_, err = m.runCmdOutput(cmd, runOpts{})\n\treturn err\n}",
"func NewSubprocessTester(dir string, env []string, localOutputDir, nsjailPath, nsjailRoot string) (Tester, error) {\n\ts := &SubprocessTester{\n\t\tdir: dir,\n\t\tenv: env,\n\t\tlocalOutputDir: localOutputDir,\n\t}\n\t// If the caller provided a path to NsJail, then intialize sandboxing properties.\n\tif nsjailPath != \"\" {\n\t\ts.sProps = &sandboxingProps{\n\t\t\tnsjailPath: nsjailPath,\n\t\t\tnsjailRoot: nsjailRoot,\n\t\t\t// TODO(rudymathu): Remove this once ssh/ssh-keygen usage is removed.\n\t\t\tmountUserHome: true,\n\t\t}\n\n\t\tif _, err := os.Stat(\"/sys/class/net/qemu/\"); err == nil {\n\t\t\ts.sProps.mountQEMU = true\n\t\t} else if !errors.Is(err, os.ErrNotExist) {\n\t\t\treturn &SubprocessTester{}, nil\n\t\t}\n\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn &SubprocessTester{}, err\n\t\t}\n\t\ts.sProps.cwd = cwd\n\t}\n\treturn s, nil\n}",
"func (o ExecClient) ExecuteCommand(ctx context.Context, command []string, podName string, containerName string, directRun bool, stdoutWriter *io.PipeWriter, stderrWriter *io.PipeWriter) (stdout []string, stderr []string, err error) {\n\tif !directRun {\n\t\tsoutReader, soutWriter := io.Pipe()\n\t\tserrReader, serrWriter := io.Pipe()\n\n\t\tklog.V(2).Infof(\"Executing command %v for pod: %v in container: %v\", command, podName, containerName)\n\n\t\t// Read stdout and stderr, store their output in cmdOutput, and also pass output to consoleOutput Writers (if non-nil)\n\t\tstdoutCompleteChannel := startReaderGoroutine(os.Stdout, soutReader, directRun, &stdout, stdoutWriter)\n\t\tstderrCompleteChannel := startReaderGoroutine(os.Stderr, serrReader, directRun, &stderr, stderrWriter)\n\n\t\terr = o.platformClient.ExecCMDInContainer(ctx, containerName, podName, command, soutWriter, serrWriter, nil, false)\n\n\t\t// Block until we have received all the container output from each stream\n\t\t_ = soutWriter.Close()\n\t\t<-stdoutCompleteChannel\n\t\t_ = serrWriter.Close()\n\t\t<-stderrCompleteChannel\n\n\t\t// Details are displayed only if no outputs are displayed\n\t\tif err != nil && !directRun {\n\t\t\t// It is safe to read from stdout and stderr here, as the goroutines are guaranteed to have terminated at this point.\n\t\t\tklog.V(2).Infof(\"ExecuteCommand returned an an err: %v. 
for command '%v'\\nstdout: %v\\nstderr: %v\",\n\t\t\t\terr, command, stdout, stderr)\n\n\t\t\tmsg := fmt.Sprintf(\"unable to exec command %v\", command)\n\t\t\tif len(stdout) != 0 {\n\t\t\t\tmsg += fmt.Sprintf(\"\\n=== stdout===\\n%s\", strings.Join(stdout, \"\\n\"))\n\t\t\t}\n\t\t\tif len(stderr) != 0 {\n\t\t\t\tmsg += fmt.Sprintf(\"\\n=== stderr===\\n%s\", strings.Join(stderr, \"\\n\"))\n\t\t\t}\n\t\t\treturn stdout, stderr, fmt.Errorf(\"%s: %w\", msg, err)\n\t\t}\n\n\t\treturn stdout, stderr, err\n\t}\n\n\ttty := setupTTY()\n\n\tfn := func() error {\n\t\treturn o.platformClient.ExecCMDInContainer(ctx, containerName, podName, command, tty.Out, os.Stderr, tty.In, tty.Raw)\n\t}\n\n\treturn nil, nil, tty.Safe(fn)\n}",
"func RunCommand(t *testing.T, name string, args []string, wd string, opts *ProgramTestOptions) error {\n\tpath := args[0]\n\tcommand := strings.Join(args, \" \")\n\tt.Logf(\"**** Invoke '%v' in '%v'\", command, wd)\n\n\tenv := os.Environ()\n\tif opts.Env != nil {\n\t\tenv = append(env, opts.Env...)\n\t}\n\tenv = append(env, \"PULUMI_DEBUG_COMMANDS=true\")\n\tenv = append(env, \"PULUMI_RETAIN_CHECKPOINTS=true\")\n\tenv = append(env, \"PULUMI_CONFIG_PASSPHRASE=correct horse battery staple\")\n\n\tcmd := exec.Cmd{\n\t\tPath: path,\n\t\tDir: wd,\n\t\tArgs: args,\n\t\tEnv: env,\n\t}\n\n\tstartTime := time.Now()\n\n\tvar runout []byte\n\tvar runerr error\n\tif opts.Verbose || os.Getenv(\"PULUMI_VERBOSE_TEST\") != \"\" {\n\t\tcmd.Stdout = opts.Stdout\n\t\tcmd.Stderr = opts.Stderr\n\t\trunerr = cmd.Run()\n\t} else {\n\t\trunout, runerr = cmd.CombinedOutput()\n\t}\n\n\tendTime := time.Now()\n\n\tif opts.ReportStats != nil {\n\t\t// Note: This data is archived and used by external analytics tools. 
Take care if changing the schema or format\n\t\t// of this data.\n\t\topts.ReportStats.ReportCommand(TestCommandStats{\n\t\t\tStartTime: startTime.Format(\"2006/01/02 15:04:05\"),\n\t\t\tEndTime: endTime.Format(\"2006/01/02 15:04:05\"),\n\t\t\tElapsedSeconds: float64((endTime.Sub(startTime)).Nanoseconds()) / 1000000000,\n\t\t\tStepName: name,\n\t\t\tCommandLine: command,\n\t\t\tStackName: string(opts.GetStackName()),\n\t\t\tTestID: wd,\n\t\t\tTestName: filepath.Base(opts.Dir),\n\t\t\tIsError: runerr != nil,\n\t\t\tCloudURL: opts.CloudURL,\n\t\t})\n\t}\n\n\tif runerr != nil {\n\t\tt.Logf(\"Invoke '%v' failed: %s\\n\", command, cmdutil.DetailedError(runerr))\n\n\t\tif !opts.Verbose {\n\t\t\tstderr := opts.Stderr\n\n\t\t\tif stderr == nil {\n\t\t\t\tstderr = os.Stderr\n\t\t\t}\n\n\t\t\t// Make sure we write the output in case of a failure to stderr so\n\t\t\t// tests can assert the shape of the error message.\n\t\t\t_, _ = fmt.Fprintf(stderr, \"%s\\n\", string(runout))\n\t\t}\n\t}\n\n\t// If we collected any program output, write it to a log file -- success or failure.\n\tif len(runout) > 0 {\n\t\tif logFile, err := writeCommandOutput(name, wd, runout); err != nil {\n\t\t\tt.Logf(\"Failed to write output: %v\", err)\n\t\t} else {\n\t\t\tt.Logf(\"Wrote output to %s\", logFile)\n\t\t}\n\t} else {\n\t\tt.Log(\"Command completed without output\")\n\t}\n\n\treturn runerr\n}",
"func ExecuteCliTestsStrategy(t *testing.T, cliTests []CliTests) {\n\tfor _, tst := range cliTests {\n\t\tt.Run(tst.title, func(t *testing.T) {\n\t\t\t// https://github.com/smartystreets/goconvey/wiki#your-first-goconvey-test\n\t\t\t// Only pass t into top-level Convey calls\n\t\t\tConvey(tst.title, t, func() {\n\n\t\t\t\t// Actual clone command to be tested\n\t\t\t\texitCode, errors := executeGitClone(tst.input.url, tst.input.privateKeyPath, tst.input.forceClone)\n\n\t\t\t\tConvey(fmt.Sprintf(\"The exit code should be %d\", tst.exitCode), func() {\n\t\t\t\t\tSo(exitCode, ShouldEqual, tst.exitCode)\n\t\t\t\t})\n\n\t\t\t\t// Test exact error message cases\n\t\t\t\tif len(tst.exactErrorMessages) > 0 {\n\t\t\t\t\tConvey(fmt.Sprintf(\"With %d error message\", len(tst.exactErrorMessages)), func() {\n\t\t\t\t\t\tSo(len(errors), ShouldEqual, len(tst.exactErrorMessages))\n\t\t\t\t\t\tfor _, errorMsg := range errors {\n\t\t\t\t\t\t\tSo(tst.exactErrorMessages[errorMsg.Error()], ShouldBeTrue)\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\t// Test whether the error message contains strings\n\t\t\t\tif len(tst.errorMessageContains) > 0 {\n\t\t\t\t\tConvey(fmt.Sprintf(\"With %d error message\", len(tst.errorMessageContains)), func() {\n\t\t\t\t\t\tfor _, errorMessageToken := range tst.errorMessageContains {\n\t\t\t\t\t\t\tSo(errors[0].Error(), ShouldContainSubstring, errorMessageToken)\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}",
"func RunPasteldWithInteractive(ctx context.Context, config *configs.Config, args ...string) (err error) {\n\tvar pastelDPath string\n\n\tif _, pastelDPath, _, _, err = checkPastelInstallPath(ctx, config, \"\"); err != nil {\n\t\treturn errNotFoundPastelPath\n\t}\n\n\tif !(config.Network == \"mainnet\" || config.Network == \"testnet\") {\n\t\treturn errNetworkModeInvalid\n\t}\n\n\targs = append(args, fmt.Sprintf(\"--datadir=%s\", config.WorkingDir))\n\n\tif config.Network == \"testnet\" {\n\t\targs = append(args, \"--testnet\")\n\t\treturn RunCMDWithInteractive(pastelDPath, args...)\n\t}\n\n\treturn RunCMDWithInteractive(pastelDPath, args...)\n}",
"func (to *TestOptions) RunTestCommand() error {\n\tcomponentName, err := getComponentName(to.componentContext)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar platformContext interface{}\n\tif pushtarget.IsPushTargetDocker() {\n\t\tplatformContext = nil\n\t} else {\n\t\tkc := kubernetes.KubernetesContext{\n\t\t\tNamespace: to.KClient.Namespace,\n\t\t}\n\t\tplatformContext = kc\n\t}\n\n\tdevfileHandler, err := adapters.NewComponentAdapter(componentName, to.componentContext, to.Application, to.devObj, platformContext)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn devfileHandler.Test(to.commandName, to.show)\n}",
"func testInfoInteractive() error {\n\trl, err := readline.New(\"\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error initializing input from console: %v\", err)\n\t}\n\tdefer rl.Close()\n\n\tif testShortName == \"\" {\n\t\trl.SetPrompt(\"Short name of the test: \")\n\t\tshortNamePrompt, err := rl.Readline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestShortName = shortNamePrompt\n\t}\n\n\tif testDescription == \"\" {\n\t\trl.SetPrompt(\"Description: \")\n\t\tdescriptionPrompt, err := rl.Readline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestDescription = descriptionPrompt\n\t}\n\n\tif testUsername == \"\" {\n\t\trl.SetPrompt(\"Your username: \")\n\t\tusernamePrompt, err := rl.Readline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestUsername = usernamePrompt\n\t}\n\n\tif testType == \"\" {\n\t\trl.SetPrompt(\"Test type: \")\n\t\ttestTypePrompt, err := rl.Readline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestType = testTypePrompt\n\t}\n\n\tif testPolarimeter == 0 {\n\t\trl.SetPrompt(\"Polarimeter number: \")\n\t\ttestPolarimeterPrompt, err := rl.Readline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tval, err := strconv.Atoi(testPolarimeterPrompt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestPolarimeter = val\n\t}\n\n\treturn nil\n}",
"func RunCI(cmd *cobra.Command, args []string) {\n\tcommand := CommandWithStdout(\"bash\", \"-c\", dockerPullCommandLine)\n\terr := command.Run()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"===== Build stage =====\")\n\tcommand = CommandWithStdout(\"authelia-scripts\", \"build\")\n\terr = command.Run()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"===== Unit testing stage =====\")\n\tcommand = CommandWithStdout(\"authelia-scripts\", \"unittest\")\n\terr = command.Run()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"===== End-to-end testing stage =====\")\n\tcommand = CommandWithStdout(\"authelia-scripts\", \"suites\", \"test\", \"--headless\", \"--only-forbidden\")\n\terr = command.Run()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (r *Remote) Interactive() error {\n\tgo func() {\n\t\tfor {\n\t\t\tdata, err := r.RecvN(1)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfmt.Printf(\"%c\", data[0])\n\t\t}\n\t}()\n\n\tfor {\n\t\tvar line string\n\t\tfmt.Scanln(&line)\n\t\tif line == \"_quit\" {\n\t\t\tfmt.Println(\"Exiting...\")\n\t\t\treturn nil\n\t\t}\n\n\t\t_, err := r.SendLine([]byte(line))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}",
"func (r *fxRunner) run(ctx context.Context, command string, args ...string) error {\n\treturn r.sr.Run(ctx, r.constructCommand(command, args), subprocess.RunOptions{\n\t\t// Subcommands may run interactive logins, so give them access to stdin by default.\n\t\tStdin: os.Stdin,\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
expectDescriptionSupplier returns a function intended to be used as description supplier when checking errors do not occur in ExpectString and SendLine. Note that the function returned is evaluated lazily, only in case an error occurs.
|
func expectDescriptionSupplier(ctx InteractiveContext, line string) func() string {
return func() string {
return fmt.Sprintf("error while sending or expecting line: \"%s\"\n"+
"=== output of command '%+q' read so far ===\n%v\n======================",
line,
ctx.Command,
ctx.buffer)
}
}
|
[
"func (m *Alert) SetDescription(value *string)() {\n m.description = value\n}",
"func crdDescriptionMock(\n\tspecDescriptor []olmv1alpha1.SpecDescriptor,\n\tstatusDescriptors []olmv1alpha1.StatusDescriptor,\n) olmv1alpha1.CRDDescription {\n\treturn olmv1alpha1.CRDDescription{\n\t\tName: fmt.Sprintf(\"%s.%s\", CRDKind, CRDName),\n\t\tDisplayName: CRDKind,\n\t\tDescription: \"mock-crd-description\",\n\t\tKind: CRDKind,\n\t\tVersion: CRDVersion,\n\t\tSpecDescriptors: specDescriptor,\n\t\tStatusDescriptors: statusDescriptors,\n\t}\n}",
"func (_Hotel_Interface *Hotel_InterfaceCaller) Description(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Hotel_Interface.contract.Call(opts, out, \"description\")\n\treturn *ret0, err\n}",
"func ShouldPanicWithStr(t *testing.T, msg string, fn func()) {\n\tt.Helper()\n\tdefer func() {\n\t\tt.Helper()\n\t\tr := recover()\n\t\tif r == nil {\n\t\t\tt.Errorf(\"%sThe function %p should have panicked with %#v\",\n\t\t\t\tgetCallerInfo(), fn, msg)\n\t\t\treturn\n\t\t}\n\t\tgotStr, ok := r.(string)\n\t\tif !ok {\n\t\t\tgotErr, ok := r.(error)\n\t\t\tif !ok {\n\t\t\t\tt.Errorf(\"%sThe function paniced with not string/error: %#v\", getCallerInfo(), r)\n\t\t\t}\n\t\t\tgotStr = gotErr.Error()\n\t\t}\n\t\tif d := Diff(msg, gotStr); len(d) != 0 {\n\t\t\tt.Errorf(\"%sThe function %p panicked with the wrong message.\\n\"+\n\t\t\t\t\"Expected: %#v\\nReceived: %#v\\nDiff:%s\",\n\t\t\t\tgetCallerInfo(), fn, msg, gotStr, d)\n\t\t}\n\t}()\n\tfn()\n}",
"func getFailureDesc(line string) (string, error) {\n\n\t// See the following part of our control specification:\n\t// https://gitweb.torproject.org/torspec.git/tree/control-spec.txt?id=1ecf3f66586816fc718e38f8cd7cbb23fa9b81f5#n2472\n\tvar reasons = map[string]string{\n\t\t\"DONE\": \"The OR connection has shut down cleanly.\",\n\t\t\"CONNECTREFUSED\": \"We got an ECONNREFUSED while connecting to the target OR.\",\n\t\t\"IDENTITY\": \"We connected to the OR, but found that its identity was not what we expected.\",\n\t\t\"CONNECTRESET\": \"We got an ECONNRESET or similar IO error from the connection with the OR.\",\n\t\t\"TIMEOUT\": \"We got an ETIMEOUT or similar IO error from the connection with the OR, or we're closing the connection for being idle for too long.\",\n\t\t\"NOROUTE\": \"We got an ENOTCONN, ENETUNREACH, ENETDOWN, EHOSTUNREACH, or similar error while connecting to the OR.\",\n\t\t\"IOERROR\": \"We got some other IO error on our connection to the OR.\",\n\t\t\"RESOURCELIMIT\": \"We don't have enough operating system resources (file descriptors, buffers, etc) to connect to the OR.\",\n\t\t\"PT_MISSING\": \"No pluggable transport was available.\",\n\t\t\"MISC\": \"The OR connection closed for some other reason.\",\n\t}\n\n\tmatches := OrConnReasonField.FindStringSubmatch(line)\n\texpectedMatches := 2\n\tif len(matches) != expectedMatches {\n\t\treturn \"\", fmt.Errorf(\"expected %d but got %d matches\", expectedMatches, len(matches))\n\t}\n\n\tdesc, exists := reasons[matches[1]]\n\tif !exists {\n\t\treturn \"\", fmt.Errorf(\"could not find reason for %q\", matches[1])\n\t}\n\n\treturn desc, nil\n}",
"func (m *Channel) SetDescription(value *string)() {\n m.description = value\n}",
"func SanitizeDescription(s string) string {\n\ts = ConvertMultiLineText(s)\n\ts = EscapeIllegalCharacters(s)\n\n\treturn s\n}",
"func RandomDescription() string {\n\treturn createRandomDescription(30)\n}",
"func RPCErrDesc(err error) error {\n\tdesc := strings.Split(err.Error(), \"desc = \")\n\treturn errors.New(desc[1])\n}",
"func (m *WindowsDefenderApplicationControlSupplementalPolicy) SetDescription(value *string)() {\n err := m.GetBackingStore().Set(\"description\", value)\n if err != nil {\n panic(err)\n }\n}",
"func sendGenerator(t *testing.T, err error, expectedID string,\n\texpectedMsg myhttp.GuestMessage) func(string, myhttp.GuestMessage) error {\n\treturn func(guestID string, msg myhttp.GuestMessage) error {\n\t\ttest.Equals(t, expectedID, guestID)\n\t\ttest.Equals(t, expectedMsg, msg)\n\t\treturn err\n\t}\n}",
"func RandomDescription() string {\n\treturn RandomString(3)\n}",
"func (m *DirectoryObjectPartnerReference) SetDescription(value *string)() {\n m.description = value\n}",
"func (m *Alert) GetDescription()(*string) {\n return m.description\n}",
"func (*Expr_Throw) Descriptor() ([]byte, []int) {\n\treturn file_com_daml_daml_lf_1_15_daml_lf_1_proto_rawDescGZIP(), []int{16, 20}\n}",
"func (f *Fallback) Description() string {\n\treturn \"Fallback do the fallback.\"\n}",
"func (e *Error) Wrap(desc string) error { return Wrap(e, desc) }",
"func (m memoizedExpression) ExpressionDescription(mode DescriptionMode) string {\n\treturn m.Expression.ExpressionDescription(mode)\n}",
"func (o BotIntentOutput) Description() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BotIntent) *string { return v.Description }).(pulumi.StringPtrOutput)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetAuth returns SpotifyAuthenticator populates with OAuth token from session.
|
func (s SessionAuthentication) GetAuth(session *sessions.Session) (auth.Authenticator, error) {
token, err := auth.GetTokenFromSession(session)
if err != nil {
return nil, err
}
return Authenticator{Token: token}, nil
}
|
[
"func GetAuthSession() *AuthSession {\n\tif r == nil {\n\t\tr = NewKVS()\n\t}\n\treturn &AuthSession{\n\t\tr,\n\t}\n}",
"func GetAuth(secret string, customerName string, svc secretsmanageriface.SecretsManagerAPI) ([]byte, error) {\n\tsecretKey, err := awssecret.GetSecret(secret, svc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tauth, err := json.Marshal(&Authenticate{Username: secretKey.ID, Password: secretKey.Key, CustomerName: customerName})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn auth, nil\n}",
"func GetAuth(apiName string) (t *Auth, err error) {\n\n\t// no cached token get a new one\n\tapiTokenLock.Lock()\n\tdefer apiTokenLock.Unlock()\n\t// we obtained the lock make sure that someone else didn't generate the token\n\tct := getCachedAuth(apiName)\n\tif ct != nil {\n\t\treturn ct, nil\n\t}\n\n\t// create a new one\n\tvar auth Auth\n\t// cache the auth\n\tauth.clientName = apiName\n\tkeys := newKeys(auth.clientName)\n\tif keys == nil {\n\t\tlog.Println(\"Unable to get keys. Check that you have keys <api_name>.id and <api_name>.secret in $HOME/.blizzard\")\n\t\treturn nil, errors.New(ErrNoKeys)\n\t}\n\tauth.keys = *keys\n\tsetCachedAuth(&auth)\n\n\treturn &auth, nil\n}",
"func GetAuth(connectionString string) *model.Auth {\n\tauth := &model.Auth{}\n\taccountName, endpoint, authenticationToken := fetchCredentials(connectionString)\n\tauth.AccountName = accountName\n\tauth.Endpoint = endpoint\n\tauth.AuthenticationToken = authenticationToken\n\treturn auth\n}",
"func GetAuth(accessKey string, secretKey string) (auth Auth, err error) {\n\t// First try passed in credentials\n\tif accessKey != \"\" && secretKey != \"\" {\n\t\treturn Auth{accessKey, secretKey, \"\"}, nil\n\t}\n\n\t// Next try to get auth from the environment\n\tauth, err = SharedAuth()\n\tif err == nil {\n\t\t// Found auth, return\n\t\treturn\n\t}\n\n\t// Next try to get auth from the environment\n\tauth, err = EnvAuth()\n\tif err == nil {\n\t\t// Found auth, return\n\t\treturn\n\t}\n\n\t// Next try getting auth from the instance role\n\tcred, err := getInstanceCredentials()\n\tif err == nil {\n\t\t// Found auth, return\n\t\tauth.AccessKey = cred.AccessKeyId\n\t\tauth.SecretKey = cred.SecretAccessKey\n\t\tauth.Token = cred.Token\n\t\treturn\n\t}\n\terr = errors.New(\"No valid AWS authentication found\")\n\treturn\n}",
"func TokenAuth() *Auth {\n\tonce.Do(func() {\n\t\tsecret, ok := os.LookupEnv(\"APP_KEY\")\n\t\tif !ok || secret == \"\" {\n\t\t\tlog.Panic(\"App key is not set.\")\n\t\t}\n\n\t\tauth = &Auth{jwtauth.New(\"HS256\", []byte(secret), nil)}\n\t})\n\n\treturn auth\n}",
"func GetAccessToken() {\n\tform := url.Values{}\n\tform.Add(\"client_id\", ClientCredentials.ClientID)\n\tform.Add(\"client_secret\", ClientCredentials.ClientSecret)\n\tform.Add(\"grant_type\", \"authorization_code\")\n\tform.Add(\"code\", ClientCredentials.Code)\n\tform.Add(\"redirect_uri\", ClientCredentials.RedirectURI)\n\tresponse, err := http.PostForm(\"https://accounts.spotify.com/api/token\", form)\n\tif err != nil {\n\t\tpanic(\"--- Could not authenticate ---\")\n\t}\n\tdefer response.Body.Close()\n\tbuffer, _ := ioutil.ReadAll(response.Body)\n\tjson.Unmarshal(buffer, &authCredentials)\n}",
"func getAuthConfig() api.UserToken {\n\tid := viper.GetString(\"auth.local.id\")\n\ttoken := viper.GetString(\"auth.local.apitoken\")\n\tcreds := api.UserToken{}\n\tcreds.ID = id\n\tcreds.APIToken = token\n\treturn creds\n}",
"func GetAuth(ctx context.Context) *Auth {\n\tauth, ok := ctx.Value(keyAuth).(*Auth)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn auth\n}",
"func GetAuth(r *http.Request) *UserAuth {\n\treturn r.Context().Value(userAuthKey).(*UserAuth)\n}",
"func GetAuth(ctx *gin.Context) (*jwt.Auth, error) {\n\tauth, ok := ctx.MustGet(\"auth\").(*jwt.Auth)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Auth is not found\")\n\t}\n\n\treturn auth, nil\n}",
"func GetDefaultAuthenticator(clientID string, secretKey string) spotify.Authenticator {\n\tauth := spotify.NewAuthenticator(redirectURL, spotify.ScopePlaylistReadPrivate, spotify.ScopePlaylistModifyPrivate)\n\tauth.SetAuthInfo(clientID, secretKey)\n\treturn auth\n}",
"func (s *Session) GetAuthURL() string {\n\treturn s.AuthURL\n}",
"func (p *Provider) BeginAuth(state string) (goth.Session, error) {\n\tif p.AuthType == AuthTypeOAuth2 {\n\t\treturn p.BeginOAuth2(state)\n\t}\n\n\tif p.consumer == nil {\n\t\tp.initConsumer()\n\t}\n\n\tif p.Method == \"private\" {\n\t\taccessToken := &oauth.AccessToken{\n\t\t\tToken: p.ClientKey,\n\t\t\tSecret: p.Secret,\n\t\t}\n\t\tprivateSession := &Session{\n\t\t\tAuthURL: authorizeURL,\n\t\t\tRequestToken: nil,\n\t\t\tAccessToken: accessToken,\n\t\t\tAccessTokenExpires: time.Now().UTC().Add(87600 * time.Hour),\n\t\t}\n\t\treturn privateSession, nil\n\t}\n\trequestToken, url, err := p.consumer.GetRequestTokenAndUrl(p.CallbackURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession := &Session{\n\t\tAuthURL: url,\n\t\tRequestToken: requestToken,\n\t}\n\treturn session, nil\n}",
"func Get(envName string) (*auth.Token, error) {\n\tpath := []string{\"micro\", \"auth\", envName}\n\taccessToken, _ := config.Get(append(path, \"token\")...)\n\n\trefreshToken, err := config.Get(append(path, \"refresh-token\")...)\n\tif err != nil {\n\t\t// Gracefully degrading here in case the user only has a temporary access token at hand.\n\t\t// The call will fail on the receiving end.\n\t\treturn &auth.Token{\n\t\t\tAccessToken: accessToken,\n\t\t}, nil\n\t}\n\n\t// See if the access token has expired\n\texpiry, _ := config.Get(append(path, \"expiry\")...)\n\tif len(expiry) == 0 {\n\t\treturn &auth.Token{\n\t\t\tAccessToken: accessToken,\n\t\t\tRefreshToken: refreshToken,\n\t\t}, nil\n\t}\n\texpiryInt, err := strconv.ParseInt(expiry, 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &auth.Token{\n\t\tAccessToken: accessToken,\n\t\tRefreshToken: refreshToken,\n\t\tExpiry: time.Unix(expiryInt, 0),\n\t}, nil\n}",
"func (s Service) Authenticate(authenticator auth.Authenticator) (music.Client, error) {\n\tspotifyAuth := auth.GetSpotifyAuthenticator(\"\")\n\toauthAuthenticator, ok := authenticator.(Authenticator)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"spotify: cannot process authentication\")\n\t}\n\tclient := spotifyAuth.NewClient(oauthAuthenticator.Token)\n\tspotifyClient := &Client{client: &client}\n\treturn spotifyClient, nil\n}",
"func GetAccessToken(code string, oauth SpotifyOauth) (Token, error) {\n\tvar err error\n\tparameters := url.Values{}\n\tparameters.Add(\"redirect_uri\", oauth.RedirectUri)\n\tparameters.Add(\"code\", code)\n\tparameters.Add(\"grant_type\", \"authorization_code\")\n\n\ttoken, err := sendAccessTokenRequest(parameters, oauth)\n\tif err == nil {\n\t\terr = saveTokenInfo(token, oauth)\n\t\tif err == nil {\n\t\t\treturn token, nil\n\t\t}\n\t}\n\treturn Token{}, err\n}",
"func (p ProxmoxClient) GetAuth(req *TicketRequest) (AuthInfo, error) {\n\tendpoint_url := \"/api2/json/access/ticket\"\n\tpayload, _ := query.Values(req)\n\tbody, err := p.PostContent(endpoint_url, payload)\n\n\tif err != nil {\n\t\treturn AuthInfo{}, err\n\t}\n\n\tvar auth AuthResponse\n\tjson.Unmarshal(body, &auth)\n\n\treturn auth.Data, nil\n}",
"func (p *Provider) AuthCallback(c buffalo.Context) auth.Response {\n\tres := c.Response()\n\treq := c.Request()\n\n\tresp := auth.Response{}\n\n\tdefer auth.Logout(res, req)\n\n\tmsg := auth.CheckSessionStore()\n\tif msg != \"\" {\n\t\tlog.WithContext(c).Errorf(\"got message from Google's CheckSessionStore() in AuthCallback ... %s\", msg)\n\t}\n\n\tvalue, err := auth.GetFromSession(ProviderName, req)\n\tif err != nil {\n\t\tresp.Error = err\n\t\treturn resp\n\t}\n\n\tsess, err := p.UnmarshalSession(value)\n\tif err != nil {\n\t\tresp.Error = err\n\t\treturn resp\n\t}\n\n\terr = auth.ValidateState(req, sess)\n\tif err != nil {\n\t\tresp.Error = err\n\t\treturn resp\n\t}\n\n\tuser, err := p.FetchUser(sess)\n\tif err == nil {\n\t\tauthUser := auth.User{\n\t\t\tFirstName: user.FirstName,\n\t\t\tLastName: user.LastName,\n\t\t\tEmail: user.Email,\n\t\t\tUserID: user.UserID,\n\t\t\tNickname: user.NickName,\n\t\t}\n\n\t\tresp.AuthUser = &authUser\n\n\t\t// user can be found with existing session data\n\t\treturn resp\n\t}\n\n\t// get new token and retry fetch\n\t_, err = sess.Authorize(p, req.URL.Query())\n\tif err != nil {\n\t\tresp.Error = err\n\t\treturn resp\n\t}\n\n\terr = auth.StoreInSession(ProviderName, sess.Marshal(), req, res)\n\n\tif err != nil {\n\t\tresp.Error = err\n\t\treturn resp\n\t}\n\n\tvar gu goth.User\n\tif gu, err = p.FetchUser(sess); err != nil {\n\t\tresp.Error = err\n\t\treturn resp\n\t}\n\n\tauthUser := auth.User{\n\t\tFirstName: gu.FirstName,\n\t\tLastName: gu.LastName,\n\t\tEmail: gu.Email,\n\t\tUserID: gu.UserID,\n\t\tNickname: gu.NickName,\n\t\tPhotoURL: gu.AvatarURL,\n\t}\n\n\tresp.AuthUser = &authUser\n\treturn resp\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Authenticate provides authenticator for the Spotify and return Client for Spotify.
|
func (s Service) Authenticate(authenticator auth.Authenticator) (music.Client, error) {
spotifyAuth := auth.GetSpotifyAuthenticator("")
oauthAuthenticator, ok := authenticator.(Authenticator)
if !ok {
return nil, fmt.Errorf("spotify: cannot process authentication")
}
client := spotifyAuth.NewClient(oauthAuthenticator.Token)
spotifyClient := &Client{client: &client}
return spotifyClient, nil
}
|
[
"func Authorize(s *Settings) *spotify.Client {\n\ttoken, err := retrieveToken(s)\n\tif err != nil {\n\t\tlog.Fatalf(\"error checking for existing token: %v\", err)\n\t}\n\tif token != nil {\n\n\t\tclient := auth.NewClient(token)\n\t\tlog.Println(\"Found existing token!\")\n\t\treturn &client\n\t}\n\tauth = spotify.NewAuthenticator(s.SpotifyRedirectURI, spotify.ScopePlaylistReadCollaborative)\n\thttp.Handle(\"/callback\", newAuthCompleter(s))\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Println(\"Got request for:\", r.URL.String())\n\t})\n\tgo http.ListenAndServe(\":8080\", nil)\n\turl := auth.AuthURL(state)\n\tfmt.Println(\"Please log in to Spotify by visiting the following page in your browser:\", url)\n\treturn <-ch\n}",
"func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {\n\tversions := []*utils.Version{\n\t\t{ID: v2, Priority: 20, Suffix: \"/v2.0/\"},\n\t\t{ID: v3, Priority: 30, Suffix: \"/v3/\"},\n\t}\n\n\tchosen, endpoint, err := utils.ChooseVersion(client, versions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch chosen.ID {\n\tcase v2:\n\t\treturn v2auth(client, endpoint, options, gophercloud.EndpointOpts{})\n\tcase v3:\n\t\treturn v3auth(client, endpoint, &options, gophercloud.EndpointOpts{})\n\tdefault:\n\t\t// The switch statement must be out of date from the versions list.\n\t\treturn fmt.Errorf(\"Unrecognized identity version: %s\", chosen.ID)\n\t}\n}",
"func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {\n\tmount, ok := m[\"mount\"]\n\tif !ok {\n\t\tmount = \"kerberos\"\n\t}\n\tusername := m[\"username\"]\n\tif username == \"\" {\n\t\treturn nil, errors.New(`\"username\" is required`)\n\t}\n\tservice := m[\"service\"]\n\tif service == \"\" {\n\t\treturn nil, errors.New(`\"service\" is required`)\n\t}\n\trealm := m[\"realm\"]\n\tif realm == \"\" {\n\t\treturn nil, errors.New(`\"realm\" is required`)\n\t}\n\tkeytabPath := m[\"keytab_path\"]\n\tif keytabPath == \"\" {\n\t\treturn nil, errors.New(`\"keytab_path\" is required`)\n\t}\n\tkrb5ConfPath := m[\"krb5conf_path\"]\n\tif krb5ConfPath == \"\" {\n\t\treturn nil, errors.New(`\"krb5conf_path\" is required`)\n\t}\n\n\tloginCfg := &LoginCfg{\n\t\tUsername: username,\n\t\tService: service,\n\t\tRealm: realm,\n\t\tKeytabPath: keytabPath,\n\t\tKrb5ConfPath: krb5ConfPath,\n\t}\n\n\tauthHeaderVal, err := GetAuthHeaderVal(loginCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\theaders := http.Header{}\n\theaders.Set(spnego.HTTPHeaderAuthRequest, authHeaderVal)\n\tc.SetHeaders(headers)\n\n\tpath := fmt.Sprintf(\"auth/%s/login\", mount)\n\n\tsecret, err := c.Logical().Write(path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif secret == nil {\n\t\treturn nil, errors.New(\"empty response from credential provider\")\n\t}\n\treturn secret, nil\n}",
"func NewAuthenticatedClient(t *testing.T) *vim25.Client {\n\tu := URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tsoapClient := soap.NewClient(u, true)\n\tvimClient, err := vim25.NewClient(context.Background(), soapClient)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treq := types.Login{\n\t\tThis: *vimClient.ServiceContent.SessionManager,\n\t}\n\n\treq.UserName = u.User.Username()\n\tif pw, ok := u.User.Password(); ok {\n\t\treq.Password = pw\n\t}\n\n\t_, err = methods.Login(context.Background(), vimClient, &req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn vimClient\n}",
"func (p *ServiceProvider) Spotify() SpotifyClient {\n\treturn p.spotify\n}",
"func Authenticate(clientInfo ClientInfo, verbose bool, trace TracingLevel) (*AuthenticatedClient, error) {\n\turl := \"https://api.astra.datastax.com/v2/authenticateServiceAccount\"\n\tbody, err := json.Marshal(clientInfo)\n\tif err != nil {\n\t\treturn &AuthenticatedClient{}, fmt.Errorf(\"unable to marshal JSON object with: %w\", err)\n\t}\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn &AuthenticatedClient{}, fmt.Errorf(\"failed creating request with: %w\", err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Accept\", \"application/json\")\n\tc := newHTTPClient()\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\treturn &AuthenticatedClient{}, fmt.Errorf(\"failed listing databases with: %w\", err)\n\t}\n\tmaybeTrace(req, res, trace)\n\tdefer closeBody(res)\n\tif res.StatusCode != 200 {\n\t\treturn &AuthenticatedClient{}, readErrorFromResponse(res, 200)\n\t}\n\tvar tokenResponse TokenResponse\n\terr = json.NewDecoder(res.Body).Decode(&tokenResponse)\n\tif err != nil {\n\t\treturn &AuthenticatedClient{}, fmt.Errorf(\"unable to decode response with error: %w\", err)\n\t}\n\tif tokenResponse.Token == \"\" {\n\t\treturn &AuthenticatedClient{}, errors.New(\"empty token in token response\")\n\t}\n\treturn &AuthenticatedClient{\n\t\tclient: c,\n\t\ttoken: fmt.Sprintf(\"Bearer %s\", tokenResponse.Token),\n\t\tverbose: verbose,\n\t\ttrace: trace,\n\t}, nil\n}",
"func NewAuthenticator(credentials io.Reader) (*Authenticator, error) {\n\tcfg, err := clientFromCredentials(credentials)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating config from credentials: %w\", err)\n\t}\n\treturn &Authenticator{\n\t\tState: generateOauthState(),\n\t\tcfg: cfg,\n\t}, nil\n}",
"func (w *ServerInterfaceWrapper) Authenticate(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(\"authorization.Scopes\", []string{\"user\", \"password\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.Authenticate(ctx)\n\treturn err\n}",
"func NewAuthenticator(o oauth.Options) (a Authenticator, err error) {\n\tctx := context.Background()\n\tswitch o.ProviderName {\n\tcase azure.Name:\n\t\ta, err = azure.New(ctx, &o)\n\tcase gitlab.Name:\n\t\ta, err = gitlab.New(ctx, &o)\n\tcase github.Name:\n\t\ta, err = github.New(ctx, &o)\n\tcase google.Name:\n\t\ta, err = google.New(ctx, &o)\n\tcase oidc.Name:\n\t\ta, err = oidc.New(ctx, &o)\n\tcase okta.Name:\n\t\ta, err = okta.New(ctx, &o)\n\tcase onelogin.Name:\n\t\ta, err = onelogin.New(ctx, &o)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"identity: unknown provider: %s\", o.ProviderName)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, nil\n}",
"func (mp MockProvider) Authenticate(ctx context.Context, code string) (*sessions.State, error) {\n\treturn &mp.AuthenticateResponse, mp.AuthenticateError\n}",
"func (s *Server) Authenticate(r *http.Request) (*ClientCredentials, *TokenCredentials, error) {\n\tvar (\n\t\tc *ClientCredentials\n\t\tt *TokenCredentials\n\t\terr error\n\t)\n\n\trr, err := s.validate(r, false)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tc = rr.client\n\tt = rr.token\n\treturn c, t, nil\n}",
"func (c *APIClient) Authenticate(ctx context.Context) error {\n\treq := Request {\n\t\tMethod: \"PUT\",\n\t\tCTX: ctx,\n\t}\n\n\ttype authData struct {\n\t\tCredentials string `json:\"credentials,omitempty\"`\n\t\tAPIKey string `json:\"api_key,omitempty\"`\n\t}\n\n\t//Authenticate requests using username/password/realm chain\n\t//it slightly different from the APIAuth\n\n\tad := authData{}\n\n\tif c.cfg.APIKey != \"\" {\n\t\treq.Path = c.cfg.BasePath + \"/api_auth\"\n\t\tad.APIKey = c.cfg.APIKey\n\t} else {\n\t\tif c.cfg.BasicAuth.Username != \"\" || c.cfg.BasicAuth.Password != \"\" || c.cfg.BasicAuth.Realm != \"\" {\n\t\t\treq.Path = c.cfg.BasePath + \"/user_auth\"\n\n\t\t\thasher := md5.New()\n\t\t\thash := hasher.Sum([]byte(c.cfg.BasicAuth.Username + \":\" + c.cfg.BasicAuth.Password))\n\t\t\tad.Credentials = hex.EncodeToString(hash)\n\t\t} else {\n\t\t\treturn reportError(\"\")\n\t\t}\n\t}\n\n\tauth := RequestEnvelope{\n\t\tData: ad,\n\t}\n\n\tauthBody, jsonErr := json.Marshal(auth)\n\tif jsonErr != nil {\n\t\treturn jsonErr\n\t}\n\n\tpostBody, bodyErr := setBody(authBody, \"json\")\n\tif bodyErr != nil {\n\t\treturn bodyErr\n\t}\n\n\t// create path and map variables\n\treq.PostBody = postBody\n\treq.HeaderParams = make(map[string]string)\n\treq.QueryParams = url.Values{}\n\n\t// body params\n\t//localVarPostBody = &body\n\tr, err := c.common.client.prepareRequest(&req)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthResponse, err := c.cfg.HTTPClient.Do(r)\n\t//authResponse, err := c.callAPI(ctx, r)\n\tif err != nil{\n\t\treturn err\n\t}\n\tif authResponse != nil {\n\t\tdefer authResponse.Body.Close()\n\t}\n\n\tswitch authResponse.StatusCode {\n\tcase 201:\n\t\t//Succesfull authentication\n\t\tauthdata := AuthResponse{}\n\t\terr := readBody(authResponse, &authdata)\n\t\tif err != nil {\n\t\t\treturn NewError(\"BodyError\", \"\", err)\n\t\t}\n\n\t\ttoken = authdata.AuthToken\n\t\tgo authTokenExpire()\n\n\t\treturn nil\n\n\tdefault:\n\t\treturn 
NewError(\"AuthenticationError\", string(authResponse.StatusCode), nil)\n\t}\n\n\t//return nil\n}",
"func (cl *Client) Authenticate(scopes []string) error {\n\tif len(scopes) == 0 {\n\t\treturn fmt.Errorf(\"Must have at least one scope defined (ie `data:read` and/or `bucket:create`)\")\n\t}\n\n\tgrant_type := \"client_credentials\" // TODO: don't hard code this, but make it default\n\tu := cl.Path(\"/authentication/v1/authenticate\")\n\tvalues := url.Values{\n\t\t\"grant_type\": {grant_type},\n\t\t\"client_id\": {cl.clientId},\n\t\t\"client_secret\": {cl.clientSecret},\n\t\t\"scope\": {strings.Join(scopes[:], \" \")},\n\t}\n\n\tresp, err := cl.client.PostForm(u, values)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(body, &cl.jwt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcl.jwt.SetExpiration()\n\n\tcl.scopes = scopes\n\treturn nil\n}",
"func (c *Controller) Authenticate(sid string, env *common.SessionEnv) (*common.ConnectResult, error) {\n\tc.metrics.Gauge(metricsRPCPending).Inc()\n\t<-c.sem\n\tdefer func() { c.sem <- struct{}{} }()\n\tc.metrics.Gauge(metricsRPCPending).Dec()\n\n\top := func() (interface{}, error) {\n\t\treturn c.client.Connect(\n\t\t\tnewContext(sid),\n\t\t\tprotocol.NewConnectMessage(env),\n\t\t)\n\t}\n\n\tc.metrics.Counter(metricsRPCCalls).Inc()\n\n\tresponse, err := c.retry(sid, op)\n\n\tif err != nil {\n\t\tc.metrics.Counter(metricsRPCFailures).Inc()\n\n\t\treturn nil, err\n\t}\n\n\tif r, ok := response.(*pb.ConnectionResponse); ok {\n\n\t\tc.log.WithField(\"sid\", sid).Debugf(\"Authenticate response: %v\", r)\n\n\t\treply, err := protocol.ParseConnectResponse(r)\n\n\t\treturn reply, err\n\t}\n\n\tc.metrics.Counter(metricsRPCFailures).Inc()\n\n\treturn nil, errors.New(\"Failed to deserialize connection response\")\n}",
"func NewSpotifyClient(authToken string) *SpotifyClient {\n\treturn &SpotifyClient{\n\t\tclient: &http.Client{\n\t\t\tTimeout: DefaultClientTimeout,\n\t\t},\n\t\ttoken: authToken,\n\t}\n}",
"func (c authenticator) Authenticate() (response oauthResponse, err error) {\n\tresp, err := http.PostForm(c.baseURL+\"/security/oauth/token\",\n\t\turl.Values{\n\t\t\t\"grant_type\": {\"password\"},\n\t\t\t\"client_id\": {c.creds.ClientID},\n\t\t\t\"client_secret\": {c.creds.ClientSecret},\n\t\t\t\"username\": {c.creds.Username},\n\t\t\t\"password\": {c.creds.Password},\n\t\t})\n\tif err != nil {\n\t\treturn oauthResponse{}, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn oauthResponse{}, errors.New(\"Prosper server error: \" + resp.Status)\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&response)\n\tif err != nil {\n\t\treturn oauthResponse{}, err\n\t}\n\treturn response, nil\n}",
"func GetDefaultAuthenticator(clientID string, secretKey string) spotify.Authenticator {\n\tauth := spotify.NewAuthenticator(redirectURL, spotify.ScopePlaylistReadPrivate, spotify.ScopePlaylistModifyPrivate)\n\tauth.SetAuthInfo(clientID, secretKey)\n\treturn auth\n}",
"func NewAuthenticator(creds ClientCredentials) ProsperAuthenticator {\n\treturn &authenticator{\n\t\tbaseURL: baseProsperURL,\n\t\tcreds: creds,\n\t}\n}",
"func (c *Config) Open(upstream *url.URL, serviceCerts *certs.ServiceCerts,\n\tlogger *zap.Logger, idTokenValidatorClient id_token.ValidateIdTokenServiceClient) (authenticator.Authenticator, error) {\n\n\t// create child logger, with added fields\n\tlogger = logger.With(\n\t\tzap.String(\"issuer\", c.Issuer),\n\t\tzap.String(\"client_id\", c.ClientID))\n\treturn NewAuthenticator(c.Issuer, c.ClientID, upstream, false, 1, serviceCerts, logger, idTokenValidatorClient)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CurrentUsersPlaylists returns playlists from the Spotify API.
|
func (c Client) CurrentUsersPlaylists() (*spotify.SimplePlaylistPage, error) {
playlist, err := c.client.CurrentUsersPlaylists()
return playlist, err
}
|
[
"func (s *PlaylistsService) List(\n\tctx context.Context,\n\tuserID int,\n) (*PlaylistsListResp, *http.Response, error) {\n\tif userID == 0 {\n\t\tuserID = s.client.userID\n\t}\n\n\turi := fmt.Sprintf(\"users/%v/playlists/list\", userID)\n\treq, err := s.client.NewRequest(http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tplaylists := new(PlaylistsListResp)\n\tresp, err := s.client.Do(ctx, req, playlists)\n\treturn playlists, resp, err\n}",
"func (s *PlaylistsService) Get(\n\tctx context.Context,\n\tuserID int,\n\tkind int,\n) (*PlaylistsGetResp, *http.Response, error) {\n\tif userID == 0 {\n\t\tuserID = s.client.userID\n\t}\n\n\turi := fmt.Sprintf(\"users/%v/playlists/%v\", userID, kind)\n\treq, err := s.client.NewRequest(http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tplaylist := new(PlaylistsGetResp)\n\tresp, err := s.client.Do(ctx, req, playlist)\n\treturn playlist, resp, err\n}",
"func (f *FortniteAPI) Playlists(language Language) (*Playlists, error) {\n\tstream, err := f.fetch(\"https://fortnite-api.com/v1/playlists\", map[string]string{\n\t\t\"language\": string(language),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trespBytes, err := ioutil.ReadAll(stream)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar out = &Playlists{}\n\terr = json.Unmarshal(respBytes, out)\n\treturn out, err\n}",
"func (t *Tokens) SearchPlaylists(keywords string) (*models.SearchPlaylists, error) {\n\t/**\n\thttps://developer.spotify.com/web-api/search-item/\n\t*/\n\n\tquery := encodeQuery(keywords)\n\n\tendpoint := \"https://api.spotify.com/v1/search?q=\" + query + \"&type=playlist\"\n\n\tres, err := extensions.GetRequest(endpoint, t.AccessToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tplaylists := new(models.SearchPlaylists)\n\n\terr = json.Unmarshal(res, playlists)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn playlists, nil\n}",
"func getPlaylistMap() (map[string]spotify.ID, error) {\n\tplaylistMap := make(map[string]spotify.ID)\n\n\t// Unfortunately 50 is as high as Spotify will go, meaning that our\n\t// pagination is pretty much guaranteed to get degradedly slow ...\n\tlimit := 50\n\toffset := 0\n\n\topts := &spotify.Options{\n\t\tLimit: &limit,\n\t\tOffset: &offset,\n\t}\n\n\tlog.Infof(\"Building playlist map\")\n\n\tfor {\n\t\tpage, err := client.CurrentUsersPlaylistsOpt(opts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reached the end of pagination.\n\t\tif len(page.Playlists) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, playlist := range page.Playlists {\n\t\t\tplaylistMap[playlist.Name] = playlist.ID\n\t\t}\n\n\t\toffset += len(page.Playlists)\n\t}\n\n\tlog.Infof(\"Cached %v playlist(s)\", len(playlistMap))\n\treturn playlistMap, nil\n}",
"func ListWatchedReposForAuthenticatedUser(ctx context.Context, req *ListWatchedReposForAuthenticatedUserReq, opt ...requests.Option) (*ListWatchedReposForAuthenticatedUserResponse, error) {\n\topts := requests.BuildOptions(opt...)\n\tif req == nil {\n\t\treq = new(ListWatchedReposForAuthenticatedUserReq)\n\t}\n\tresp := &ListWatchedReposForAuthenticatedUserResponse{}\n\n\thttpReq, err := req.HTTPRequest(ctx, opt...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr, err := opts.HttpClient().Do(httpReq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = resp.ReadResponse(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func GetChannelPlaylists(URL string) (links []Link, err error) {\n\tdoc, err := goquery.NewDocument(URL)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdoc.Find(\"a[href^='/playlist?list=']\").Each(func(_ int, s *goquery.Selection) {\n\t\tlinks = append(links, Link{\n\t\t\tName: s.Text(),\n\t\t\tURL: s.AttrOr(\"href\", \"\"),\n\t\t})\n\t})\n\n\treturn\n}",
"func GetMyWatchedRepos(ctx *context.APIContext) {\n\t// swagger:operation GET /user/subscriptions user userCurrentListSubscriptions\n\t// ---\n\t// summary: List repositories watched by the authenticated user\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/RepositoryList\"\n\n\trepos, total, err := getWatchedRepos(ctx, ctx.Doer, true, utils.GetListOptions(ctx))\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"getWatchedRepos\", err)\n\t}\n\n\tctx.SetTotalCountHeader(total)\n\tctx.JSON(http.StatusOK, &repos)\n}",
"func (a *Client) GetUsersCurrent(params *GetUsersCurrentParams, authInfo runtime.ClientAuthInfoWriter) (*GetUsersCurrentOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetUsersCurrentParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetUsersCurrent\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/users/current\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetUsersCurrentReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetUsersCurrentOK), nil\n\n}",
"func (a *Client) GetUsersCurrentPermissions(params *GetUsersCurrentPermissionsParams, authInfo runtime.ClientAuthInfoWriter) (*GetUsersCurrentPermissionsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetUsersCurrentPermissionsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetUsersCurrentPermissions\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/users/current/permissions\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetUsersCurrentPermissionsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetUsersCurrentPermissionsOK), nil\n\n}",
"func (s *ActivityService) ListWatched(ctx context.Context, user string, opt *ListOptions) ([]*Repository, *Response, error) {\n\tvar u string\n\tif user != \"\" {\n\t\tu = fmt.Sprintf(\"users/%v/subscriptions\", user)\n\t} else {\n\t\tu = \"user/subscriptions\"\n\t}\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar watched []*Repository\n\tresp, err := s.client.Do(ctx, req, &watched)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn watched, resp, nil\n}",
"func (a *app) currentlyPlayingAPI(w http.ResponseWriter, r *http.Request, id string) {\n\t// Get the access and refresh tokens from the database for\n\t// the given user.\n\t// If the tokens are empty we'll know that the user hasn't authorized\n\t// his/her account.\n\tat, rt := a.getTokens(id)\n\tif at == \"\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, newErrorAPI(http.StatusNotFound, \"not found\"))\n\t\treturn\n\t}\n\n\t// Get the currently playing object for the requested user id.\n\tcpo, err := a.getCurrentlyPlayingObject(id, at, rt)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, newErrorAPI(http.StatusInternalServerError, \"internal server error\"))\n\t\treturn\n\t}\n\n\t// This case means that the user isn't currently playing anything.\n\tif cpo == nil && err == nil {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintf(w, newErrorAPI(http.StatusOK, \"user is not playing anything\"))\n\t\treturn\n\t}\n\n\tj, _ := json.Marshal(cpo)\n\tfmt.Fprintf(w, string(j))\n}",
"func (this *Manager) GetUserList(appId uint32) ([]string) {\n this.Ulocker.RLock()\n defer this.Ulocker.RUnlock()\n\n uls := make([]string, 0)\n for _, c := range this.Users {\n if c.AppId == appId {\n uls = append(uls, c.UserId)\n }\n }\n\n log.Printf(\"-DBUG- users count: %d\", len(this.Users))\n\n return uls\n}",
"func (t *Team) Players(c appengine.Context) ([]*User, error) {\n\n\tvar users []*User\n\tvar err error\n\tif users, err = UsersByIds(c, t.UserIds); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn users, err\n}",
"func (us *User) GetListOfActiveSessions() (SessionListSuccess, error){\n var (\n rawRequest *RawRequest\n response []byte\n err error\n getListOfActiveSessionsResponse SessionListSuccess\n\t )\n\n \n\n \n\n \n \n \n \n \n //API call\n rawRequest = NewRequest(\n us.config,\n \"get\",\n \"/service/application/user/authentication/v1.0/sessions\",\n nil,\n nil,\n nil)\n response, err = rawRequest.Execute()\n if err != nil {\n return SessionListSuccess{}, err\n\t }\n \n err = json.Unmarshal(response, &getListOfActiveSessionsResponse)\n if err != nil {\n return SessionListSuccess{}, common.NewFDKError(err.Error())\n }\n return getListOfActiveSessionsResponse, nil\n \n }",
"func GetWatchedRepos(ctx *context.APIContext) {\n\t// swagger:operation GET /users/{username}/subscriptions user userListSubscriptions\n\t// ---\n\t// summary: List the repositories watched by a user\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: username\n\t// type: string\n\t// in: path\n\t// description: username of the user\n\t// required: true\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/RepositoryList\"\n\n\tprivate := ctx.ContextUser.ID == ctx.Doer.ID\n\trepos, total, err := getWatchedRepos(ctx, ctx.ContextUser, private, utils.GetListOptions(ctx))\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"getWatchedRepos\", err)\n\t}\n\n\tctx.SetTotalCountHeader(total)\n\tctx.JSON(http.StatusOK, &repos)\n}",
"func GetFilteredUserLists(url string) ([]UserList, error) {\n\n\tuserLists := []UserList{}\n\n\tfor true {\n\t\tresp, err := AuthenticatedRequest(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar data UserListsResponse\n\n\t\t// Decode request response into UserListsResponse\n\t\terr = json.NewDecoder(resp.Body).Decode(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tuserLists = append(userLists, data.Lists...)\n\n\t\t// Check if pagination provides a Next url to use\n\t\tnextURL := data.Pagination.Urls.Next\n\t\tif nextURL != \"\" {\n\t\t\tfmt.Println(data)\n\t\t\turl = nextURL\n\t\t} else {\n\t\t\t// Exit loop if not\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// Return filtered results\n\treturn FilterNotifyUserLists(userLists), nil\n}",
"func getUserList(w http.ResponseWriter, r *http.Request, listID string) (*models.List, error){\n\tvar list models.List\n\terr := mgm.Coll(listModel).FindByID(listID, &list)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn nil, err\n\t}\n\n\tif list.Username != mux.Vars(r)[\"username\"] {\n\t\thttp.Error(w, \"forbidden resource\", http.StatusForbidden)\n\t\treturn nil, errors.New(\"forbidden resource\")\n\t}\n\treturn &list, nil\n}",
"func NewPlaylistsService(temperatureRepository base.TemperatureRepository,\n\tplaylistsRepository base.PlaylistsRepository) base.PlaylistsService {\n\treturn playlistsService{\n\t\ttemperatureRepository: temperatureRepository,\n\t\tplaylistsRepository: playlistsRepository,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PlayerDevices returns available devices for the playback.
|
func (c Client) PlayerDevices() ([]spotify.PlayerDevice, error) {
devices, err := c.client.PlayerDevices()
return devices, err
}
|
[
"func GetDevices() (d []Device, err error) {\n\tvar ds Devices\n\tt := getAccessToken()\n\n\tr := buildRequest(\"GET\", apiURLBase+\"me/player/devices\", nil, nil)\n\tr.Header.Add(\"Authorization\", \"Bearer \"+t)\n\n\terr = makeRequest(r, &ds)\n\n\treturn ds.Devices, err\n}",
"func Devices() ([]*DeviceInfo, error) {\n\treturn nil, errNotSupported\n}",
"func (c *Conn) Devices() ([]Device, error) {\n\tif c.closed {\n\t\treturn nil, nil\n\t} else if c.watchEnabled {\n\t\treturn nil, ErrWatchModeEnabled\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.send(\"?DEVICES;\")\n\n\tfor {\n\t\tobj, err := c.next()\n\t\tif err != nil {\n\t\t\treturn nil, errUnexpected(err)\n\t\t}\n\n\t\tif devs, ok := obj.([]Device); ok {\n\t\t\treturn devs, nil\n\t\t}\n\t}\n}",
"func getDevices(cfg *config) ([]device, error) {\n\tsh := gosh.NewShell(nil)\n\tdefer sh.Cleanup()\n\n\toutput := sh.Cmd(\"adb\", \"devices\", \"-l\").Stdout()\n\n\treturn parseDevicesOutput(output, cfg)\n}",
"func (pa *PortAudio) Devices() (in *portaudio.DeviceInfo, out *portaudio.DeviceInfo) {\n\treturn pa.inDevice, pa.outDevice\n}",
"func (ow *OW) ListDevices() (devs []string, err error) {\n\tvar dir []string\n\tdir, err = ow.Dir(\"/\")\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, item := range dir {\n\t\tdev := DeviceRegex.FindString(item)\n\t\tif dev != \"\" {\n\t\t\tdevs = append(devs, dev)\n\t\t}\n\t}\n\treturn\n}",
"func (manager *DeviceManager) GetDevices(target *DeviceType) []*Device {\n\tdevices := []*Device{}\n\tfor deviceType, device := range manager.Devices {\n\t\tif deviceType.Core == target.Core && (target.Modifier == \"\" || deviceType.Modifier == target.Modifier) {\n\t\t\tdevices = append(devices, device)\n\t\t}\n\t}\n\treturn devices\n}",
"func (cp *handler[T]) Devices() []Device[T] {\n\tcp.mu.Lock()\n\tdefer cp.mu.Unlock()\n\n\treturn cp.devices\n}",
"func (b *BottlerocketBridge) AvailableDevices(ctx context.Context) ([]*pb.Device, error) {\n\treturn b.persister.AvailableDevices(ctx)\n}",
"func GetDevices(client *http.Client, endpoint string) ([]DeviceList, error) {\n\tret := []DeviceList{}\n\n\tcontents, err := issueCommand(client, endpoint, \"/devices\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(contents, &ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}",
"func GetDevices(clientID *string, secretID *string) []string {\n\tmapOfIDToName(clientID, secretID)\n\taccessAPI(clientID, secretID)\n\n\tdevices := []string{}\n\tdevs, err := gosmart.GetDevices(client, endpoint)\n\tcheck(err)\n\tfor _, d := range devs {\n\t\tdInfo, err := gosmart.GetDeviceInfo(client, endpoint, d.ID)\n\t\tcheck(err)\n\t\tif dInfo.Attributes[\"switch\"] == \"on\" {\n\t\t\tdevices = append(devices, d.ID+\"|\"+d.DisplayName+\"|on\")\n\t\t} else {\n\t\t\tdevices = append(devices, d.ID+\"|\"+d.DisplayName+\"|\")\n\t\t}\n\t\tfmt.Printf(\"ID: %s, Name: %q, Display Name: %q\\n\", d.ID, d.Name, d.DisplayName)\n\t}\n\treturn devices\n}",
"func GetDevices(token string) []Device {\n\t// curl --header 'Access-Token: <your_access_token_here>' \\\n\t// https://api.pushbullet.com/v2/devices\n\n\tpbURL := \"https://api.pushbullet.com\"\n\theaders := []header{\n\t\t{\"Access-Token\", token},\n\t}\n\n\tres, err := makeRequest(pbURL+\"/v2/devices\", \"GET\", headers, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tvar userDevices deviceResponse\n\tjson.NewDecoder(res.Body).Decode(&userDevices)\n\treturn userDevices.Devices\n}",
"func (m *ManagerImpl) GetDevices(podUID, containerName string) ResourceDeviceInstances {\n\treturn m.podDevices.getContainerDevices(podUID, containerName)\n}",
"func GetAllDevices() ([]MODELS.Device, error) {\r\n\tvar deviceData []MODELS.Device = make([]MODELS.Device, 0, 10)\r\n\r\n\trows, err := db.Query(\r\n\t\tfmt.Sprintf(\"SELECT * FROM %s\", DEVICE_TABLE),\r\n\t)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, fmt.Errorf(\"GetAllDevices: %v\", err)\r\n\t}\r\n\tdefer rows.Close()\r\n\r\n\tfor rows.Next() {\r\n\t\tvar data MODELS.Device\r\n\t\terr := rows.Scan(\r\n\t\t\t&data.ID,\r\n\t\t\t&data.RoomId,\r\n\t\t\t&data.Name,\r\n\t\t\t&data.Parameters,\r\n\t\t\t&data.Status,\r\n\t\t\t&data.Type,\r\n\t\t)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, fmt.Errorf(\"GetAllDevices: %v\", err)\r\n\t\t}\r\n\t\tdeviceData = append(deviceData, data)\r\n\t}\r\n\r\n\treturn deviceData, nil\r\n}",
"func (tr *BLETransport) GetDevices() []BLEDevice {\n\ttr.peripheralsMutex.Lock()\n\tdefer tr.peripheralsMutex.Unlock()\n\n\tres := make([]BLEDevice, len(tr.connectedPeripherals))\n\n\ti := 0\n\tfor _, d := range tr.connectedPeripherals {\n\t\tres[i] = d.Device\n\t\ti++\n\t}\n\n\treturn res\n}",
"func (config *ContainerHookConfig) GetAllDevices() []*DeviceMapping {\n\treturn config.Devices[:]\n}",
"func (o *PluginSettings) GetDevicesOk() ([]PluginDevice, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Devices, true\n}",
"func GetFfmpegDevices(p ProberCommon) Devices {\n\tdevs := Devices{}\n\tdevs.Audios = parseFfmpegDeviceType(p, \"audio\")\n\tdevs.Videos = parseFfmpegDeviceType(p, \"video\")\n\treturn devs\n}",
"func (dm *devMapper) ListDevices() ([]dmDevice, error) {\n\tvar (\n\t\tdmName struct {\n\t\t\tDev uint64\n\t\t\tNext uint32\n\t\t}\n\t\tdevices []dmDevice\n\t)\n\n\t// TODO: Move command version numbers to central location, like the C libdevmapper does\n\tdmi := dmIoctl{Version: dmVersion{4, 0, 0}}\n\n\tbuf, err := dm.ioctl(DM_LIST_DEVICES, &dmi)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Reader spanning the dm ioctl reponse payload\n\tr := bytes.NewReader(buf[dmi.DataStart : dmi.DataStart+dmi.DataSize])\n\n\tfor {\n\t\tvar name []byte\n\n\t\tbinary.Read(r, nativeEndian, &dmName)\n\n\t\tif dmName.Next != 0 {\n\t\t\t// Make byte array large enough to hold the bytes up until next struct head\n\t\t\tname = make([]byte, int(dmName.Next)-binary.Size(dmName))\n\t\t} else {\n\t\t\t// Last device in list - consume all remaining bytes\n\t\t\tname = make([]byte, r.Len())\n\t\t}\n\n\t\tr.Read(name)\n\n\t\tdevices = append(devices, dmDevice{Dev: dmName.Dev, Name: string(bytes.TrimRight(name, \"\\x00\"))})\n\n\t\tif dmName.Next == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn devices, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PlayerState returns current player state.
|
func (c Client) PlayerState() (*spotify.PlayerState, error) {
playerState, err := c.client.PlayerState()
return playerState, err
}
|
[
"func (p *Player) State() int {\n\n\treturn int(al.GetSourcei(p.source, al.SourceState))\n}",
"func GetPlayerState(opts *Options) (ps PlayerState, err error) {\n\tv, err := query.Values(opts)\n\n\tif err != nil {\n\t\treturn ps, err\n\t}\n\n\tt := getAccessToken()\n\n\tr := buildRequest(\"GET\", apiURLBase+\"me/player\", v, nil)\n\tr.Header.Add(\"Authorization\", \"Bearer \"+t)\n\n\terr = makeRequest(r, &ps)\n\n\treturn ps, err\n}",
"func (g *Game) State() uint {\n\tg.gameLock.Lock()\n\tdefer g.gameLock.Unlock()\n\treturn g.state\n}",
"func GetTestState() *GameState {\n\n\treturn &GameState{\n\t\tTurn: 1,\n\t\tRNG: rand.New(rand.NewSource(0)), // this may be useful as our test state will always generate the same sequence of things\n\t\tCreated: 1000,\n\t\tUpdated: 2000,\n\t\tIDCounter: 300,\n\t\tBoardWidth: 2,\n\t\tBoardHeight: 2,\n\t\tBoardSpaces: []BoardSpace{\n\t\t\t{ID: 0, SpaceType: 0, State: 0},\n\t\t\t{ID: 1, SpaceType: 0, State: 0},\n\t\t\t{ID: 2, SpaceType: 0, State: 0},\n\t\t\t{ID: 3, SpaceType: 0, State: 0},\n\t\t},\n\t\tPlayers: []PlayerState{\n\t\t\t{\n\t\t\t\tID: PlayerOneID,\n\t\t\t\tName: PlayerOneName,\n\t\t\t\tLocation: 0,\n\t\t\t\tFacing: 1,\n\t\t\t\tHealth: 1,\n\t\t\t\tHand: []CardState{\n\t\t\t\t\t{ID: 101, CardType: 0},\n\t\t\t\t\t{ID: 102, CardType: 0},\n\t\t\t\t\t{ID: 103, CardType: 0},\n\t\t\t\t\t{ID: 104, CardType: 0},\n\t\t\t\t\t{ID: 105, CardType: 0},\n\t\t\t\t},\n\t\t\t\tDeck: []CardState{\n\t\t\t\t\t{ID: 106, CardType: 0},\n\t\t\t\t\t{ID: 107, CardType: 0},\n\t\t\t\t\t{ID: 108, CardType: 0},\n\t\t\t\t\t{ID: 109, CardType: 0},\n\t\t\t\t\t{ID: 110, CardType: 0},\n\t\t\t\t},\n\t\t\t\tDiscard: []CardState{\n\t\t\t\t\t{ID: 111, CardType: 0},\n\t\t\t\t\t{ID: 112, CardType: 0},\n\t\t\t\t\t{ID: 113, CardType: 0},\n\t\t\t\t\t{ID: 114, CardType: 0},\n\t\t\t\t\t{ID: 115, CardType: 0},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: PlayerTwoID,\n\t\t\t\tName: PlayerTwoName,\n\t\t\t\tLocation: 3,\n\t\t\t\tFacing: 2,\n\t\t\t\tHealth: 2,\n\t\t\t\tHand: []CardState{\n\t\t\t\t\t{ID: 201, CardType: 0},\n\t\t\t\t\t{ID: 202, CardType: 0},\n\t\t\t\t\t{ID: 203, CardType: 0},\n\t\t\t\t\t{ID: 204, CardType: 0},\n\t\t\t\t\t{ID: 205, CardType: 0},\n\t\t\t\t},\n\t\t\t\tDeck: []CardState{\n\t\t\t\t\t{ID: 206, CardType: 0},\n\t\t\t\t\t{ID: 207, CardType: 0},\n\t\t\t\t\t{ID: 208, CardType: 0},\n\t\t\t\t\t{ID: 209, CardType: 0},\n\t\t\t\t\t{ID: 210, CardType: 0},\n\t\t\t\t},\n\t\t\t\tDiscard: []CardState{\n\t\t\t\t\t{ID: 211, CardType: 0},\n\t\t\t\t\t{ID: 212, CardType: 0},\n\t\t\t\t\t{ID: 213, 
CardType: 0},\n\t\t\t\t\t{ID: 214, CardType: 0},\n\t\t\t\t\t{ID: 215, CardType: 0},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tAnimations: [][]animations.AnimationAction{\n\t\t\t[]animations.AnimationAction{\n\t\t\t\tanimations.GetMoveForward(PlayerOneID),\n\t\t\t\tanimations.GetMoveBackward(PlayerTwoID),\n\t\t\t},\n\t\t\t[]animations.AnimationAction{\n\t\t\t\tanimations.GetMoveForward(PlayerTwoID),\n\t\t\t\tanimations.GetTurnClockwise90(PlayerOneID),\n\t\t\t},\n\t\t\t[]animations.AnimationAction{\n\t\t\t\tanimations.GetFireCanon(PlayerOneID),\n\t\t\t\tanimations.GetHitByCanon(PlayerTwoID),\n\t\t\t},\n\t\t},\n\t}\n}",
"func (b *Bot) State() map[string]interface{} {\n\treturn b.state\n}",
"func (ch Channel) GetState() int32 {\n\treturn int32(ch.State)\n}",
"func (c *Connection) GetState() State {\n\treturn c.state\n}",
"func (c *Connection) State() ConnectionState {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\treturn c.state\n}",
"func (i *InterfaceState) GetState() uint32 {\n\tif i.State == \"admin-up\" {\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}",
"func (l *Life) BoardState() []bool {\n\treturn l.board\n}",
"func (s *SubState) PlayerIndex() boardgame.PlayerIndex {\n\treturn s.ref.PlayerIndex\n}",
"func (container *Container) State() ContainerState {\n\tcontainer.mutex.Lock()\n\tdefer container.mutex.Unlock()\n\treturn container.state\n}",
"func (i *InteractableObject) State() bool {\n\treturn i.state\n}",
"func (s *stateMachine) State() (*State, time.Time) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.state, s.ts\n}",
"func (e *Event) GetState() uint32 {\n\treturn atomic.LoadUint32(&e.state)\n}",
"func (s Source) State() int32 {\n\treturn getSourcei(s, paramSourceState)\n}",
"func (m *SecureScoreControlStateUpdate) GetState()(*string) {\n return m.state\n}",
"func (gsm *GameStateManager) Get() GameState {\n\treturn gsm.currentState\n}",
"func (f *Frame) State() RunState {\n\treturn f.state\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TransferPlayback transfer playback to another player by a ID.
|
func (c Client) TransferPlayback(id spotify.ID, play bool) error {
err := c.client.TransferPlayback(id, play)
return err
}
|
[
"func (pb *Playback) PlaybackSet(val string, id string, accessToken string) (bool, error) {\n\t// change toggle_play_pause to togglePlayPause, next_track to skipToNextTrack and previous_track to skipToPreviousTrack\n\tif val == \"toggle_play_pause\" {\n\t\tval = \"togglePlayPause\"\n\t} else if val == \"next_track\" {\n\t\tval = \"skipToNextTrack\"\n\t} else if val == \"previous_track\" {\n\t\tval = \"skipToPreviousTrack\"\n\t}\n\n\turl := fmt.Sprintf(\"%s%s%s%s\", \"https://api.ws.sonos.com/control/api/v1/groups/\", id, \"/playback/\", val)\n\n\treq, err := http.NewRequest(\"POST\", url, nil)\n\tif err != nil {\n\t\tlog.Error(\"Error when setting playback: \", err)\n\t\treturn false, err\n\t}\n\treq.Header.Set(\"Authorization\", os.ExpandEnv(fmt.Sprintf(\"%s%s\", \"Bearer \", accessToken)))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tlog.Debug(\"New request: \", req)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Error(\"Error when DefaultClient.Do on PlaybackSet: \", err)\n\t\treturn false, err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Error(\"Bad HTTP return code \", resp.StatusCode)\n\t\treturn false, fmt.Errorf(\"%s%s\", \"Bad HTTP return code \", strconv.Itoa(resp.StatusCode))\n\t}\n\n\treturn true, nil\n}",
"func (_Dai *DaiTransactorSession) Transfer(dst common.Address, wad *big.Int) (*types.Transaction, error) {\n\treturn _Dai.Contract.Transfer(&_Dai.TransactOpts, dst, wad)\n}",
"func (_Dai *DaiTransactor) Transfer(opts *bind.TransactOpts, dst common.Address, wad *big.Int) (*types.Transaction, error) {\n\treturn _Dai.contract.Transact(opts, \"transfer\", dst, wad)\n}",
"func (_CardMinting *CardMintingTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _cardId *big.Int) (*types.Transaction, error) {\n\treturn _CardMinting.contract.Transact(opts, \"transfer\", _to, _cardId)\n}",
"func (c *Conn) MediaTogglePlayback(id int) error {\n\treturn c.doCommand(fmt.Sprintf(\"/api/1/vehicles/%d/command/media_toggle_playback\", id), nil)\n}",
"func StartPlayback(opts *PlayerOptions) error {\n\tv, err := query.Values(nil) // Don't pass anything here because if we do and we start playback with a large list URIs they will be put in the query string and give us an error\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts.DeviceID = \"\"\n\tj, err := json.Marshal(opts)\n\n\tif err != nil {\n\t\tlog.Fatal(\"fatal\", err)\n\t}\n\n\tb := bytes.NewBuffer(j)\n\n\tt := getAccessToken()\n\n\tr := buildRequest(\"PUT\", apiURLBase+\"me/player/play\", v, b)\n\tr.Header.Add(\"Authorization\", \"Bearer \"+t)\n\n\terr = makeRequest(r, nil)\n\t// TODO: Handle error here\n\n\treturn err\n}",
"func NewPlaybackAPI(a audio.Playback, factory websocket.ConnectionFactory) rest.API {\n\tp := playAPI{a, factory}\n\treturn rest.API(&p)\n}",
"func (ap *AbstractP2P) SetTransfer(src, dst string) {\n\tap.src = src\n\tap.dst = dst\n}",
"func (_CardOwnership *CardOwnershipTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _cardId *big.Int) (*types.Transaction, error) {\n\treturn _CardOwnership.contract.Transact(opts, \"transfer\", _to, _cardId)\n}",
"func (c Channel) TransferID() TransferID { return c.transferID }",
"func Transfer(done chan [3]string, sendURL string, URLData url.Values, raidaID int, t time.Time) {\n\tvar responseText [3]string\n\tstart := time.Now()\n\t//fmt.Printf(\"\\nSentUrl: %v\\nUrlData:%v\\r\\n\\r\\n\", sendURL, URLData)\n\tresponse, err := http.PostForm(sendURL, URLData)\n\tErrStop(20, err, t)\n\n\tdefer response.Body.Close()\n\tbody, _ := ioutil.ReadAll(response.Body)\n\n\tu, _ := url.Parse(sendURL)\n\tu.RawQuery = URLData.Encode()\n\tRequest := fmt.Sprintf(\" RAIDA %d: %v\", raidaID, u)\n\telapsed := time.Since(start)\n\telapsedString := fmt.Sprintf(\"%v\", elapsed)\n\tresponseText[0] = string(body)\n\tresponseText[1] = Request\n\tresponseText[2] = elapsedString\n\tdone <- responseText\n\n}",
"func Transfer(ctx context.Context, pair *KeyPair, id uint64, cluster []string) error {\n\tclient, err := getLeader(ctx, pair, cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\tleader, err := client.Leader(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error getting leader\")\n\t}\n\tlog.Printf(\"transfer leadership from:%d to:%d\\n\", leader.ID, id)\n\t//return client.Transfer(ctx, id)\n\terr = client.Transfer(ctx, id)\n\tlog.Printf(\"transferred leadership from:%d to:%d\\n\", leader.ID, id)\n\treturn err\n}",
"func (_CanDelegate *CanDelegateTransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _CanDelegate.Contract.Transfer(&_CanDelegate.TransactOpts, to, value)\n}",
"func (a *CallbackAction) setDestinationControlID(id string) {\n\tparts := strings.SplitN(id, \"_\", 2)\n\tif len(parts) == 2 {\n\t\ta.DestControlID = parts[0]\n\t\ta.SubID = parts[1]\n\t} else {\n\t\ta.DestControlID = id\n\t}\n}",
"func (_CanDelegate *CanDelegateTransactor) Transfer(opts *bind.TransactOpts, to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _CanDelegate.contract.Transact(opts, \"transfer\", to, value)\n}",
"func (_RepTok *RepTokTransactor) Transfer(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) {\n\treturn _RepTok.contract.Transact(opts, \"transfer\", _recipient, _amount)\n}",
"func (a *serverAction) DestinationControlID(id string) *serverAction {\n\ta.setDestinationControlID(id)\n\treturn a\n}",
"func (s *MatchSession) AddPlayerID(uuids ...uuid.UUID) {\nloop:\n\tfor _, v := range uuids {\n\t\tfor _, w := range s.PlayerIDs { // it's ugly, but we can't sort.\n\t\t\tif v == w {\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t}\n\n\t\ts.PlayerIDs = append(s.PlayerIDs, v)\n\t}\n}",
"func Transfer(done chan [3]string, sendURL string, URLData url.Values, raidaID int, t time.Time) {\n\tvar responseText [3]string\n\tstart := time.Now()\n\t//\tfmt.Printf(\"\\nSentUrl: %v\\nUrlData:%v\", sendURL, URLData)\n\tresponse, err := raidahttp.PostForm(sendURL, URLData)\n\tif err != nil {\n\t\tfmt.Println(\"request failed for sendURL\")\n\t\tresponseText[0] = \"{failed}\"\n\t\tRequest := fmt.Sprintf(\" RAIDA %d\")\n\t\telapsed := time.Since(start)\n\t\telapsedString := fmt.Sprintf(\"%v\", elapsed)\n\t\tresponseText[1] = Request\n\t\tresponseText[2] = elapsedString\n\t\tdone <- responseText\n\t\treturn\n\t}\n\t//\tdefer response.Body.Close()\n\tbody, _ := ioutil.ReadAll(response.Body)\n\n\tu, _ := url.Parse(sendURL)\n\tu.RawQuery = URLData.Encode()\n\tRequest := fmt.Sprintf(\" RAIDA %d: %v\", raidaID, u)\n\telapsed := time.Since(start)\n\telapsedString := fmt.Sprintf(\"%v\", elapsed)\n\tresponseText[0] = string(body)\n\tresponseText[1] = Request\n\tresponseText[2] = elapsedString\n\tdone <- responseText\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Search performs search within Spotify database.
|
func (c Client) Search(query string, t spotify.SearchType) (*spotify.SearchResult, error) {
searchResult, err := c.client.Search(query, t)
return searchResult, err
}
|
[
"func Search(index, query string) {\n\ta.Get(index + \"/_search?q=\" + query)\n}",
"func (t TwitterAPI) search(queryString string, v url.Values) (sr SearchResponse, err error) {\n\tv = CleanValues(v)\n\tv.Set(\"q\", queryString)\n\tresponseCh := make(chan Response)\n\tt.queryQueue <- Query{BaseURL + \"/search/tweets.json\", v, &sr, _GET, responseCh}\n\n\tresp := <-responseCh\n\terr = resp.err\n\treturn sr, err\n}",
"func Search(rw http.ResponseWriter, request *http.Request) {\n\n\tlog.Println(\"call Search\")\n\n\t// 検索文字列取得\n\tquery := request.URL.Query().Get(\"query\")\n\tlog.Println(\"クエリ:\", query)\n\n\t// ユーザ情報取得\n\tuser := getAccount(rw, request)\n\n\ttmpl := parseTemplate()\n\n\t// 商品検索\n\tmessage := \"\"\n\tdbm := db.ConnDB()\n\trows, err := dbm.Query(\"select number, name, image, price, stock from products where name like \\\"%\" + query + \"%\\\" order by name\")\n\tif err != nil {\n\t\t// 商品検索失敗した場合、商品検索画面にメッセージ表示\n\t\toutputErrorLog(\"商品 検索 失敗\", err)\n\t\tmessage = \"検索に失敗しました。\" + err.Error()\n\t\trenderSearch(tmpl, rw, request, user, query, message, nil)\n\t\treturn\n\t}\n\n\t// モデルに格納\n\tproducts := []models.Product{}\n\tfor rows.Next() {\n\t\tproduct := models.Product{}\n\t\tif err = rows.Scan(&product.Number, &product.Name, &product.Image, &product.Price, &product.Stock); err != nil {\n\t\t\t// 商品データの格納に失敗した場合、商品検索画面にメッセージ表示\n\t\t\toutputErrorLog(\"商品 格納 失敗\", err)\n\t\t\tmessage = \"検索に失敗しました。\" + err.Error()\n\t\t\trenderSearch(tmpl, rw, request, user, query, message, nil)\n\t\t\treturn\n\t\t}\n\t\tproducts = append(products, product)\n\t}\n\n\tif len(products) == 0 {\n\t\tmessage = \"検索結果なし\"\n\t}\n\tlog.Println(\"検索件数:\", len(products), \"件\")\n\n\t// 商品検索画面表示\n\trenderSearch(tmpl, rw, request, user, query, message, products)\n}",
"func (s *SpotifyMusicSource) SearchSong(song types.ScrapedSongData) (types.SongSearchResult, error) {\n\tfmt.Println(\"Searching Spotify for: \", song.Name)\n\ts.AttemptedSearches++\n\tqueryStrings := BuildSpotifyQueryStrings(song)\n\n\tfor i := 0; i < len(queryStrings); i++ {\n\t\tresults, err := s.client.Search(queryStrings[i], spotify.SearchTypeTrack)\n\n\t\tif err != nil {\n\t\t\treturn types.SongSearchResult{}, err\n\t\t}\n\n\t\tif len(results.Tracks.Tracks) == 0 {\n\t\t\t//Try the next query string\n\t\t\tcontinue\n\t\t}\n\n\t\t//TODO: Suggest human intervention for searches that return more than one track\n\t\ttrack := results.Tracks.Tracks[0]\n\n\t\t//The last two query strings are going to be attempts without artists, so we need to check the string distance\n\t\tif i < len(queryStrings)-2 {\n\t\t\ts.SuccessfulHits++\n\t\t\treturn types.SongSearchResult{\n\t\t\t\tSongId: song.Id,\n\t\t\t\tURI: string(track.URI),\n\t\t\t\tName: track.Name,\n\t\t\t\tSource: \"Spotify\",\n\t\t\t\tRelation: song.Relation,\n\t\t\t\tExternalUrl: track.ExternalURLs[\"spotify\"],\n\t\t\t}, nil\n\t\t} else if distance := levenshtein.ComputeDistance(song.Name, track.Name); distance < maximumStringDistance {\n\t\t\ts.SuccessfulHits++\n\t\t\treturn types.SongSearchResult{\n\t\t\t\tSongId: song.Id,\n\t\t\t\tURI: string(track.URI),\n\t\t\t\tName: track.Name,\n\t\t\t\tSource: \"Spotify\",\n\t\t\t\tRelation: song.Relation,\n\t\t\t\tExternalUrl: track.ExternalURLs[\"spotify\"],\n\t\t\t}, nil\n\t\t}\n\t}\n\n\tif len(song.Artists) > 0 {\n\t\treturn types.SongSearchResult{}, fmt.Errorf(\"Could not find name: %s artist: %s year: %s\", song.Name, song.Artists[0].Name, song.Year)\n\t} else {\n\t\treturn types.SongSearchResult{}, fmt.Errorf(\"Could not find name: %s year: %s\", song.Name, song.Year)\n\t}\n}",
"func Search(ctx context.Context, keyword string, start, limit int) (*SearchResult, *Response, error) {\n\tquery := make(url.Values)\n\tquery.Add(\"query\", keyword)\n\tquery.Add(\"start\", strconv.Itoa(start))\n\tquery.Add(\"limit\", strconv.Itoa(limit))\n\treq, err := DefaultClient.NewAPIRequest(\"/book/fuzzy-search\", query)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"cannot search\")\n\t}\n\trst := new(SearchResult)\n\tresp, err := DefaultClient.Do(ctx, req, rst)\n\tif err != nil {\n\t\treturn nil, resp, errors.Wrap(err, \"cannot search\")\n\t}\n\treturn rst, resp, nil\n}",
"func Search(w http.ResponseWriter, r *http.Request) {\n\ttmpl := shared.Template(r)\n\tdata := shared.DefaultData(r)\n\tif r.Method == \"POST\" {\n\n\t\tquery := r.PostFormValue(\"query\")\n\t\t//full text search by name & description. Btw you can extend search to multi-table scenario with rankings, etc\n\t\t//fts index and SearchPosts assume language is english\n\t\tposts, _ := models.SearchPosts(query)\n\t\tdata[\"Title\"] = fmt.Sprintf(\"%s %q\", \"Search results for\", query)\n\t\tdata[\"Posts\"] = posts\n\t\ttmpl.Lookup(\"search/results\").Execute(w, data)\n\n\t} else {\n\t\terr := fmt.Errorf(\"Method %q not allowed\", r.Method)\n\t\tlog.Printf(\"ERROR: %s\\n\", err)\n\t\tw.WriteHeader(405)\n\t\ttmpl.Lookup(\"errors/405\").Execute(w, shared.ErrorData(err))\n\t}\n}",
"func (s *SqlBackend) SearchArtists(query string) ([]Artist, error) {\n\treturn s.artistQuery(\"SELECT * FROM artists WHERE normalized_title LIKE ?;\",\n\t\t\"%\"+query+\"%\")\n}",
"func Search(query *SearchQuery) []*Listing {\n\tspacemarketListings := SearchSpacemarket(query)\n\tinstabaseListings := SearchInstabase(query)\n\n\tfmt.Printf(`\n\tgot %d listings for spacemarket\n\tgot %d listings for instabase\n\t`, len(spacemarketListings), len(instabaseListings))\n\n\tvar result []*Listing\n\n\tresult = append(result, spacemarketListings...)\n\tresult = append(result, instabaseListings...)\n\n\treturn result\n}",
"func (c *Client) Search(q string) (Result, error) {\n\tr := Result{}\n\td := url.Values{}\n\td.Set(\"action\", \"search\")\n\td.Set(\"q\", q)\n\n\terr := c.perform(d, &r)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\n\treturn r, nil\n}",
"func (p *SearchProxyServiceClient) Search(query *SearchQuery, provider string) (r *SearchResult_, err error) {\n if err = p.sendSearch(query, provider); err != nil { return }\n return p.recvSearch()\n}",
"func (q *DataQuery) Search(query string) *DataQuery {\n\tq.params.Add(\"search\", query)\n\treturn q\n}",
"func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (out *pb.SearchReply, err error) {\n\tout = &pb.SearchReply{}\n\tquery := make(map[string]interface{})\n\tquery[\"name\"] = in.Name\n\tquery[\"country\"] = in.Country\n\n\tvar vasps []pb.VASP\n\tif vasps, err = s.db.Search(query); err != nil {\n\t\tout.Error = &pb.Error{\n\t\t\tCode: 400,\n\t\t\tMessage: err.Error(),\n\t\t}\n\t}\n\n\tout.Vasps = make([]*pb.VASP, len(vasps))\n\tfor i := 0; i < len(vasps); i++ {\n\t\t// avoid pointer errors from range\n\t\tout.Vasps[i] = &vasps[i]\n\n\t\t// return only entities, remove certificate info until lookup\n\t\tout.Vasps[i].VaspTRISACertification = nil\n\t}\n\n\tentry := log.With().\n\t\tStrs(\"name\", in.Name).\n\t\tStrs(\"country\", in.Country).\n\t\tInt(\"results\", len(out.Vasps)).\n\t\tLogger()\n\n\tif out.Error != nil {\n\t\tentry.Warn().Err(out.Error).Msg(\"unsuccessful search\")\n\t} else {\n\t\tentry.Info().Msg(\"search succeeded\")\n\t}\n\treturn out, nil\n}",
"func Search(window *glfw.Window, ctx *nk.Context, state *UIState) {\n\tstate.searchOnce.Do(func() {\n\t\tstate.isFetchingItems = true\n\t\tgo func() {\n\t\t\tdefer state.queue(func() {\n\t\t\t\tstate.isFetchingItems = false\n\t\t\t})\n\n\t\t\titems, err := session.ListItems()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"list items: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Printf(\"list items returned %d items\", len(items))\n\t\t\tstate.queue(func() {\n\t\t\t\tstate.items = items\n\t\t\t\tstate.searchResults = state.items[:]\n\t\t\t\tstate.statusText = fmt.Sprintf(\"%d results\", len(items))\n\t\t\t})\n\t\t}()\n\t})\n\n\twidth, height := window.GetSize()\n\tbounds := nk.NkRect(0, 0, float32(width), float32(height))\n\tif nk.NkBegin(ctx, \"search\", bounds, nk.WindowNoScrollbar) > 0 {\n\t\tregion := nk.NkWindowGetContentRegion(ctx)\n\n\t\tnk.NkLayoutSpaceBegin(ctx, nk.Static, 0, 3)\n\n\t\tbounds := nk.NkLayoutWidgetBounds(ctx)\n\n\t\t// Copy the current search query to check if it changed.\n\t\tsearchQuery := make([]byte, state.searchQueryLen)\n\t\tcopy(searchQuery, state.searchQuery)\n\n\t\tnk.NkLayoutSpacePush(ctx, nk.NkRect(0, 0, bounds.W(), bounds.H()))\n\n\t\tstate.tab(func() {\n\t\t\tnk.NkEditFocus(ctx, nk.EditField|nk.EditGotoEndOnActivate)\n\t\t})\n\t\tnk.NkEditString(\n\t\t\tctx,\n\t\t\tnk.EditField,\n\t\t\tstate.searchQuery,\n\t\t\t&state.searchQueryLen,\n\t\t\tbufSize,\n\t\t\tnk.NkFilterDefault,\n\t\t)\n\n\t\tif !bytes.Equal(searchQuery, state.searchQuery[:state.searchQueryLen]) {\n\t\t\tstate.selectedItem = nil\n\t\t\tif state.isFetchingItems {\n\n\t\t\t\t// Cancel the previous search.\n\t\t\t\tstate.searchCancel()\n\t\t\t}\n\n\t\t\tstate.isFetchingItems = true\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tstate.searchCancel = cancel\n\n\t\t\tquery := string(state.searchQuery[:state.searchQueryLen])\n\t\t\tif query == \"\" {\n\t\t\t\tstate.searchResults = state.items[:]\n\t\t\t\tstate.isFetchingItems = false\n\t\t\t} else 
{\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer state.queue(func() {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tstate.isFetchingItems = false\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tresults, err := session.SearchItems(query)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"search items: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tstate.queue(func() {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tstate.searchResults = results\n\t\t\t\t\t\t\tstate.statusText = fmt.Sprintf(\"matched %d results\", len(state.searchResults))\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\n\t\t// Search results item list.\n\t\tnk.NkLayoutSpacePush(ctx, nk.NkRect(0, bounds.H()+4, bounds.W(), region.H()-bounds.H()-20))\n\n\t\tstate.tab(func() {\n\t\t\tif len(state.searchResults) == 0 {\n\t\t\t\tstate.id--\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// TODO: Focus the results list and handle up down keys to navigate list.\n\t\t})\n\n\t\tnk.SetGroupPadding(ctx, nk.NkVec2(0, 0))\n\t\tif nk.NkGroupBegin(ctx, \"items\", nk.WindowScrollAutoHide) > 0 {\n\t\t\tfor _, item := range state.searchResults {\n\t\t\t\tsearchResultItem(window, ctx, state, item)\n\t\t\t}\n\t\t\tnk.NkGroupEnd(ctx)\n\t\t}\n\n\t\tnk.NkLayoutSpacePush(ctx, nk.NkRect(0, region.H()-28, bounds.W(), 28))\n\n\t\tStatusLine(window, ctx, state)\n\n\t\tnk.NkLayoutSpaceEnd(ctx)\n\n\t\tnk.NkEnd(ctx)\n\t}\n}",
"func (s *Storage) Search(param *string) []string {\n\tvar d crawler.Document\n\n\tres := make([]string, 0)\n\th := hash(strings.ToLower(*param))\n\tids := s.ind[h]\n\n\tfor _, id := range ids {\n\t\td = s.binarySearch(id, 0, len(s.docs))\n\t\tif d.ID != 0 {\n\t\t\tres = append(res, fmt.Sprintf(\"%d: %s (%s)\", d.ID, d.Title, d.URL))\n\t\t}\n\t}\n\n\treturn res\n}",
"func InstallSearch(app *fiber.App, route cli.RouteConfig, tsm sqlite3.TimeSeriesManager) {\n\tendPoint := fmt.Sprintf(\"%s/%s/%s/search\", route.DBAlias, route.Table, route.TimeColumn)\n\tapp.Post(endPoint, func(c *fiber.Ctx) {\n\t\tbody := []byte(c.Body())\n\t\tvar target string\n\n\t\tif len(c.Body()) > 0 {\n\t\t\tvar targetJSON searchTarget\n\t\t\terr := json.Unmarshal(body, &targetJSON)\n\t\t\tif err != nil {\n\t\t\t\tsend400(c, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttarget = strings.ToLower(targetJSON.Target)\n\t\t} else {\n\t\t\ttarget = \"\"\n\t\t}\n\n\t\tvar tagKeys []sqlite3.TagKey\n\t\ttsm.GetTagKeys(route.Table, &tagKeys)\n\t\tresult := []string{}\n\n\t\taddTagKey := func(tag string) {\n\t\t\tlowerTag := strings.ToLower(tag)\n\t\t\tif strings.Contains(lowerTag, target) {\n\t\t\t\tresult = append(result, lowerTag)\n\t\t\t}\n\t\t}\n\t\tfor _, i := range tagKeys {\n\t\t\taddTagKey(strings.ToLower(i.Text))\n\t\t}\n\t\taddTagKey(route.TimeColumn)\n\t\tsend200(c, result)\n\t})\n}",
"func (m *MovieInMemory) Search(ctx context.Context, criteria repository.Criteria) ([]*aggregate.Movie, string, error) {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tmovies := make([]*aggregate.Movie, 0)\n\tfor _, m := range m.db {\n\t\tif len(movies) == criteria.Limit {\n\t\t\tbreak\n\t\t}\n\t\tmovies = append(movies, m)\n\t}\n\treturn movies, \"\", nil\n}",
"func (a *client) Search(query string, params Parameters) ([]SearchResultStruct, *ErrorStruct) {\n\tparams.Set(\"access_token\", a.oauth.AccessToken)\n\tparams.Add(\"q\", query)\n\tresult := <-a.handler.addRequest(search, params)\n\n\tdec := json.NewDecoder(bytes.NewBuffer(result.body))\n\tif 200 <= result.statuscode && result.statuscode < 300 {\n\t\ttarget := []SearchResultStruct{}\n\t\tdec.Decode(&target)\n\t\treturn target, nil\n\t} else {\n\t\ttarget := ErrorStruct{}\n\t\tdec.Decode(&target)\n\t\treturn []SearchResultStruct{}, &target\n\t}\n\n\treturn []SearchResultStruct{}, &ErrorStruct{}\n}",
"func foodsSearch(c *gin.Context) {\n\tvar (\n\t\tmax int\n\t\tpage int\n\t\tfoods []interface{}\n\t)\n\tcount := 0\n\t// check for a query\n\tq := c.Query(\"q\")\n\tif q == \"\" {\n\t\terrorout(c, http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": \"A search string in the q parameter is required\"})\n\t\treturn\n\t}\n\t// check for field\n\tf := c.Query(\"f\")\n\tif f != \"\" && f != \"foodDescription\" && f != \"upc\" && f != \"company\" && f != \"ingredients\" {\n\t\terrorout(c, http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": \"Unrecognized search field. Must be one of 'foodDescription','company', 'upc' or 'ingredients'\"})\n\t\treturn\n\t}\n\n\tif max, err = strconv.Atoi(c.Query(\"max\")); err != nil {\n\t\tmax = defaultListMax\n\t}\n\tif max > maxListSize {\n\t\terrorout(c, http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": fmt.Sprintf(\"max parameter %d exceeds maximum allowed size of %d\", max, maxListSize)})\n\t\treturn\n\t}\n\tif page, err = strconv.Atoi(c.Query(\"page\")); err != nil {\n\t\tpage = 0\n\t}\n\tif page < 0 {\n\t\tpage = 0\n\t}\n\toffset := page * max\n\n\tif count, err = dc.Search(fdc.SearchRequest{Query: q, IndexName: cs.CouchDb.Fts, Format: fdc.META, Max: max, Page: offset}, &foods); err != nil {\n\t\terrorout(c, http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": fmt.Sprintf(\"Search query failed %v\", err)})\n\t\treturn\n\t}\n\tresults := fdc.BrowseResult{Count: int32(count), Start: int32(page), Max: int32(max), Items: foods}\n\tc.JSON(http.StatusOK, results)\n}",
"func search(params map[string]string, client *Client) bool {\n startS, startOk := params[\"start\"]\n var start = 0\n var err error\n if startOk {\n start, err = strconv.Atoi(startS)\n if err != nil {\n SendErr(client, \"Start not number\", \"co_isbn\")\n return false\n }\n }\n var books []Book\n isbnS, isbnOk := params[\"isbn\"]\n if isbnOk {\n isbn, err := strconv.Atoi(isbnS)\n if err != nil {\n SendErr(client, \"isbn not number\", \"co_isbn\")\n return false\n }\n db.Offset(start*OFFSET_NUM).Where(\"isbn=?\", isbn).Limit(OFFSET_NUM).Find(&books)\n var bookMap = make(map[string]string)\n for index, book := range books {\n bookMap[\"book_\"+strconv.Itoa(index)] = strconv.Itoa(int(book.Isbn))\n }\n SendMessage(client, \"search_return\", bookMap)\n return true\n }\n title := \"%\"+params[\"title\"]+\"%\"\n author := \"%\"+params[\"author\"]+\"%\"\n genre := \"%\"+params[\"genre\"]+\"%\"\n db.Offset(start*OFFSET_NUM).Where(\"title LIKE ? and author LIKE ? and genre LIKE ?\", title, author, genre).Limit(OFFSET_NUM).Find(&books)\n var bookMap = make(map[string]string)\n for index, book := range books {\n bookMap[\"book_\"+strconv.Itoa(index)] = strconv.Itoa(int(book.Isbn))\n }\n SendMessage(client, \"success\", bookMap)\n return true\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CreatePlaylistForUser creates a playlist for a Spotify user.
|
// CreatePlaylistForUser creates a playlist for the Spotify user identified by
// userID, forwarding the name, description, and public flag unchanged to the
// wrapped Spotify client. It returns the newly created playlist or an error.
func (c Client) CreatePlaylistForUser(userID, playlistName, description string, public bool) (*spotify.FullPlaylist, error) {
	return c.client.CreatePlaylistForUser(userID, playlistName, description, public)
}
|
[
"func CreatePlaylist(authedClient *spotify.Client, playlistName string, tracks []spotify.FullTrack) (string, error) {\n\tif playlistName == \"\" {\n\t\tplaylistName = generatePlaylistName(time.Now())\n\t}\n\n\tuser, err := authedClient.CurrentUser()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tplaylist, err := authedClient.CreatePlaylistForUser(user.User.ID, playlistName, \"Playlist from Echoespl\", false)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttrackIds := []spotify.ID{}\n\tfor _, track := range tracks {\n\t\ttrackIds = append(trackIds, track.ID)\n\t}\n\n\t_, err = authedClient.AddTracksToPlaylist(playlist.ID, trackIds...)\n\treturn playlistName, err\n}",
"func createPlaylist(name string, length int, client SpotifyClient, trackProc func([]spotify.Song, int) []spotify.Song) (err error) {\n\ttracks, err := client.UserTracks()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tshuffleTracks(&tracks)\n\n\tfinalTracks := trackProc(tracks, length)\n\n\tif err := client.CreatePlaylist(name, finalTracks); err != nil {\n\t\treturn err\n\t}\n\treturn\n}",
"func CreateSocialPlaylist(writer http.ResponseWriter, request *http.Request) {\n\t// Initialize paths\n\terr := initWithEnv()\n\tif err != nil {\n\t\thttp.Error(writer, err.Error(), http.StatusInternalServerError)\n\t\tlogger.LogErr(\"InitWithEnv\", err, nil)\n\t\treturn\n\t}\n\n\tvar socialPlaylistReq createSocialPlaylistRequest\n\n\t// Decode our object\n\tjsonDecodeErr := json.NewDecoder(request.Body).Decode(&socialPlaylistReq)\n\tif jsonDecodeErr != nil {\n\t\thttp.Error(writer, jsonDecodeErr.Error(), http.StatusInternalServerError)\n\t\tlogger.LogErr(\"SocialPlaylistReq Decoder\", jsonDecodeErr, request)\n\t\treturn\n\t}\n\n\t// Figure out what service we are going to create a playlist in\n\tvar platformEndpoint string\n\tvar socialRefreshTokens *social.RefreshTokensResponse\n\tvar socialRefreshTokenErr error\n\n\tif socialPlaylistReq.SocialPlatform.PlatformName == \"spotify\" {\n\t\tlog.Printf(\"Creating playlist for Spotify\")\n\t\tplatformEndpoint = \"https://api.spotify.com/v1/users/\" + socialPlaylistReq.SocialPlatform.ID + \"/playlists\"\n\n\t\t// This is sort of weird, but I haven't been able to find any resources on an Apple Music tokens expiring\n\t\t// Therefore, this check should only be done on Spotify at the moment\n\t\tsocialRefreshTokens, socialRefreshTokenErr = refreshToken(socialPlaylistReq.SocialPlatform)\n\t\tif socialRefreshTokenErr != nil {\n\t\t\thttp.Error(writer, socialRefreshTokenErr.Error(), http.StatusBadRequest)\n\t\t\tlogger.LogErr(\"RefreshToken\", socialRefreshTokenErr, request)\n\t\t\treturn\n\t\t}\n\t} else if socialPlaylistReq.SocialPlatform.PlatformName == \"apple\" {\n\t\tlog.Printf(\"Creating playlist for Apple Music\")\n\t\tplatformEndpoint = \"https://api.music.apple.com/v1/me/library/playlists\"\n\t}\n\n\t// fr3fou - \"i fixed this Kappa\" (04/10/20)\n\t// Setup resonse if we have a token to return\n\tvar response *createSocialPlaylistResponse\n\n\t// Again, this is solely for Spotify at the moment\n\tif 
socialPlaylistReq.SocialPlatform.PlatformName == \"spotify\" && socialRefreshTokens != nil {\n\t\t// Get token for specified platform\n\t\tplatformRefreshToken, doesExist := socialRefreshTokens.RefreshTokens[socialPlaylistReq.SocialPlatform.PlatformName]\n\t\tif doesExist == true {\n\t\t\tlog.Println(\"Setting new APIToken on socialPlatform\")\n\t\t\tsocialPlaylistReq.SocialPlatform.APIToken.Token = platformRefreshToken.Token\n\n\t\t\t// Write new apiToken as response\n\t\t\tresponse = &createSocialPlaylistResponse{\n\t\t\t\tPlatformName: socialPlaylistReq.SocialPlatform.PlatformName,\n\t\t\t\tRefreshToken: platformRefreshToken,\n\t\t\t}\n\t\t} else {\n\t\t\t// Another token needed refresh, but not the one we were looking for\n\t\t\tlog.Printf(\"%s was not refreshed\", socialPlaylistReq.SocialPlatform.PlatformName)\n\t\t}\n\t}\n\n\t// Call API to create playlist with data\n\tcreateReqErr := createPlaylist(platformEndpoint, socialPlaylistReq.SocialPlatform, socialPlaylistReq.PlaylistName)\n\tif createReqErr != nil {\n\t\thttp.Error(writer, createReqErr.Error(), http.StatusBadRequest)\n\t\tlogger.LogErr(\"CreatePlaylist\", createReqErr, request)\n\t\treturn\n\t}\n\n\tif response != nil {\n\t\tjson.NewEncoder(writer).Encode(response)\n\t} else {\n\t\twriter.WriteHeader(http.StatusNoContent)\n\t}\n}",
"func playlistHandler(env *Env, w http.ResponseWriter, r *http.Request) error {\n\ttok, err := authorizeRequest(env.Auth, w, r)\n\tif err != nil {\n\t\treturn StatusError{http.StatusBadGateway, errors.Wrap(err, \"cannot authorize Spotify request\")}\n\t}\n\n\tc := env.Auth.NewClient(tok)\n\tc.AutoRetry = true\n\n\tserv, err := spotifyservice.New(&c)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot initialize Spotify service\")}\n\t}\n\n\tbuf, err := buffer.New(serv)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot initialize service buffer\")}\n\t}\n\n\tgen, err := refind.New(buf, serv)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot initialize Refind client\")}\n\t}\n\n\tlist, err := gen.Tracklist(playlistLimit)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot generate track list\")}\n\t}\n\n\tt := strings.Title(adj.GenerateCombined(1, \"-\"))\n\n\tpl, err := serv.Playlist(t, playlistDescription, list)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot create user playlist\")}\n\t}\n\n\tp := playlist{URI: string(pl.URI)}\n\trender.JSON(w, r, p)\n\n\treturn nil\n}",
"func HostDeezerCreatePlaylist(title, userid, token string, tracks []string) error {\n\tdeezerAPIBase := os.Getenv(\"DEEZER_API_BASE\")\n\turl := fmt.Sprintf(\"%s/user/%s/playlists?access_token=%s&request_method=post&title=%s\", deezerAPIBase, userid, token, title)\n\tsrc := &types.DeezerPlaylistCreationResponse{}\n\terr := util.MakeRequest(url, src)\n\tif err != nil {\n\t\tlog.Println(\"Error making request here.\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\tallTracks := strings.Join(tracks, \",\")\n\tplaylistURL := fmt.Sprintf(\"%s/playlist/%d/tracks?access_token=%s&request_method=post&songs=%s\", deezerAPIBase, src.ID, token, allTracks)\n\terr = util.MakeRequest(playlistURL, true)\n\n\tif err != nil {\n\t\tlog.Println(\"Error making request to add tracks to playlist\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func createSpotifyPlaylistRequest(playlistName string, endpoint string, apiToken string) (*http.Request, error) {\n\t// Create playlist data\n\tvar spotifyPlaylistRequest = spotifyPlaylistRequest{\n\t\tName: \"Grüvee: \" + playlistName,\n\t\tPublic: true,\n\t\tCollaborative: false,\n\t\tDescription: \"Created with love from Grüvee ❤️\",\n\t}\n\n\t// Create json body\n\tjsonPlaylist, jsonErr := json.Marshal(spotifyPlaylistRequest)\n\tif jsonErr != nil {\n\t\treturn nil, jsonErr\n\t}\n\n\t// Create request object\n\tcreatePlaylistReq, createPlaylistReqErr := http.NewRequest(\"POST\", endpoint, bytes.NewBuffer(jsonPlaylist))\n\tif createPlaylistReqErr != nil {\n\t\treturn nil, createPlaylistReqErr\n\t}\n\n\t// Add headers\n\tcreatePlaylistReq.Header.Add(\"Content-Type\", \"application/json\")\n\tcreatePlaylistReq.Header.Add(\"Authorization\", \"Bearer \"+apiToken)\n\n\treturn createPlaylistReq, nil\n}",
"func (r *PlaylistRepo) Create(pl *models.Playlist) error {\n\tr.logger.WithField(\"name\", pl.Name).Debug(\"Adding new playlist\")\n\tquery := fmt.Sprintf(\"INSERT INTO Playlists(%s) VALUES(?, ?, ?, datetime('now'), datetime('now'))\", playlistFields)\n\tres, err := r.db.Exec(query, pl.Name, pl.Status, pl.Message)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Setting the dates like this should be enough for now\n\tpl.CreatedAt = time.Now()\n\tpl.UpdatedAt = time.Now()\n\tvar id int64\n\tif id, err = res.LastInsertId(); err == nil {\n\t\tpl.ID = uint(id)\n\t}\n\treturn err\n}",
"func createPlaylist(endpoint string, platform firebase.FirestoreSocialPlatform,\n\tplaylistName string) error {\n\tvar request *http.Request\n\tvar requestErr error\n\n\t// Check for platform\n\tif platform.PlatformName == \"spotify\" {\n\t\trequest, requestErr = createSpotifyPlaylistRequest(playlistName, endpoint, platform.APIToken.Token)\n\t} else if platform.PlatformName == \"apple\" {\n\t\trequest, requestErr = createAppleMusicPlaylistRequest(playlistName, endpoint, platform.APIToken.Token)\n\t}\n\n\tif requestErr != nil {\n\t\tlog.Printf(\"[createPlaylist] %v\", requestErr.Error())\n\t\treturn requestErr\n\t}\n\n\tcreatePlaylistResp, httpErr := httpClient.Do(request)\n\tif httpErr != nil {\n\t\tlog.Printf(\"[createPlaylist] %v\", httpErr.Error())\n\t\treturn httpErr\n\t}\n\n\t// If we have errors, lets parse 'em out\n\tif createPlaylistResp.StatusCode != http.StatusOK && createPlaylistResp.StatusCode != http.StatusCreated {\n\t\tif platform.PlatformName == \"spotify\" {\n\t\t\tvar spotifyErrorObj social.SpotifyRequestError\n\n\t\t\terr := json.NewDecoder(createPlaylistResp.Body).Decode(&spotifyErrorObj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[createPlaylist] %v\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"Status Code %v: \"+spotifyErrorObj.Error.Message, spotifyErrorObj.Error.Status)\n\t\t} else if platform.PlatformName == \"apple\" {\n\t\t\tvar appleMusicReqErr social.AppleMusicRequestError\n\n\t\t\terr := json.NewDecoder(createPlaylistResp.Body).Decode(&appleMusicReqErr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[createPlaylist] %v\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// The first error is the most important so for now let's just grab that\n\t\t\treturn fmt.Errorf(\"Status Code %v: \"+appleMusicReqErr.Errors[0].Detail, appleMusicReqErr.Errors[0].Status)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func playlistHandler(w http.ResponseWriter, r *http.Request) {\n\tsess, err := store.Get(r, sessionName)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif uri, ok := sess.Values[\"playlist\"].(string); ok {\n\t\tpayload := Playlist{URI: uri}\n\t\trender.JSON(w, r, payload)\n\t\treturn\n\t}\n\n\ttok, err := authorizeRequest(w, r)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc := auth.NewClient(tok)\n\tc.AutoRetry = true\n\n\tms, err := spotifyservice.New(&c)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tscryer, err := scry.New(ms)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tpl, err := scryer.FromTracks(\"Discover Now\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"cannot create playlist\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsess.Values[\"playlist\"] = string(pl.URI)\n\tsess.Save(r, w)\n\n\tpayload := Playlist{URI: string(pl.URI)}\n\trender.JSON(w, r, payload)\n}",
"func (s *Service) AddPlaylist(mood *internal.Mood) error {\n\tpayload := model.AddPlaylistPayload{\n\t\tUserID: mood.UserID,\n\t\tMoodID: mood.ID,\n\t\tName: mood.Name,\n\t}\n\n\tif err := s.publishJSON(addPlaylistQueue, payload); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (mr *MockRepositoryMockRecorder) CreatePlaylist(playlist interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreatePlaylist\", reflect.TypeOf((*MockRepository)(nil).CreatePlaylist), playlist)\n}",
"func CreateURLsFromPlaylist(ctx context.Context, args []string) error {\n\tfs := flag.NewFlagSet(\"create-urls-from-playlist\", flag.ExitOnError)\n\tvar playlist string\n\tfs.StringVar(&playlist, \"playlist\", \"\", \"youtube playlist\")\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\tif playlist == \"\" {\n\t\treturn errors.New(\"playlist is required\")\n\t}\n\n\tlog := log.New()\n\tredis, err := redis.New(log)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbroker := broker.New(redis, log)\n\tdb, err := db.New(log)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstore := store.New(db)\n\tmanager := manager.NewServer(broker, store)\n\tplaylistLoader := service.NewPlaylistLoader(manager, store, youtube.New(log))\n\n\treturn playlistLoader.CreateURLsFromYoutube(ctx, playlist)\n}",
"func (ah AShirtAuthBridge) CreateNewAuthForUser(data UserAuthData) error {\n\treturn CreateNewAuthForUserGeneric(ah.db, ah.authSchemeName, ah.authSchemeType, data)\n}",
"func CreateDeletePlaylistRequest() (request *DeletePlaylistRequest) {\n\trequest = &DeletePlaylistRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"DeletePlaylist\", \"live\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (mr *MockRepositoryMockRecorder) DeletePlaylistFromUser(userID, playlistID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeletePlaylistFromUser\", reflect.TypeOf((*MockRepository)(nil).DeletePlaylistFromUser), userID, playlistID)\n}",
"func (c Client) AddTracksToPlaylist(playlistID spotify.ID, trackIDs ...spotify.ID) (snapshotID string, err error) {\n\tsnapshotID, err = c.client.AddTracksToPlaylist(playlistID, trackIDs...)\n\treturn snapshotID, err\n}",
"func CreateUserShell(usr *app.User, db *gorm.DB) *ishell.Shell {\n\tshell := ishell.New()\n\n\tusrChat, err := NewUserChat(usr, db)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"error on create user chat\")\n\t\treturn nil\n\t}\n\n\tshell.AddCmd(&ishell.Cmd{\n\t\tName: \"publish\",\n\t\tHelp: `Publish a message into a queue \"{string(queueName)}\" \"{string(message)}\"`,\n\t\tFunc: usrChat.shellPublish,\n\t})\n\n\tshell.AddCmd(&ishell.Cmd{\n\t\tName: \"print\",\n\t\tHelp: `Print all messages from a queue \"{string(queueName)}\" `,\n\t\tFunc: usrChat.shellPrintQueue,\n\t})\n\n\tshell.AddCmd(&ishell.Cmd{\n\t\tName: \"exit\",\n\t\tHelp: \"exit the program\",\n\t\tFunc: usrChat.shellExit,\n\t})\n\n\treturn shell\n}",
"func (m *MockRepository) CreatePlaylist(playlist *models.Playlist) (*models.Playlist, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreatePlaylist\", playlist)\n\tret0, _ := ret[0].(*models.Playlist)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (s3Util S3UtilImpl) CreateS3BucketForUser(bucket string) (*s3.CreateBucketOutput, error) {\n\tinput := &s3.CreateBucketInput{\n\t\tBucket: aws.String(bucket),\n\t\tCreateBucketConfiguration: &s3.CreateBucketConfiguration{\n\t\t\tLocationConstraint: aws.String(s3Util.awsRegion),\n\t\t},\n\t}\n\n\tresult, err := s3Util.s3Svc.CreateBucket(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase s3.ErrCodeBucketAlreadyExists:\n\t\t\t\tlog.Println(s3.ErrCodeBucketAlreadyExists, aerr.Error())\n\t\t\t\treturn result, nil\n\t\t\tcase s3.ErrCodeBucketAlreadyOwnedByYou:\n\t\t\t\tlog.Println(s3.ErrCodeBucketAlreadyOwnedByYou, aerr.Error())\n\t\t\t\treturn result, nil\n\t\t\tdefault:\n\t\t\t\tlog.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(err.Error())\n\t\t}\n\t\treturn nil, err\n\t}\n\n\terr = s3Util.s3Svc.WaitUntilBucketExists(&s3.HeadBucketInput{\n\t\tBucket: aws.String(bucket),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error occurred while waiting for bucket to be created, %v\", bucket)\n\t}\n\n\treturn result, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddTracksToPlaylist adds one or more tracks to a user's playlist. This call requires ScopePlaylistModifyPublic or ScopePlaylistModifyPrivate. A maximum of 100 tracks can be added per call. It returns a snapshot ID that can be used to identify this version (the new version) of the playlist in future requests.
|
// AddTracksToPlaylist appends the given track IDs to the playlist identified
// by playlistID by delegating to the wrapped Spotify client. It returns the
// snapshot ID of the resulting playlist version, or an error.
func (c Client) AddTracksToPlaylist(playlistID spotify.ID, trackIDs ...spotify.ID) (snapshotID string, err error) {
	return c.client.AddTracksToPlaylist(playlistID, trackIDs...)
}
|
[
"func (s *PlaylistsService) AddTracks(\n\tctx context.Context,\n\tkind int,\n\trevision int,\n\ttracks []PlaylistsTrack,\n\topts *PlaylistsAddTracksOptions,\n) (*PlaylistsAddTracksResp, *http.Response, error) {\n\tif opts == nil {\n\t\topts = &PlaylistsAddTracksOptions{\n\t\t\tAt: 0,\n\t\t}\n\t}\n\n\tdiff := []struct {\n\t\tOp string `json:\"op\"`\n\t\tAt int `json:\"at\"`\n\t\tTracks []PlaylistsTrack `json:\"tracks\"`\n\t}{\n\t\t{\n\t\t\tOp: \"insert\",\n\t\t\tAt: opts.At,\n\t\t\tTracks: tracks,\n\t\t},\n\t}\n\n\tb, err := json.Marshal(diff)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tform := url.Values{}\n\tform.Set(\"diff\", string(b))\n\tform.Set(\"revision\", strconv.Itoa(revision))\n\n\turi := fmt.Sprintf(\n\t\t\"users/%v/playlists/%v/change-relative\",\n\t\ts.client.userID,\n\t\tkind,\n\t)\n\n\treq, err := s.client.NewRequest(http.MethodPost, uri, form)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\taddTracksResp := new(PlaylistsAddTracksResp)\n\tresp, err := s.client.Do(ctx, req, addTracksResp)\n\treturn addTracksResp, resp, err\n}",
"func (s *Service) AddPlaylist(mood *internal.Mood) error {\n\tpayload := model.AddPlaylistPayload{\n\t\tUserID: mood.UserID,\n\t\tMoodID: mood.ID,\n\t\tName: mood.Name,\n\t}\n\n\tif err := s.publishJSON(addPlaylistQueue, payload); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (ruo *ResolutionUpdateOne) AddPlaylistVideoIDs(ids ...int) *ResolutionUpdateOne {\n\truo.mutation.AddPlaylistVideoIDs(ids...)\n\treturn ruo\n}",
"func (ru *ResolutionUpdate) AddPlaylistVideoIDs(ids ...int) *ResolutionUpdate {\n\tru.mutation.AddPlaylistVideoIDs(ids...)\n\treturn ru\n}",
"func (mr *MockRepositoryMockRecorder) AddTrackToPlaylist(playlistID, trackID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddTrackToPlaylist\", reflect.TypeOf((*MockRepository)(nil).AddTrackToPlaylist), playlistID, trackID)\n}",
"func (m *Mixtape) AddSongsToPlaylist(playlistID string, songIDs []string) error {\n\tplaylist := m.Playlists.FindPlaylist(playlistID)\n\tif playlist == nil {\n\t\treturn errors.New(ErrPlaylistNotFound)\n\t}\n\n\tfor i := range songIDs {\n\t\tplaylist.AddSong(songIDs[i])\n\t}\n\n\treturn nil\n}",
"func (jpl *JsonPlaylist) AddTrack(ajpl *JsonPlaylist, trackName string) {\n\tcurSegs := jpl.Segments[trackName]\n\tvar lastSeq uint64\n\tif len(curSegs) > 0 {\n\t\tlastSeq = curSegs[len(curSegs)-1].SeqNo\n\t}\n\n\tfor _, seg := range ajpl.Segments[trackName] {\n\t\tneedSort := false\n\t\tif seg.SeqNo > lastSeq {\n\t\t\tcurSegs = append(curSegs, seg)\n\t\t} else {\n\t\t\ti := sort.Search(len(curSegs), func(i int) bool {\n\t\t\t\treturn curSegs[i].SeqNo >= seg.SeqNo\n\t\t\t})\n\t\t\tif i < len(curSegs) && curSegs[i].SeqNo == seg.SeqNo {\n\t\t\t\t// x is present at data[i]\n\t\t\t} else {\n\t\t\t\t// x is not present in data,\n\t\t\t\t// but i is the index where it would be inserted.\n\t\t\t\tif i < len(curSegs) {\n\t\t\t\t\tneedSort = true\n\t\t\t\t}\n\t\t\t\tcurSegs = append(curSegs, seg)\n\t\t\t}\n\t\t}\n\t\tif needSort {\n\t\t\tsort.Slice(curSegs, func(i, j int) bool {\n\t\t\t\treturn curSegs[i].SeqNo < curSegs[j].SeqNo\n\t\t\t})\n\t\t}\n\t\tlastSeq = curSegs[len(curSegs)-1].SeqNo\n\t}\n\tjpl.Segments[trackName] = curSegs\n}",
"func TracksFromPlaylist(h echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tuser := usermw.GetUser(c)\n\t\tplaylistID := c.Param(\"playlist\")\n\t\tif playlistID == \"\" {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, \"Not a valid playlist ID\")\n\t\t}\n\t\tclient := spotifymw.GetClient(c)\n\t\tplaylistOwner, err := spotifymw.FindPlaylistOwner(client, spotify.ID(playlistID))\n\t\tif err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf(\"Error getting playlist owner: %v\", err))\n\t\t}\n\t\ttracks, err := spotifymw.TracksFromPlaylist(client, spotify.ID(playlistID), playlistOwner, user.ID)\n\t\tif err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf(\"Error getting tracks: %v\", err))\n\t\t}\n\t\tc.Set(\"tracks\", &tracks)\n\t\treturn h(c)\n\t}\n}",
"func CreatePlaylist(authedClient *spotify.Client, playlistName string, tracks []spotify.FullTrack) (string, error) {\n\tif playlistName == \"\" {\n\t\tplaylistName = generatePlaylistName(time.Now())\n\t}\n\n\tuser, err := authedClient.CurrentUser()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tplaylist, err := authedClient.CreatePlaylistForUser(user.User.ID, playlistName, \"Playlist from Echoespl\", false)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttrackIds := []spotify.ID{}\n\tfor _, track := range tracks {\n\t\ttrackIds = append(trackIds, track.ID)\n\t}\n\n\t_, err = authedClient.AddTracksToPlaylist(playlist.ID, trackIds...)\n\treturn playlistName, err\n}",
"func (ruo *ResolutionUpdateOne) AddPlaylistVideos(p ...*Playlist_Video) *ResolutionUpdateOne {\n\tids := make([]int, len(p))\n\tfor i := range p {\n\t\tids[i] = p[i].ID\n\t}\n\treturn ruo.AddPlaylistVideoIDs(ids...)\n}",
"func createPlaylist(name string, length int, client SpotifyClient, trackProc func([]spotify.Song, int) []spotify.Song) (err error) {\n\ttracks, err := client.UserTracks()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tshuffleTracks(&tracks)\n\n\tfinalTracks := trackProc(tracks, length)\n\n\tif err := client.CreatePlaylist(name, finalTracks); err != nil {\n\t\treturn err\n\t}\n\treturn\n}",
"func (ru *ResolutionUpdate) AddPlaylistVideos(p ...*Playlist_Video) *ResolutionUpdate {\n\tids := make([]int, len(p))\n\tfor i := range p {\n\t\tids[i] = p[i].ID\n\t}\n\treturn ru.AddPlaylistVideoIDs(ids...)\n}",
"func (client *Client) AddTrackers(infoHash string, trackers string) (*http.Response, error) {\n\tparams := make(map[string]string)\n\tparams[\"hash\"] = strings.ToLower(infoHash)\n\tparams[\"urls\"] = trackers\n\n\treturn client.post(\"command/addTrackers\", params)\n}",
"func playlistHandler(env *Env, w http.ResponseWriter, r *http.Request) error {\n\ttok, err := authorizeRequest(env.Auth, w, r)\n\tif err != nil {\n\t\treturn StatusError{http.StatusBadGateway, errors.Wrap(err, \"cannot authorize Spotify request\")}\n\t}\n\n\tc := env.Auth.NewClient(tok)\n\tc.AutoRetry = true\n\n\tserv, err := spotifyservice.New(&c)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot initialize Spotify service\")}\n\t}\n\n\tbuf, err := buffer.New(serv)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot initialize service buffer\")}\n\t}\n\n\tgen, err := refind.New(buf, serv)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot initialize Refind client\")}\n\t}\n\n\tlist, err := gen.Tracklist(playlistLimit)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot generate track list\")}\n\t}\n\n\tt := strings.Title(adj.GenerateCombined(1, \"-\"))\n\n\tpl, err := serv.Playlist(t, playlistDescription, list)\n\tif err != nil {\n\t\treturn StatusError{http.StatusInternalServerError, errors.Wrap(err, \"cannot create user playlist\")}\n\t}\n\n\tp := playlist{URI: string(pl.URI)}\n\trender.JSON(w, r, p)\n\n\treturn nil\n}",
"func (m *MockRepository) AddTrackToPlaylist(playlistID, trackID int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddTrackToPlaylist\", playlistID, trackID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (mr *MockRepositoryMockRecorder) AddPlaylistToMediateka(userID, playlistID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddPlaylistToMediateka\", reflect.TypeOf((*MockRepository)(nil).AddPlaylistToMediateka), userID, playlistID)\n}",
"func (r *PlaylistRepo) AddEntry(playlistID uint, entry *models.PlaylistEntry) error {\n\tquery := fmt.Sprintf(\n\t\t\"INSERT INTO PlaylistEntries(playlistId, %s) VALUES(?, ?, -1, ?, ?, datetime('now'), datetime('now'))\",\n\t\tplaylistEntryFields,\n\t)\n\tres, err := r.db.Exec(query, playlistID, entry.VideoHash, entry.RequestedBy, entry.RequesterIP)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"AddEntry: Failed to create entry: %v\", err)\n\t}\n\tid, err := res.LastInsertId()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"AddEntry: Failed to retrieve last insert ID: %v\", err)\n\t}\n\tentry.ID = uint(id)\n\t// Set the position of all unsorted playlist entries to their ID - this way they should be the last entry in their\n\t// list\n\tquery = \"UPDATE PlaylistEntries SET position = id WHERE position < 0\"\n\tif _, err = r.db.Exec(query); err != nil {\n\t\treturn fmt.Errorf(\"AddEntry: Failed to reposition playlist entries: %v\", err)\n\t}\n\treturn nil\n}",
"func (c Client) CreatePlaylistForUser(userID, playlistName, description string, public bool) (*spotify.FullPlaylist, error) {\n\tplaylist, err := c.client.CreatePlaylistForUser(userID, playlistName, description, public)\n\treturn playlist, err\n}",
"func (puo *PetUpdateOne) AddPlayGroups(p ...*PlayGroup) *PetUpdateOne {\n\tids := make([]int, len(p))\n\tfor i := range p {\n\t\tids[i] = p[i].ID\n\t}\n\treturn puo.AddPlayGroupIDs(ids...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetTracks gets Spotify catalog information for multiple tracks based on their Spotify IDs. It supports up to 50 tracks in a single call. Tracks are returned in the order requested. If a track is not found, that position in the result will be nil. Duplicate ids in the query will result in duplicate tracks in the result.
|
func (c Client) GetTracks(ids ...spotify.ID) ([]*spotify.FullTrack, error) {
tracks, err := c.client.GetTracks(ids...)
return tracks, err
}
|
[
"func (c *Client) GetTracks(ids ...ID) ([]*FullTrack, error) {\n\tif len(ids) > 50 {\n\t\treturn nil, errors.New(\"spotify: FindTracks supports up to 50 tracks\")\n\t}\n\tspotifyURL := c.baseURL + \"tracks?ids=\" + strings.Join(toStringSlice(ids), \",\")\n\n\tvar t struct {\n\t\tTracks []*FullTrack `jsosn:\"tracks\"`\n\t}\n\n\terr := c.get(spotifyURL, &t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t.Tracks, nil\n}",
"func GetTracks(c echo.Context) *[]trackR.Track {\n\treturn c.Get(\"tracks\").(*[]trackR.Track)\n}",
"func (artist *Artist) GetTracks() *TrackCollection {\n\tif artist.Tracks == nil {\n\t\tartist.Tracks = new(TrackCollection)\n\t\tdbTarget := artist.Tracks.ResponseObject()\n\t\ttrackRef := new(Track)\n\t\terr := datastore.Query(trackRef).\n\t\t\tWhere().\n\t\t\tEqual(&trackRef.Artist, artist.Id).\n\t\t\tSelectToTarget(dbTarget)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Artist.GetTracks Error: \" + err.Error())\n\t\t\treturn nil\n\t\t}\n\t\tfor _, track := range artist.Tracks.tracks {\n\t\t\ttrack.Artist = artist\n\t\t}\n\t}\n\treturn artist.Tracks\n}",
"func (s *session) SearchTracks(query string, pages int) []spotify.FullTrack {\n\n\tvar tracks []spotify.FullTrack\n\tvar results *spotify.SearchResult\n\tvar err error\n\tlimit := pages * 20\n\n\tfor {\n\t\tfmt.Printf(\"searching...\\r\")\n\t\tresults, err = s.Client().SearchOpt(\n\t\t\tquery,\n\t\t\tspotify.SearchTypeTrack,\n\t\t\t&spotify.Options{\n\t\t\t\tLimit: &limit,\n\t\t\t},\n\t\t)\n\t\tif Session.ShouldTryAgain(err) {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"%s\", err)\n\t\treturn tracks\n\t}\n\n\t//fmt.Printf(\" [%04d]\\n\", results.Tracks.Total)\n\n\tfor i := 0; i < pages || pages == -1; i++ {\n\t\tfor {\n\t\t\ttracks = append(tracks, results.Tracks.Tracks...)\n\t\t\terr = s.Client().NextTrackResults(results)\n\t\t\tif err == spotify.ErrNoMorePages {\n\t\t\t\treturn tracks\n\t\t\t}\n\t\t\tif s.ShouldTryAgain(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nil != err {\n\t\t\t\tfmt.Printf(\"failed to get next result page for %s: %s\", query, err)\n\t\t\t\treturn tracks\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn tracks\n\n}",
"func (s *TrackService) FindTracks() ([]models.Track, error) {\n\ttracks := make([]models.Track, 0)\n\n\tquery, err := s.db.Preparex(\n\t\t`SELECT * \n\t\tFROM tracks\n\t\tWHERE deleted = 0`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = query.Select(&tracks)\n\tif err == sql.ErrNoRows {\n\t\treturn tracks, nil\n\t}\n\n\tif err != nil {\n\t\tlog.Println(\"Failed to retrieve tracks\", err)\n\t\treturn nil, err\n\t}\n\n\treturn tracks, nil\n}",
"func (c *Client) ListTracks(purchasedOnly bool, updatedMin int64, pageToken string) (*TrackList, error) {\n\tvar exportType mmdspb.GetTracksToExportRequest_TracksToExportType\n\tswitch purchasedOnly {\n\tcase true:\n\t\texportType = mmdspb.GetTracksToExportRequest_PURCHASED_AND_PROMOTIONAL\n\tcase false:\n\t\texportType = mmdspb.GetTracksToExportRequest_ALL\n\t}\n\tres, err := c.getTracksToExport(&mmdspb.GetTracksToExportRequest{\n\t\tClientId: c.id,\n\t\tExportType: exportType,\n\t\tUpdatedMin: updatedMin,\n\t\tContinuationToken: pageToken,\n\t})\n\tswitch err := err.(type) {\n\tcase nil:\n\t\tif res.Status != mmdspb.GetTracksToExportResponse_OK {\n\t\t\treturn nil, ListError(res.Status)\n\t\t}\n\tcase *textproto.Error:\n\t\t// The Google Play servers respond with 304 Not Modified\n\t\t// if no tracks have been modified after the updatedMin\n\t\t// timestamp. This is not exactly an error condition,\n\t\t// so we break out of the switch rather than return the\n\t\t// error; the remaining function body then arranges to\n\t\t// return an empty TrackList.\n\t\tif err.Code == http.StatusNotModified {\n\t\t\tbreak\n\t\t}\n\t\treturn nil, err\n\tdefault:\n\t\treturn nil, err\n\t}\n\ttrackList := new(TrackList)\n\tconvert.Convert(trackList, res)\n\ttrackList.PurchasedOnly = purchasedOnly\n\treturn trackList, nil\n}",
"func (c *Client) ImportTracks(tracks []*Track) (urls []string, errs []error) {\n\t// Construct and the client-ID-to-track-index mapping and the\n\t// initial metadata upload.\n\tcidm := make(map[string]int)\n\ttrks := make([]*mmldpb.Track, len(tracks))\n\terrs = make([]error, len(tracks))\n\tfor i, track := range tracks {\n\t\tif _, ok := cidm[track.ClientId]; ok {\n\t\t\terrs[i] = fmt.Errorf(\"trying to import two tracks with the same client-side ID\")\n\t\t\tcontinue\n\t\t}\n\t\ttrks[i] = new(mmldpb.Track)\n\t\tconvert.Convert(trks[i], track)\n\t\tcidm[track.ClientId] = i\n\t}\n\t// Upload track metadata.\n\tres, err := c.uploadMetadata(&mmuspb.UploadMetadataRequest{\n\t\tUploaderId: c.id,\n\t\tTrack: trks,\n\t})\n\tif err != nil {\n\t\tfor i := range errs {\n\t\t\tif errs[i] == nil {\n\t\t\t\terrs[i] = err\n\t\t\t}\n\t\t}\n\t\treturn nil, errs\n\t}\n\t// Satisfy any requests for track samples and append the\n\t// responses to tres.\n\ttres := res.TrackSampleResponse\n\tif n := len(res.SignedChallengeInfo); n > 0 {\n\t\tspls := make([]*mmudpb.TrackSample, n)\n\t\tfor i, sci := range res.SignedChallengeInfo {\n\t\t\tci := sci.ChallengeInfo\n\t\t\tj := cidm[ci.ClientTrackId]\n\t\t\tvar sample []byte\n\t\t\tif sf := tracks[j].SampleFunc; sf != nil {\n\t\t\t\tsample = sf(\n\t\t\t\t\tint(ci.StartMillis),\n\t\t\t\t\tint(ci.DurationMillis),\n\t\t\t\t)\n\t\t\t}\n\t\t\tif sample == nil {\n\t\t\t\t// A nil sample is different from an\n\t\t\t\t// empty one: the former results in an\n\t\t\t\t// invalid sample message. 
So, if\n\t\t\t\t// Sampler leaves us with a nil sample,\n\t\t\t\t// replace it with an empty one.\n\t\t\t\tsample = make([]byte, 0)\n\t\t\t}\n\t\t\tspls[i] = &mmudpb.TrackSample{\n\t\t\t\tTrack: trks[j],\n\t\t\t\tSample: sample,\n\t\t\t\tSignedChallengeInfo: sci,\n\t\t\t\tSampleFormat: mmldpb.Track_MP3,\n\t\t\t}\n\t\t}\n\t\tsres, err := c.uploadSample(&mmuspb.UploadSampleRequest{\n\t\t\tUploaderId: c.id,\n\t\t\tTrackSample: spls,\n\t\t})\n\t\tif err != nil {\n\t\t\tfor _, spl := range spls {\n\t\t\t\terrs[cidm[spl.Track.ClientId]] = err\n\t\t\t}\n\t\t} else {\n\t\t\ttres = append(tres, sres.TrackSampleResponse...)\n\t\t}\n\t}\n\t// Parse responses to track metadata and samples. The result\n\t// is a map from track indices to their server IDs.\n\tsidm := make(map[int]string)\n\tfor _, res := range tres {\n\t\ti := cidm[res.ClientTrackId]\n\t\tif res.ResponseCode != mmuspb.TrackSampleResponse_UPLOAD_REQUESTED {\n\t\t\terrs[i] = ImportError(res.ResponseCode)\n\t\t\tcontinue\n\t\t}\n\t\tsidm[i] = res.ServerTrackId\n\t}\n\t// Acquire upload sessions.\n\turls = make([]string, len(tracks))\n\tfor i, id := range sidm {\n\t\ttrk := tracks[i]\n\t\tres, err := c.getUploadSession(&mmssjs.GetUploadSessionRequest{\n\t\t\tName: id,\n\t\t\tUploaderId: c.id,\n\t\t\tClientId: trk.ClientId,\n\t\t\tServerId: id,\n\t\t\tTrackBitRate: trk.BitRate,\n\t\t\tSyncNow: true,\n\t\t\t// BUG(lor): Client.ImportTracks does not\n\t\t\t// activate the upload progress tracker on\n\t\t\t// https://play.google.com/music.\n\t\t})\n\t\tif err != nil {\n\t\t\terrs[i] = err\n\t\t\tcontinue\n\t\t}\n\t\tif res.Error != nil {\n\t\t\terrs[i] = res.Error\n\t\t\tcontinue\n\t\t}\n\t\turls[i] = res.Transfers[0].PutUrl\n\t}\n\treturn urls, errs\n}",
"func TestGetAllTracks(t *testing.T) {\n\ttestDB := setupDB(t)\n\tdefer clearTrackCol(t, testDB)\n\n\tvar newTrack Track\n\tnewTrack.H_date = \"2016-02-19 00:00:00 +0000 UTC\"\n\tnewTrack.Pilot = \"Miguel Angel Gordillo\"\n\tnewTrack.Glider = \"RV8\"\n\tnewTrack.Glider_id = \"EC-XLL\"\n\tnewTrack.Track_length = 443.2573603705269\n\tnewTrack.Track_src_url = \"http://skypolaris.org/wp-content/uploads/IGS%20Files/Madrid%20to%20Jerez.igc\"\n\n\t//Insert two tracks\n\tinsertTrack(&newTrack, testDB)\n\tinsertTrack(&newTrack, testDB)\n\n\ttracks := getAllTracks(testDB)\n\n\tif len(tracks) != 2 {\n\t\tt.Fatal(\"Could not retrieve two tracks\")\n\t}\n\n}",
"func (a *API) SaveTracks(ids ...string) error {\n\tquery := make(url.Values)\n\tquery.Add(\"ids\", strings.Join(ids, \",\"))\n\n\treturn a.put(\"v1\", \"/me/tracks\", query, nil)\n}",
"func getAllTracks(db *DBInfo) []Track {\n\tsession, err := mgo.Dial(db.ConnectionString)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer session.Close()\n\n\tvar all []Track\n\n\terr = session.DB(db.DBString).C(db.TrackCollectionString).Find(bson.M{}).All(&all)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn all\n}",
"func GetTrackIDs(ctx context.Context, s Scope, processor *perfetto.Processor) ([]int64, error) {\n\tqueryResult, err := processor.Query(fmt.Sprintf(trackIDQuery, s))\n\tif err != nil || queryResult.GetNumRecords() <= 0 {\n\t\treturn []int64{}, log.Err(ctx, err, \"Failed to query GPU render stage track ids\")\n\t}\n\tresult := make([]int64, queryResult.GetNumRecords())\n\tfor i, v := range queryResult.GetColumns()[0].GetLongValues() {\n\t\tresult[i] = v\n\t}\n\treturn result, nil\n}",
"func (trackService *Service) Tracks(date string) ([]*app.TrackResponse, error) {\n\tdateEnd, err := time.Parse(trackService.DateLayout, date)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdateBegin := dateEnd.AddDate(0, 0, -7)\n\tcurrencies, err := trackService.CurrencyRepo.FetchTracked()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := []*app.TrackResponse{}\n\tfor i := range currencies {\n\t\trates, _ := trackService.RateRepo.FetchBetweenDate(currencies[i].ID, &dateBegin, &dateEnd)\n\t\tfrom := currencies[i].From\n\t\tto := currencies[i].To\n\t\t// Insufficient data\n\t\trateValue := float32(-1)\n\t\tavg := float32(-1)\n\t\tif len(rates) >= 7 {\n\t\t\trateValue = rates[6].RateValue\n\t\t\tavg = trackService.calculateAvg(rates)\n\t\t}\n\t\tif currencies[i].Tracked {\n\t\t\tresult = append(result, &app.TrackResponse{\n\t\t\t\tID: currencies[i].ID,\n\t\t\t\tFrom: from,\n\t\t\t\tTo: to,\n\t\t\t\tRateValue: rateValue,\n\t\t\t\tAvg: avg,\n\t\t\t})\n\t\t}\n\t\tif currencies[i].TrackedRev {\n\t\t\tresult = append(result, &app.TrackResponse{\n\t\t\t\tID: currencies[i].ID,\n\t\t\t\tFrom: to,\n\t\t\t\tTo: from,\n\t\t\t\tRateValue: 1 / rateValue,\n\t\t\t\tAvg: 1 / avg,\n\t\t\t})\n\t\t}\n\t}\n\treturn result, nil\n}",
"func GetRecentTracks() (tracks []Track, err error) {\n\tendpoint := fmt.Sprintf(\"http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&user=%s&api_key=%s+&format=json\", os.Getenv(\"LASTFM_USER\"), os.Getenv(\"LASTFM_API_KEY\"))\n\n\tlastFmClient := http.Client{\n\t\tTimeout: time.Second * 5,\n\t}\n\n\treq, reqErr := http.NewRequest(http.MethodGet, endpoint, nil)\n\tif reqErr != nil {\n\t\treturn nil, reqErr\n\t}\n\n\tres, getErr := lastFmClient.Do(req)\n\tif getErr != nil {\n\t\treturn nil, fmt.Errorf(\"last.fm api request failed: %v\", getErr)\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"last.fm api request failed with status code: %d\", res.StatusCode)\n\t}\n\n\tbody, readErr := ioutil.ReadAll(res.Body)\n\tif readErr != nil {\n\t\treturn nil, readErr\n\t}\n\n\tvar lastFmTracks LastFmRecentTracks\n\tjsonErr := json.Unmarshal(body, &lastFmTracks)\n\tif jsonErr != nil {\n\t\treturn nil, jsonErr\n\t}\n\n\ttracks = []Track{}\n\tfor _, track := range lastFmTracks.RecentTracks.Track {\n\t\ttracks = append(\n\t\t\ttracks,\n\t\t\tTrack{\n\t\t\t\tName: track.TrackName,\n\t\t\t\tURL: track.TrackURL,\n\t\t\t\tAlbum: track.Album.Title,\n\t\t\t\tArtist: track.Artist.Title,\n\t\t\t},\n\t\t)\n\t}\n\n\treturn tracks, nil\n}",
"func FormatTracks(t []*soundcloud.Track) []*Track {\n\ttt := make([]*Track, len(t))\n\tfor i := 0; i < len(t); i++ {\n\t\ttt[i] = FormatTrack(t[i])\n\t}\n\treturn tt\n}",
"func (r *Resolver) Tracks() generated.TracksResolver { return &tracksResolver{r} }",
"func (a Artist) GetTopTracks(limit int) ([]Track, error) {\n\treturn GetTopTracksForArtistID(a.ID, limit)\n}",
"func (tracker DirectoryBasedTracker) GatherTracks(config config.Config) (tracks []Track) {\n\tdefaultDir := \"./\"\n\ttracksDir := \"./tracks\"\n\tdefaultExists := false\n\n\t// try to read steps from the default track and step at the top-level directory, if it exists\n\tt, included, _ := tracker.readTrack(config, DEFAULT_TRACK_NAME, defaultDir)\n\tif included && t.StepsCount > 0 {\n\t\tdefaultExists = true\n\t\ttracker.Log.Println(fmt.Sprintf(\"Tracks: Adding default track\"))\n\t\ttracks = append(tracks, t)\n\t}\n\n\t// read tracks from the usual tracks directory\n\titems, _ := afero.ReadDir(tracker.Fs, tracksDir)\n\tfor _, item := range items {\n\t\tif item.IsDir() {\n\t\t\tt, included, _ := tracker.readTrack(config, item.Name(), fmt.Sprintf(\"%s/%s\", tracksDir, item.Name()))\n\t\t\tif included && t.StepsCount > 0 {\n\t\t\t\ttracker.Log.Println(fmt.Sprintf(\"Tracks: Adding %s\", item.Name()))\n\t\t\t\ttracks = append(tracks, t)\n\t\t\t}\n\t\t}\n\t}\n\n\t// best practice is for one or the other of the above two situations to be present\n\tif defaultExists && len(tracks) > 1 {\n\t\ttracker.Log.Warnf(\"Detected that a default track (%s) exists along with one or more explicit tracks (%s). Best practice is to migrate your default track to a named one instead.\", defaultDir, tracksDir)\n\t}\n\n\treturn\n}",
"func allTrackIDs(w http.ResponseWriter, r *http.Request) {\n\t// Connects to the database.\n\tdatabase := mongodb.DatabaseInit(Collection)\n\n\t// Gets the Count of Tracks in the DB.\n\tcount, _ := database.GetCount()\n\n\t// Check if there are any tracks in the DB.\n\tif count != 0 {\n\t\t// Slice of ints, to hold the IDs.\n\t\tvar idSlice []int\n\n\t\t// Gets all tracks from the database.\n\t\t// Loops through them, appending their ID to the new slice.\n\t\ttracks, _ := database.FindAll()\n\t\tfor i := 0; i < len(tracks); i++ {\n\t\t\tidSlice = append(idSlice, tracks[i].ID)\n\t\t}\n\n\t\t// Converts the struct to json.\n\t\tjson, err := json.Marshal(idSlice)\n\t\tif err != nil {\n\t\t\t// Sets header status code to 500 \"Internal server error\" and logs the error.\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Fatal(err)\n\t\t} else {\n\t\t\t// Sets header content-type to application/json and status code to 200 (OK).\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\t\t// Returns the array of IDs.\n\t\t\tw.Write([]byte(json))\n\t\t}\n\t} else {\n\t\t// There are no tracks stored in the DB.\n\t\t// Sets header content-type to application/json and status code to 404 (Not found).\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\n\t\t// Returns an empty array.\n\t\tw.Write([]byte(\"[]\"))\n\t}\n}",
"func (f *ArtistService) GetArtistByTrack(req *pb.SimpleArtistRequest, stream pb.ArtistService_GetArtistByTrackServer) error {\n\tctx := stream.Context()\n\n\tspan := opentracing.SpanFromContext(ctx)\n\tspan.SetTag(\"service\", \"gRPC-artist-get-by-track\")\n\tdefer span.Finish()\n\n\tartists, err := f.service.GetArtistByTrack(ctx,req.GetId())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, a := range artists {\n\t\tif err := stream.Send(a); err != nil {\n\t\t\tfmt.Println(\"Error processing stream :: \", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PlayOpt is like Play but with more options.
|
func (c Client) PlayOpt(opt *spotify.PlayOptions) error {
err := c.client.PlayOpt(opt)
return err
}
|
[
"func (a AnadoluAtesi) Play() {\n\tfmt.Printf(\"%s plays %s\\n\", a.Name, a.DanceType)\n}",
"func RunOpt(opts []string) {\n\tLog.Info(\"cmd = %v\", MakeCmdString(opts))\n\tLog.Info(\"method = run.Cmd\")\n\ts := time.Now()\n\tstdout, err := run.Cmd(opts)\n\tLog.Info(\"time = %.03f\", time.Since(s).Seconds())\n\tLog.Info(\"size = %v\", len(stdout))\n\tif err == nil {\n\t\tLog.Info(\"status = passed\")\n\t} else {\n\t\tLog.Info(\"status = failed - %v\", err)\n\t}\n\treturn\n}",
"func (n *Step) Play() {\n\t*n = Step(true)\n}",
"func OptSend(lim rate.Limit, callback func(int64, *bbpb.Build)) StartOption {\n\treturn func(s *State) {\n\t\tvar err error\n\t\ts.sendCh, err = dispatcher.NewChannel(s.ctx, &dispatcher.Options{\n\t\t\tQPSLimit: rate.NewLimiter(lim, 1),\n\t\t\tBuffer: buffer.Options{\n\t\t\t\tMaxLeases: 1,\n\t\t\t\tBatchItemsMax: 1,\n\t\t\t\tFullBehavior: &buffer.DropOldestBatch{\n\t\t\t\t\tMaxLiveItems: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}, func(batch *buffer.Batch) error {\n\t\t\tbuildPb, vers := func() (*bbpb.Build, int64) {\n\t\t\t\ts.buildPbMu.Lock()\n\t\t\t\tdefer s.buildPbMu.Unlock()\n\n\t\t\t\t// technically we don't need atomic here because copyExclusionMu is held\n\t\t\t\t// in WRITE mode, but it's clearer to mirror usage directly.\n\t\t\t\tvers := atomic.LoadInt64(&s.buildPbVers)\n\n\t\t\t\tif s.buildPbVersSent >= vers {\n\t\t\t\t\treturn nil, 0\n\t\t\t\t}\n\t\t\t\ts.buildPbVersSent = vers\n\n\t\t\t\tbuild := proto.Clone(s.buildPb).(*bbpb.Build)\n\n\t\t\t\t// now we populate Output.Properties\n\t\t\t\tif s.topLevelOutput != nil || len(s.outputProperties) != 0 {\n\t\t\t\t\tbuild.Output.Properties = s.topLevelOutput.getStructClone()\n\t\t\t\t\tfor ns, child := range s.outputProperties {\n\t\t\t\t\t\tst := child.getStructClone()\n\t\t\t\t\t\tif st == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif build.Output.Properties == nil {\n\t\t\t\t\t\t\tbuild.Output.Properties, _ = structpb.NewStruct(nil)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbuild.Output.Properties.Fields[ns] = structpb.NewStructValue(st)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn build, vers\n\t\t\t}()\n\t\t\tif buildPb == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tcallback(vers, buildPb)\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\t// This can only happen if Options is malformed.\n\t\t\t// Since it's statically computed above, that's not possible (or the tests\n\t\t\t// are also panicing).\n\t\t\tpanic(err)\n\t\t}\n\t}\n}",
"func (t *Transport) RoundTripOpt(req *http.Request, opt h2quic.RoundTripOpt) (*http.Response, error) {\n\n\t// initialize the SCION networking context once for all Transports\n\tinitOnce.Do(func() {\n\t\tif snet.DefNetwork == nil {\n\t\t\tinitErr = scionutil.InitSCION(t.LAddr)\n\t\t}\n\t})\n\tif initErr != nil {\n\t\treturn nil, initErr\n\t}\n\n\t// set the dial function once for each Transport\n\tt.dialOnce.Do(func() {\n\t\tdial := func(network, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.Session, error) {\n\t\t\traddr, ok := t.DNS[req.URL.Host]\n\t\t\tif !ok {\n\t\t\t\tlog.Fatal(\"shttp: Host not found in DNS map\")\n\t\t\t}\n\t\t\treturn squic.DialSCION(nil, t.LAddr, raddr, nil)\n\t\t}\n\t\tt.rt = &h2quic.RoundTripper{\n\t\t\tDial: dial,\n\t\t}\n\t})\n\n\treturn t.rt.RoundTripOpt(req, opt)\n}",
"func (t TalkToMe) WithVLC(enable bool) TalkToMe {\n\tif enable {\n\t\treturn t | (1 << 4)\n\t}\n\treturn t | (0 << 4)\n}",
"func askForPlay() string {\n\tfor {\n\t\tfmt.Println(\"Please type in R (Rock), P (Paper), or S (Scissors)\")\n\t\tplayPointer := flag.String(\"Play\", \"None\", \"Enter R, P, or S\")\n\t\tflag.Parse()\n\n\t\tif *playPointer != \"R\" && *playPointer != \"P\" && *playPointer != \"S\" {\n\t\t\tfmt.Println(\"Your choice cannot be interpretted\")\n\t\t} else {\n\t\t\treturn *playPointer\n\t\t}\n\t}\n}",
"func Play() {\n\tfmt.Println(\"Play!\")\n}",
"func (o *Object) Play() {\n\to.SetPlaying(true)\n}",
"func StartPlayback(opts *PlayerOptions) error {\n\tv, err := query.Values(nil) // Don't pass anything here because if we do and we start playback with a large list URIs they will be put in the query string and give us an error\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts.DeviceID = \"\"\n\tj, err := json.Marshal(opts)\n\n\tif err != nil {\n\t\tlog.Fatal(\"fatal\", err)\n\t}\n\n\tb := bytes.NewBuffer(j)\n\n\tt := getAccessToken()\n\n\tr := buildRequest(\"PUT\", apiURLBase+\"me/player/play\", v, b)\n\tr.Header.Add(\"Authorization\", \"Bearer \"+t)\n\n\terr = makeRequest(r, nil)\n\t// TODO: Handle error here\n\n\treturn err\n}",
"func (p *Player) TogglePlay() {\n\tif p.Video.Paused {\n\t\tp.Play()\n\t} else {\n\t\tp.Pause()\n\t}\n}",
"func (ap *Panel) play(streamer beep.StreamSeekCloser, audioSampleRate beep.SampleRate) {\n\tap.Stop()\n\tresampler := beep.Resample(4, audioSampleRate, ap.speakerSampleRate, streamer)\n\tap.mixer.Add(resampler)\n}",
"func (p Piano) Play() {\n\tfmt.Printf(\"Plip plip %d keys\\n\", p.Keys)\n}",
"func RunSilentOpt(opts []string) {\n\tLog.Info(\"cmd = %v\", MakeCmdString(opts))\n\tLog.Info(\"method = run.Cmd\")\n\ts := time.Now()\n\tstdout, err := run.CmdSilent(opts)\n\tLog.Info(\"time = %.03f\", time.Since(s).Seconds())\n\tLog.Info(\"size = %v\", len(stdout))\n\tif err == nil {\n\t\tLog.Info(\"status = passed\")\n\t} else {\n\t\tLog.Info(\"status = failed - %v\", err)\n\t}\n\treturn\n}",
"func (s Service) Option(name string) interface{} { return s.Options[name] }",
"func composeOptions(opts ...Option) (option, error) {\n\tres := option{\n\t\tretry: defaultMaxRetry,\n\t\tqueue: base.DefaultQueueName,\n\t\ttaskID: uuid.NewString(),\n\t\ttimeout: 0, // do not set to defaultTimeout here\n\t\tdeadline: time.Time{},\n\t\tprocessAt: time.Now(),\n\t}\n\tfor _, opt := range opts {\n\t\tswitch opt := opt.(type) {\n\t\tcase retryOption:\n\t\t\tres.retry = int(opt)\n\t\tcase queueOption:\n\t\t\tqname := string(opt)\n\t\t\tif err := base.ValidateQueueName(qname); err != nil {\n\t\t\t\treturn option{}, err\n\t\t\t}\n\t\t\tres.queue = qname\n\t\tcase taskIDOption:\n\t\t\tid := string(opt)\n\t\t\tif isBlank(id) {\n\t\t\t\treturn option{}, errors.New(\"task ID cannot be empty\")\n\t\t\t}\n\t\t\tres.taskID = id\n\t\tcase timeoutOption:\n\t\t\tres.timeout = time.Duration(opt)\n\t\tcase deadlineOption:\n\t\t\tres.deadline = time.Time(opt)\n\t\tcase uniqueOption:\n\t\t\tttl := time.Duration(opt)\n\t\t\tif ttl < 1*time.Second {\n\t\t\t\treturn option{}, errors.New(\"Unique TTL cannot be less than 1s\")\n\t\t\t}\n\t\t\tres.uniqueTTL = ttl\n\t\tcase processAtOption:\n\t\t\tres.processAt = time.Time(opt)\n\t\tcase processInOption:\n\t\t\tres.processAt = time.Now().Add(time.Duration(opt))\n\t\tcase retentionOption:\n\t\t\tres.retention = time.Duration(opt)\n\t\tcase groupOption:\n\t\t\tkey := string(opt)\n\t\t\tif isBlank(key) {\n\t\t\t\treturn option{}, errors.New(\"group key cannot be empty\")\n\t\t\t}\n\t\t\tres.group = key\n\t\tdefault:\n\t\t\t// ignore unexpected option\n\t\t}\n\t}\n\treturn res, nil\n}",
"func SweeperOption(name string, duration int, settings map[string]interface{}) Option {\n\treturn Option{func(op *options) {\n\t\tvals := make([]OptionItem, 0)\n\t\tvals = append(vals, OptionItem{\"duration\", duration})\n\n\t\t// Append settings if existing\n\t\tif len(settings) > 0 {\n\t\t\tfor k, v := range settings {\n\t\t\t\tvals = append(vals, OptionItem{k, v})\n\t\t\t}\n\t\t}\n\n\t\t// Append with overriding way\n\t\top.values[name] = vals\n\t}}\n}",
"func (c *Cmd) Option(short rune, long string, description string) {\n\tc.addOption(&option{short: short, long: long, description: description})\n}",
"func (this *Media) AddOption(options string) error {\n\tif this.ptr == nil {\n\t\treturn syscall.EINVAL\n\t}\n\n\tc := C.CString(options)\n\tC.libvlc_media_add_option(this.ptr, c)\n\tC.free(unsafe.Pointer(c))\n\n\treturn checkError()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CrititcalTest wraps testers to signify that the tester is considered critical
|
func CriticalTest(f func(ctx *TestContext)) Tester {
return testFunc{f, CriticalTests}
}
|
[
"func (s *Suite) Critical(name string, test SuiteTestHandler) {\n\tfail := func(err interface{}) {\n\t\ts.failed++\n\t\ts.fail(name)\n\t\ts.T.Fatal(err)\n\t}\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfail(err)\n\t\t}\n\t}()\n\n\tif err := test(s); err != nil {\n\t\tfail(err)\n\t}\n\n\ts.passed++\n\ts.ok(name)\n}",
"func ConformanceTests(t *testing.T, props map[string]string, lockstore lock.Store, config TestConfig) {\n\t// Test vars\n\tkey := strings.ReplaceAll(uuid.New().String(), \"-\", \"\")\n\tt.Logf(\"Base key for test: %s\", key)\n\n\tt.Run(\"init\", func(t *testing.T) {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\t\tdefer cancel()\n\t\terr := lockstore.InitLockStore(ctx, lock.Metadata{Base: metadata.Base{\n\t\t\tProperties: props,\n\t\t}})\n\t\trequire.NoError(t, err)\n\t})\n\n\t// Don't run more tests if init failed\n\tif t.Failed() {\n\t\tt.Fatal(\"Init failed, stopping further tests\")\n\t}\n\n\tconst lockOwner = \"conftest\"\n\tlockKey1 := key + \"-1\"\n\tlockKey2 := key + \"-2\"\n\n\tvar expirationCh *time.Timer\n\n\tt.Run(\"TryLock\", func(t *testing.T) {\n\t\t// Acquire a lock\n\t\tt.Run(\"acquire lock1\", func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.TryLock(ctx, &lock.TryLockRequest{\n\t\t\t\tResourceID: lockKey1,\n\t\t\t\tLockOwner: lockOwner,\n\t\t\t\tExpiryInSeconds: 15,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.True(t, res.Success)\n\t\t})\n\n\t\t// Acquire a second lock (with a shorter expiration)\n\t\tt.Run(\"acquire lock2\", func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.TryLock(ctx, &lock.TryLockRequest{\n\t\t\t\tResourceID: lockKey2,\n\t\t\t\tLockOwner: lockOwner,\n\t\t\t\tExpiryInSeconds: 3,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.True(t, res.Success)\n\n\t\t\t// Set expirationCh to when lock2 expires\n\t\t\texpirationCh = time.NewTimer(3 * time.Second)\n\t\t})\n\n\t\t// Acquiring the same lock again should fail\n\t\tt.Run(\"fails to acquire existing lock\", func(t *testing.T) {\n\t\t\tctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.TryLock(ctx, &lock.TryLockRequest{\n\t\t\t\tResourceID: lockKey1,\n\t\t\t\tLockOwner: lockOwner,\n\t\t\t\tExpiryInSeconds: 15,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.False(t, res.Success)\n\t\t})\n\t})\n\n\tt.Run(\"Unlock\", func(t *testing.T) {\n\t\tt.Run(\"fails to unlock with nonexistent resource ID\", func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.Unlock(ctx, &lock.UnlockRequest{\n\t\t\t\tResourceID: \"nonexistent\",\n\t\t\t\tLockOwner: lockOwner,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.Equal(t, lock.LockDoesNotExist, res.Status)\n\t\t})\n\n\t\tt.Run(\"fails to unlock with wrong owner\", func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.Unlock(ctx, &lock.UnlockRequest{\n\t\t\t\tResourceID: lockKey1,\n\t\t\t\tLockOwner: \"nonowner\",\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.Equal(t, lock.LockBelongsToOthers, res.Status)\n\t\t})\n\n\t\tt.Run(\"unlocks successfully\", func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.Unlock(ctx, &lock.UnlockRequest{\n\t\t\t\tResourceID: lockKey1,\n\t\t\t\tLockOwner: lockOwner,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.Equal(t, lock.Success, res.Status)\n\t\t})\n\t})\n\n\tt.Run(\"lock expires\", func(t *testing.T) {\n\t\t// Wait until the lock is supposed to expire\n\t\t<-expirationCh.C\n\n\t\t// Assert that the lock doesn't exist anymore - we should be able to re-acquire it\n\t\tassert.Eventually(t, func() bool {\n\t\t\tctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second)\n\t\t\tdefer cancel()\n\t\t\tres, err := lockstore.TryLock(ctx, &lock.TryLockRequest{\n\t\t\t\tResourceID: lockKey2,\n\t\t\t\tLockOwner: lockOwner,\n\t\t\t\tExpiryInSeconds: 3,\n\t\t\t})\n\t\t\treturn err == nil && res != nil && res.Success\n\t\t}, 5*time.Second, 100*time.Millisecond, \"Lock 2 was not released in time after its scheduled expiration\")\n\t})\n}",
"func StakingClientImplementationTests(t *testing.T, backend api.Backend, timeSource epochtime.SetableBackend) {\n\tfor _, tc := range []struct {\n\t\tn string\n\t\tfn func(*testing.T, api.Backend, epochtime.SetableBackend)\n\t}{\n\t\t{\"Transfer\", testTransfer},\n\t\t{\"TransferSelf\", testSelfTransfer},\n\t\t{\"Burn\", testBurn},\n\t\t{\"Escrow\", testEscrow},\n\t\t{\"EscrowSelf\", testSelfEscrow},\n\t} {\n\t\tt.Run(tc.n, func(t *testing.T) { tc.fn(t, backend, timeSource) })\n\t}\n}",
"func (m *MockSession) Crit(arg0 string, arg1 ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Crit\", varargs...)\n}",
"func (mysuit *MySuite) TestTiger_ClacMul(c *check.C) () {\n\tutest.Init(orgID)\n\tcontractOwner := utest.DeployContract(c, contractName, orgID, contractMethods, contractInterfaces)\n\ttest := NewTestObject(contractOwner)\n\ttest.setSender(contractOwner).InitChain()\n\t//TODO\n}",
"func TestRenterThree(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create a group for the subtests\n\tgroupParams := siatest.GroupParams{\n\t\tHosts: 5,\n\t\tRenters: 1,\n\t\tMiners: 1,\n\t}\n\n\t// Specify subtests to run\n\tsubTests := []test{\n\t\t{\"TestAllowanceDefaultSet\", testAllowanceDefaultSet},\n\t\t{\"TestStreamLargeFile\", testStreamLargeFile},\n\t}\n\n\t// Run tests\n\tif err := runRenterTests(t, groupParams, subTests); err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func (mysuit *MySuite) TestTiger_ClacByLine(c *check.C) () {\n\tutest.Init(orgID)\n\tcontractOwner := utest.DeployContract(c, contractName, orgID, contractMethods, contractInterfaces)\n\ttest := NewTestObject(contractOwner)\n\ttest.setSender(contractOwner).InitChain()\n\t//TODO\n}",
"func TestRace(ctx context.Context) error {\n\tmg.CtxDeps(ctx, getGotestsum)\n\tsay(\"running race condition tests\")\n\treturn runTests(\"-race\")\n}",
"func (mysuit *MySuite) TestTiger_ClacFee(c *check.C) () {\n\tutest.Init(orgID)\n\tcontractOwner := utest.DeployContract(c, contractName, orgID, contractMethods, contractInterfaces)\n\ttest := NewTestObject(contractOwner)\n\ttest.setSender(contractOwner).InitChain()\n\t//TODO\n}",
"func (t *Tester) CoveredBy() ([]string, error) {\n\toutputDir, err := ioutil.TempDir(\"\", \"test_finder\")\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tdefer os.RemoveAll(outputDir)\n\n\ttestBin, err := t.compileTest(outputDir)\n\tif err != nil {\n\t\treturn []string{}, fmt.Errorf(\"error compiling test for go pkg %s: %s\", t.testPos.pkg, err)\n\t}\n\n\tallTests, err := findTests(t.testPos.pkg, t.run)\n\tif err != nil {\n\t\treturn []string{}, fmt.Errorf(\"error finding tests in go pkg %s: %s\", t.testPos.pkg, err)\n\t}\n\n\tif len(allTests) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\treturn t.coverFinder.coveringTests(t, testBin, outputDir, allTests, t.includeSubtests)\n}",
"func testCheckers() {\n\t// Seed the initial population\n\tpop_size := 100\n\tpop := make([]games.Agent, pop_size)\n\tfor i := 0; i < pop_size; i++ {\n\t\tpop[i] = neuralnetwork.RandomNetwork(65, 130, 24)\n\t}\n\n\t// Run neuroevolution to produce an agent. The checkers games used by the\n\t// evolutionary algorithm will be cut off after 100 moves to prevent\n\t// random players from prolonging the game indefinitely.\n\tevolved_agent := evolution.EvolveAgents(games.MakeCheckers(100), games.CheckersPlayerMaker,\n\t\t512, 64, pop) // Each member of the population will be tested at maximum 64 times.\n\t// After 512 generations the algorithm concludes if it hasn't already spawned\n\t// an agent that can win 64 times for 10 generations.\n\tfmt.Println(\"Training complete!\")\n\n\t// Play checkers against the user indefinitely\n\tfor {\n\t\tvictor := games.Checkers(games.CheckersPlayerMaker(evolved_agent), games.HumanCheckersPlayer)\n\t\tif victor == -1 {\n\t\t\tfmt.Println(\"\\n\\nYou win!\")\n\t\t} else if victor == 0 {\n\t\t\tfmt.Println(\"\\n\\nDraw!\")\n\t\t} else if victor == 1 {\n\t\t\tfmt.Println(\"\\n\\nYou lose!\")\n\t\t}\n\t}\n\n}",
"func TestRenter(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create a group for the subtests\n\tgroupParams := siatest.GroupParams{\n\t\tHosts: 5,\n\t\tRenters: 1,\n\t\tMiners: 1,\n\t}\n\n\t// Specify subtests to run\n\tsubTests := []test{\n\t\t{\"TestClearDownloadHistory\", testClearDownloadHistory},\n\t\t{\"TestDirectories\", testDirectories},\n\t\t{\"TestSetFileTrackingPath\", testSetFileTrackingPath},\n\t\t{\"TestDownloadAfterRenew\", testDownloadAfterRenew},\n\t\t{\"TestDownloadMultipleLargeSectors\", testDownloadMultipleLargeSectors},\n\t\t{\"TestLocalRepair\", testLocalRepair},\n\t}\n\n\t// Run tests\n\tif err := runRenterTests(t, groupParams, subTests); err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func beforeTest() {\n\tfor _, g := range interestingGoroutines() {\n\t\tbeforeTestGorountines[g] = true\n\t}\n}",
"func (t *TestingTool) Monitor(kubeClient kubernetes.Interface, testingErr chan error) {\n\ttestingErr <- fmt.Errorf(\"Not implemented\")\n}",
"func TestEnforcer(t *testing.T) {\n\tsuite.Run(t, &EnforcerTestSuite{})\n}",
"func TestRenterContracts(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create a group for testing\n\tgroupParams := siatest.GroupParams{\n\t\tHosts: 2,\n\t\tRenters: 1,\n\t\tMiners: 1,\n\t}\n\ttestDir := renterTestDir(t.Name())\n\ttg, err := siatest.NewGroupFromTemplate(testDir, groupParams)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to create group:\", err)\n\t}\n\tdefer func() {\n\t\tif err := tg.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\t// Get Renter\n\tr := tg.Renters()[0]\n\trg, err := r.RenterGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Record the start period at the beginning of test\n\tcurrentPeriodStart := rg.CurrentPeriod\n\tperiod := rg.Settings.Allowance.Period\n\trenewWindow := rg.Settings.Allowance.RenewWindow\n\tnumRenewals := 0\n\n\t// Check if the current period was set in the past\n\tcg, err := r.ConsensusGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif currentPeriodStart > cg.Height-renewWindow {\n\t\tt.Fatalf(`Current period not set in the past as expected.\n\t\tCP: %v\n\t\tBH: %v\n\t\tRW: %v\n\t\t`, currentPeriodStart, cg.Height, renewWindow)\n\t}\n\n\t// Confirm Contracts were created as expected. 
There should only be active\n\t// contracts and no inactive or expired contracts\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\trc, err := r.RenterInactiveContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(rc.ActiveContracts) != len(tg.Hosts()) {\n\t\t\treturn fmt.Errorf(\"Expected %v active contracts, got %v\", len(tg.Hosts()), len(rc.ActiveContracts))\n\t\t}\n\t\tif len(rc.InactiveContracts) != 0 {\n\t\t\treturn fmt.Errorf(\"Expected 0 inactive contracts, got %v\", len(rc.InactiveContracts))\n\t\t}\n\t\trcExpired, err := r.RenterExpiredContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(rcExpired.ExpiredContracts) != 0 {\n\t\t\treturn fmt.Errorf(\"Expected 0 expired contracts, got %v\", len(rcExpired.ExpiredContracts))\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trc, err := r.RenterContractsGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Confirm contract end heights were set properly\n\tfor _, c := range rc.ActiveContracts {\n\t\tif c.EndHeight != currentPeriodStart+period+renewWindow {\n\t\t\tt.Log(\"Endheight:\", c.EndHeight)\n\t\t\tt.Log(\"Allowance Period:\", period)\n\t\t\tt.Log(\"Renew Window:\", renewWindow)\n\t\t\tt.Log(\"Current Period:\", currentPeriodStart)\n\t\t\tt.Fatal(\"Contract endheight not set to Current period + Allowance Period + Renew Window\")\n\t\t}\n\t}\n\n\t// Record original Contracts and create Maps for comparison\n\toriginalContracts := rc.ActiveContracts\n\toriginalContractIDMap := make(map[types.FileContractID]struct{})\n\tfor _, c := range originalContracts {\n\t\toriginalContractIDMap[c.ID] = struct{}{}\n\t}\n\n\t// Mine blocks to force contract renewal\n\tif err = renewContractsByRenewWindow(r, tg); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnumRenewals++\n\n\t// Confirm Contracts were renewed as expected, all original contracts should\n\t// have been renewed if GoodForRenew = true. 
There should be the same\n\t// number of active and inactive contracts, and 0 expired contracts since we\n\t// are still within the endheight of the original contracts, and the\n\t// inactive contracts should be the same contracts as the original active\n\t// contracts.\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\trc, err := r.RenterInactiveContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(originalContracts) != len(rc.InactiveContracts) {\n\t\t\treturn fmt.Errorf(\"Didn't get expected number of inactive contracts, expected %v got %v\", len(originalContracts), len(rc.InactiveContracts))\n\t\t}\n\t\tfor _, c := range rc.InactiveContracts {\n\t\t\tif _, ok := originalContractIDMap[c.ID]; !ok {\n\t\t\t\treturn errors.New(\"ID from rc not found in originalContracts\")\n\t\t\t}\n\t\t}\n\t\trcExpired, err := r.RenterExpiredContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(rcExpired.ExpiredContracts) != 0 {\n\t\t\treturn fmt.Errorf(\"Expected 0 expired contracts, got %v\", len(rcExpired.ExpiredContracts))\n\t\t}\n\t\t// checkContracts will confirm correct number of inactive and active contracts\n\t\tif err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = checkRenewedContracts(rc.ActiveContracts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Confirm contract end heights were set properly End height should be the\n\t// end of the next period as the contracts are renewed due to reaching the\n\t// renew window\n\trc, err = r.RenterInactiveContractsGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, c := range rc.ActiveContracts {\n\t\tif c.EndHeight != currentPeriodStart+(2*period)+renewWindow && c.GoodForRenew {\n\t\t\tt.Log(\"Endheight:\", c.EndHeight)\n\t\t\tt.Log(\"Allowance Period:\", 
period)\n\t\t\tt.Log(\"Renew Window:\", renewWindow)\n\t\t\tt.Log(\"Current Period:\", currentPeriodStart)\n\t\t\tt.Fatal(\"Contract endheight not set to Current period + 2 * Allowance Period + Renew Window\")\n\t\t}\n\t}\n\n\t// Record inactive contracts\n\tinactiveContracts := rc.InactiveContracts\n\tinactiveContractIDMap := make(map[types.FileContractID]struct{})\n\tfor _, c := range inactiveContracts {\n\t\tinactiveContractIDMap[c.ID] = struct{}{}\n\t}\n\n\t// Mine to force inactive contracts to be expired contracts\n\tm := tg.Miners()[0]\n\tcg, err = r.ConsensusGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := 0; i < int(inactiveContracts[0].EndHeight-cg.Height+types.MaturityDelay); i++ {\n\t\tif err = m.MineBlock(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t// Waiting for nodes to sync\n\tif err = tg.Sync(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Confirm contracts, the expired contracts should now be the same contracts\n\t// as the previous inactive contracts.\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\trc, err = r.RenterExpiredContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(rc.ActiveContracts) != len(tg.Hosts()) {\n\t\t\treturn errors.New(\"Waiting for active contracts to form\")\n\t\t}\n\t\tif len(rc.ExpiredContracts) != len(inactiveContracts) {\n\t\t\treturn fmt.Errorf(\"Expected the same number of expired and inactive contracts; got %v expired and %v inactive\", len(rc.ExpiredContracts), len(inactiveContracts))\n\t\t}\n\t\tfor _, c := range inactiveContracts {\n\t\t\tif _, ok := inactiveContractIDMap[c.ID]; !ok {\n\t\t\t\treturn errors.New(\"ID from rc not found in inactiveContracts\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Renewing contracts by spending is very time consuming, the rest of the\n\t// test is only run during vlong so the rest of the test package doesn't\n\t// time out\n\tif !build.VLONG {\n\t\treturn\n\t}\n\n\t// Record 
current active and expired contracts\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\trc, err = r.RenterContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(rc.ActiveContracts) != len(tg.Hosts()) {\n\t\t\treturn fmt.Errorf(\"waiting for active contracts to form\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trc, err = r.RenterExpiredContractsGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tactiveContracts := rc.ActiveContracts\n\texpiredContracts := rc.ExpiredContracts\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpiredContractIDMap := make(map[types.FileContractID]struct{})\n\tfor _, c := range expiredContracts {\n\t\texpiredContractIDMap[c.ID] = struct{}{}\n\t}\n\n\t// Capturing end height to compare against renewed contracts\n\tendHeight := rc.ActiveContracts[0].EndHeight\n\n\t// Renew contracts by running out of funds\n\tstartingUploadSpend, err := renewContractsBySpending(r, tg)\n\tif err != nil {\n\t\tr.PrintDebugInfo(t, true, true, true)\n\t\tt.Fatal(err)\n\t}\n\tnumRenewals++\n\n\t// Confirm contracts were renewed as expected. 
Active contracts prior to\n\t// renewal should now be in the inactive contracts\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\trc, err = r.RenterInactiveContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(rc.ActiveContracts) != len(tg.Hosts()) {\n\t\t\treturn errors.New(\"Waiting for active contracts to form\")\n\t\t}\n\t\trcExpired, err := r.RenterExpiredContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Confirm active and inactive contracts\n\t\tinactiveContractIDMap := make(map[types.FileContractID]struct{})\n\t\tfor _, c := range rc.InactiveContracts {\n\t\t\tinactiveContractIDMap[c.ID] = struct{}{}\n\t\t}\n\t\tfor _, c := range activeContracts {\n\t\t\tif _, ok := inactiveContractIDMap[c.ID]; !ok && c.UploadSpending.Cmp(startingUploadSpend) <= 0 {\n\t\t\t\treturn errors.New(\"ID from activeContacts not found in rc\")\n\t\t\t}\n\t\t}\n\n\t\t// Check that there are inactive contracts, and that the inactive\n\t\t// contracts correctly mark the GoodForUpload and GoodForRenew fields as\n\t\t// false.\n\t\tif len(rc.InactiveContracts) == 0 {\n\t\t\treturn errors.New(\"no reported inactive contracts\")\n\t\t}\n\t\tfor _, c := range rc.InactiveContracts {\n\t\t\tif c.GoodForUpload || c.GoodForRenew {\n\t\t\t\treturn errors.New(\"an inactive contract is being reported as either good for upload or good for renew\")\n\t\t\t}\n\t\t}\n\n\t\t// Confirm expired contracts\n\t\tif len(expiredContracts) != len(rcExpired.ExpiredContracts) {\n\t\t\treturn fmt.Errorf(\"Didn't get expected number of expired contracts, expected %v got %v\", len(expiredContracts), len(rcExpired.ExpiredContracts))\n\t\t}\n\t\tfor _, c := range rcExpired.ExpiredContracts {\n\t\t\tif _, ok := expiredContractIDMap[c.ID]; !ok {\n\t\t\t\treturn errors.New(\"ID from rcExpired not found in expiredContracts\")\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Confirm contract end heights were set 
properly\n\t// End height should not have changed since the renewal\n\t// was due to running out of funds\n\trc, err = r.RenterContractsGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, c := range rc.ActiveContracts {\n\t\tif c.EndHeight != endHeight && c.GoodForRenew && c.UploadSpending.Cmp(startingUploadSpend) <= 0 {\n\t\t\tt.Log(\"Allowance Period:\", period)\n\t\t\tt.Log(\"Current Period:\", currentPeriodStart)\n\t\t\tt.Fatalf(\"Contract endheight Changed, EH was %v, expected %v\\n\", c.EndHeight, endHeight)\n\t\t}\n\t}\n\n\t// Mine blocks to force contract renewal to start with fresh set of contracts\n\tif err = renewContractsByRenewWindow(r, tg); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnumRenewals++\n\n\t// Confirm Contracts were renewed as expected\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\trc, err := r.RenterInactiveContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trcExpired, err := r.RenterExpiredContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// checkContracts will confirm correct number of inactive and active contracts\n\t\tif err = checkContracts(len(tg.Hosts()), numRenewals, append(rc.InactiveContracts, rcExpired.ExpiredContracts...), rc.ActiveContracts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test canceling contract\n\t// Grab contract to cancel\n\trc, err = r.RenterContractsGet()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcontract := rc.ActiveContracts[0]\n\t// Cancel Contract\n\tif err := r.RenterContractCancelPost(contract.ID); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Add a new host so new contract can be formed\n\thostParams := node.Host(testDir + \"/host\")\n\t_, err = tg.AddNodes(hostParams)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = build.Retry(200, 100*time.Millisecond, func() error {\n\t\t// Check that Contract is now in inactive contracts and no longer in Active contracts\n\t\trc, err = 
r.RenterInactiveContractsGet()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Confirm Renter has the expected number of contracts, meaning canceled contract should have been replaced.\n\t\tif len(rc.ActiveContracts) < len(tg.Hosts())-1 {\n\t\t\treturn fmt.Errorf(\"Canceled contract was not replaced, only %v active contracts, expected at least %v\", len(rc.ActiveContracts), len(tg.Hosts())-1)\n\t\t}\n\t\tfor _, c := range rc.ActiveContracts {\n\t\t\tif c.ID == contract.ID {\n\t\t\t\treturn errors.New(\"Contract not cancelled, contract found in Active Contracts\")\n\t\t\t}\n\t\t}\n\t\ti := 1\n\t\tfor _, c := range rc.InactiveContracts {\n\t\t\tif c.ID == contract.ID {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif i == len(rc.InactiveContracts) {\n\t\t\t\treturn errors.New(\"Contract not found in Inactive Contracts\")\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func ReconcileTesterWithoutRedundantCheck(t *testing.T, method string,\n\tcurrent, revised interface{},\n\tcreateCalls, updateCalls map[string][]MockFuncCall,\n\treconcile func(*MockClient, interface{}) error,\n\tlistInvolved bool,\n\tinitObjects ...client.Object) {\n\n\t// initialize client\n\tc := NewMockClient()\n\tc.AddObjects(initObjects)\n\n\t// test create new\n\tmethodPlus := fmt.Sprintf(\"%s(create)\", method)\n\ttestReconcileForResource(t, c, methodPlus, current, createCalls, reconcile)\n\n\t// test updates required\n\tmethodPlus = fmt.Sprintf(\"%s(update-with-change)\", method)\n\ttestReconcileForResource(t, c, methodPlus, revised, updateCalls, reconcile)\n}",
"func TestCrossLayerUser(t *testing.T) {\n\tzeroTime := hexutil.Uint64(0)\n\tfutureTime := hexutil.Uint64(20)\n\tfarFutureTime := hexutil.Uint64(2000)\n\ttests := []regolithScheduledTest{\n\t\t{name: \"NoRegolith\", regolithTime: nil, activateRegolith: false},\n\t\t{name: \"NotYetRegolith\", regolithTime: &farFutureTime, activateRegolith: false},\n\t\t{name: \"RegolithAtGenesis\", regolithTime: &zeroTime, activateRegolith: true},\n\t\t{name: \"RegolithAfterGenesis\", regolithTime: &futureTime, activateRegolith: true},\n\t}\n\tfor _, test := range tests {\n\t\ttest := test // Use a fixed reference as the tests run in parallel\n\t\tt.Run(test.name, func(gt *testing.T) {\n\t\t\trunCrossLayerUserTest(gt, test)\n\t\t})\n\t}\n}",
"func init() {\n\ttesting.AddTest(&testing.Test{\n\t\tFunc: RecreateUserVaultTPM1,\n\t\tDesc: \"Verifies that for TPMv1.2 devices, cryptohome recreates user's vault directory when the TPM is re-owned\",\n\t\tContacts: []string{\n\t\t\t\"[email protected]\",\n\t\t\t\"[email protected]\",\n\t\t},\n\t\tSoftwareDeps: []string{\"reboot\", \"tpm1\"},\n\t\tAttr: []string{\"group:hwsec_destructive_func\"},\n\t\tTimeout: 5 * time.Minute,\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WarningTest wraps testers to signify that the tester is a warning test
|
func WarningTest(f func(ctx *TestContext)) Tester {
return testFunc{f, WarningTests}
}
|
[
"func (t *Test) Warn(reason string) {\n\tt.Warned = true\n\tt.Err = errors.New(reason)\n}",
"func TestNewWarn(tb testing.TB, options ...zap.Option) (*zap.Logger, *observer.ObservedLogs) {\n\ttb.Helper()\n\n\treturn TestNewWithLevel(tb, zapcore.WarnLevel, options...)\n}",
"func (m *MockLogging) Warningf(arg0 string, arg1 ...interface{}) {\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Warningf\", varargs...)\n}",
"func (t *UpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {\n\ttolerateDuringSkew := exutil.TolerateVersionSkewInTests()\n\tfiringAlertsWithBugs := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-kube-apiserver-operator\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-kube-apiserver-operator\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDegraded\", \"name\": \"openshift-apiserver\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDown\", \"name\": \"authentication\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDown\", \"name\": \"machine-config\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1955300\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDegraded\", \"name\": \"authentication\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeDaemonSetRolloutStuck\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1943667\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeAPIErrorBudgetBurn\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1953798\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"AggregatedAPIDown\", \"namespace\": \"default\", \"name\": 
\"v1beta1.metrics.k8s.io\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1970624\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Should be removed one release after the attached bugzilla is fixed.\n\t\t\tSelector: map[string]string{\"alertname\": \"HighlyAvailableWorkloadIncorrectlySpread\", \"namespace\": \"openshift-monitoring\", \"workload\": \"prometheus-k8s\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1949262\",\n\t\t},\n\t\t{\n\t\t\t// Should be removed one release after the attached bugzilla is fixed.\n\t\t\tSelector: map[string]string{\"alertname\": \"HighlyAvailableWorkloadIncorrectlySpread\", \"namespace\": \"openshift-monitoring\", \"workload\": \"alertmanager-main\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1955489\",\n\t\t},\n\t\t{\n\t\t\t// Should be removed one release after the attached bugzilla is fixed, or after that bug is fixed in a backport to the previous minor.\n\t\t\tSelector: map[string]string{\"alertname\": \"ExtremelyHighIndividualControlPlaneCPU\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1985073\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeJobFailed\", \"namespace\": \"openshift-multus\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=2054426\",\n\t\t\tMatches: func(sample *model.Sample) bool {\n\t\t\t\t// Only match if the job_name label starts with ip-reconciler:\n\t\t\t\tif strings.HasPrefix(string(sample.Metric[model.LabelName(\"job_name\")]), \"ip-reconciler-\") {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t}\n\tallowedFiringAlerts := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"TargetDown\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but 
we can allow it to be down\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeDeploymentReplicasMismatch\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t}\n\n\tpendingAlertsWithBugs := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterMonitoringOperatorReconciliationErrors\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1932624\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeClientErrors\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=1925698\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"NetworkPodsCrashLooping\"},\n\t\t\tText: \"https://bugzilla.redhat.com/show_bug.cgi?id=2009078\",\n\t\t},\n\t}\n\tallowedPendingAlerts := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"etcdMemberCommunicationSlow\"},\n\t\t\tText: \"Excluded because it triggers during upgrade (detects ~5m of high latency immediately preceeding the end of the test), and we don't want to change the alert because it is correct\",\n\t\t},\n\t}\n\n\t// we exclude alerts that have their own separate tests.\n\tfor _, alertTest := range allowedalerts.AllAlertTests(context.TODO(), nil, 0) {\n\t\tswitch alertTest.AlertState() {\n\t\tcase allowedalerts.AlertPending:\n\t\t\t// a pending test covers pending and everything above (firing)\n\t\t\tallowedPendingAlerts = append(allowedPendingAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\t\tallowedFiringAlerts = 
append(allowedFiringAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\tcase allowedalerts.AlertInfo:\n\t\t\t// an info test covers all firing\n\t\t\tallowedFiringAlerts = append(allowedFiringAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t}\n\n\tknownViolations := sets.NewString()\n\tunexpectedViolations := sets.NewString()\n\tunexpectedViolationsAsFlakes := sets.NewString()\n\tdebug := sets.NewString()\n\n\tg.By(\"Checking for alerts\")\n\n\tstart := time.Now()\n\n\t// Block until upgrade is done\n\tg.By(\"Waiting for upgrade to finish before checking for alerts\")\n\t<-done\n\n\t// Additonal delay after upgrade completion to allow pending alerts to settle\n\tg.By(\"Waiting before checking for alerts\")\n\ttime.Sleep(1 * time.Minute)\n\n\ttestDuration := time.Now().Sub(start).Round(time.Second)\n\n\t// Invariant: No non-info level alerts should have fired during the upgrade\n\tfiringAlertQuery := fmt.Sprintf(`\nsort_desc(\ncount_over_time(ALERTS{alertstate=\"firing\",severity!=\"info\",alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured\"}[%[1]s:1s])\n) > 0\n`, testDuration)\n\tresult, err := helper.RunQuery(context.TODO(), t.prometheusClient, firingAlertQuery)\n\to.Expect(err).NotTo(o.HaveOccurred(), \"unable to check firing alerts during upgrade\")\n\tfor _, series := range result.Data.Result {\n\t\tlabels := helper.StripLabels(series.Metric, \"alertname\", \"alertstate\", \"prometheus\")\n\t\tviolation := fmt.Sprintf(\"alert %s fired for %s seconds with labels: %s\", series.Metric[\"alertname\"], series.Value, helper.LabelsAsSelector(labels))\n\t\tif cause := allowedFiringAlerts.Matches(series); cause != nil {\n\t\t\tdebug.Insert(fmt.Sprintf(\"%s (allowed: %s)\", violation, 
cause.Text))\n\t\t\tcontinue\n\t\t}\n\t\tif cause := firingAlertsWithBugs.Matches(series); cause != nil {\n\t\t\tknownViolations.Insert(fmt.Sprintf(\"%s (open bug: %s)\", violation, cause.Text))\n\t\t} else {\n\t\t\tunexpectedViolations.Insert(violation)\n\t\t}\n\t}\n\n\t// Invariant: There should be no pending alerts 1m after the upgrade completes\n\tpendingAlertQuery := fmt.Sprintf(`\nsort_desc(\n time() * ALERTS + 1\n -\n last_over_time((\n time() * ALERTS{alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured\",alertstate=\"pending\",severity!=\"info\"}\n unless\n ALERTS offset 1s\n )[%[1]s:1s])\n)\n`, testDuration)\n\tresult, err = helper.RunQuery(context.TODO(), t.prometheusClient, pendingAlertQuery)\n\to.Expect(err).NotTo(o.HaveOccurred(), \"unable to retrieve pending alerts after upgrade\")\n\tfor _, series := range result.Data.Result {\n\t\tlabels := helper.StripLabels(series.Metric, \"alertname\", \"alertstate\", \"prometheus\")\n\t\tviolation := fmt.Sprintf(\"alert %s pending for %s seconds with labels: %s\", series.Metric[\"alertname\"], series.Value, helper.LabelsAsSelector(labels))\n\t\tif cause := allowedPendingAlerts.Matches(series); cause != nil {\n\t\t\tdebug.Insert(fmt.Sprintf(\"%s (allowed: %s)\", violation, cause.Text))\n\t\t\tcontinue\n\t\t}\n\t\tif cause := pendingAlertsWithBugs.Matches(series); cause != nil {\n\t\t\tknownViolations.Insert(fmt.Sprintf(\"%s (open bug: %s)\", violation, cause.Text))\n\t\t} else {\n\t\t\t// treat pending errors as a flake right now because we are still trying to determine the scope\n\t\t\t// TODO: move this to unexpectedViolations later\n\t\t\tunexpectedViolationsAsFlakes.Insert(violation)\n\t\t}\n\t}\n\n\tif len(debug) > 0 {\n\t\tframework.Logf(\"Alerts were detected during upgrade which are allowed:\\n\\n%s\", strings.Join(debug.List(), \"\\n\"))\n\t}\n\tif len(unexpectedViolations) > 0 {\n\t\tif !tolerateDuringSkew {\n\t\t\tframework.Failf(\"Unexpected alerts fired or pending during the upgrade:\\n\\n%s\", 
strings.Join(unexpectedViolations.List(), \"\\n\"))\n\t\t}\n\t}\n\tif flakes := sets.NewString().Union(knownViolations).Union(unexpectedViolations).Union(unexpectedViolationsAsFlakes); len(flakes) > 0 {\n\t\tdisruption.FrameworkFlakef(f, \"Unexpected alert behavior during upgrade:\\n\\n%s\", strings.Join(flakes.List(), \"\\n\"))\n\t}\n\tframework.Logf(\"No alerts fired during upgrade\")\n}",
"func Fail(message string) {\n\tfmt.Printf(\"[FAILED] [%s] %s\\n\", testname, message)\n}",
"func (m *MockSession) Warn(arg0 string, arg1 ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Warn\", varargs...)\n}",
"func UseTestLogger(t testing.TB) {\n\tmu.Lock()\n\tlogger = t\n\tmu.Unlock()\n\tSetLevel(WarningLevel)\n}",
"func (e *Event) NoTestsWarn() bool {\n\treturn e.Test != \"\" && e.Output == \"testing: warning: no tests to run\\n\"\n}",
"func (cx *TestContext) FailNow() {\n\tcx.t.FailNow()\n}",
"func (t *T) FailNow() {\n\tt.context.incFailCount()\n\tt.test.FailNow()\n}",
"func (m *MockMachine) Warnf(arg0, arg1 string, arg2 ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Warnf\", varargs...)\n}",
"func (_m *MockUI) Warn(msg string) {\n\t_m.ctrl.Call(_m, \"Warn\", msg)\n}",
"func TestFailures(t *testing.T) {\n\tif True() {\n\t\tt.SkipNow()\n\t}\n\n\tAssert(t, !True(), \"We have a problem Houston!\", true)\n\tEquals(t, false, true)\n\tNotEquals(t, 0, 0)\n\tOK(t, err)\n\terr = errors.New(\"\")\n\tNotOK(t, err)\n}",
"func Warning(val *string) {\n\tfmt.Printf(\"[WARNING] %s\\n\", tea.StringValue(val))\n}",
"func (n *noop) Warn(msg ...interface{}) {}",
"func DefPredeclaredTestFuncs() {}",
"func TestLoggerStreamsProperLogDataForWarning(t *testing.T) {\n\tl := NewLogger(\"TestLoggerStreamsProperLogDataForWarning\", false)\n\n\tstream := newMockStream(1)\n\tl.RegisterStream(Warning, stream)\n\n\tl.Warning(\"Hi there\")\n\n\tmessage := <-stream.out\n\texpected := `{\"tag\":\"TestLoggerStreamsProperLogDataForWarning\",\"message\":\"Hi there\"}`\n\tif message != expected {\n\t\tt.Fatalf(\"Unexpected message received from stream. Expected: `%s`, found: `%s`\", expected, message)\n\t}\n\n\tl.SetMetadata(map[string]string{\"custom\": \"field\"})\n\tl.Warningf(\"this log should have metadata\")\n\n\tmessage = <-stream.out\n\texpected = `{\"tag\":\"TestLoggerStreamsProperLogDataForWarning\",\"message\":\"this log should have metadata\",\"metadata\":{\"custom\":\"field\"}}`\n\tif message != expected {\n\t\tt.Fatalf(\"Unexpected message received from stream. Expected: `%s`, found: `%s`\", expected, message)\n\t}\n}",
"func (ft *fakeT) FailNow() {\n\tft.fatal(\"\")\n}",
"func TestNoTraces(_ *testing.T) {\n\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Skip aborts the test with a SKIP status, that is considered passing
|
func (t *TestContext) Skip() {
panic(newTestResult(resultSkipped, "", 2, t))
}
|
[
"func (t *Test) Skip(reason string) {\n\tt.Skipped = true\n\tt.Err = errors.New(reason)\n}",
"func skipTest(t *testing.T, flagName string) {\n\tif shouldSkipTest(flagName) {\n\t\tt.Logf(\"Skipping test due to %s flag\", flagName)\n\t\tt.SkipNow()\n\t}\n}",
"func (t *T) Skip(args ...interface{}) {\n\tt.context.log(t.decorate(fmt.Sprintln(args...)))\n\tt.test.SkipNow()\n}",
"func Skipped(testCase reporters.JUnitTestCase) bool { return testCase.Skipped != nil }",
"func (s *FSMSuite) TestRollbackPlanSkip(c *check.C) {\n\tengine := newTestEngine(func() storage.OperationPlan {\n\t\treturn s.planner.newPlan(\n\t\t\ts.planner.initPhase(storage.OperationPhaseStateCompleted),\n\t\t\ts.planner.bootstrapPhase(\n\t\t\t\ts.planner.bootstrapSubPhase(\"node-1\", storage.OperationPhaseStateRolledBack),\n\t\t\t\ts.planner.bootstrapSubPhase(\"node-2\", storage.OperationPhaseStateCompleted)),\n\t\t\ts.planner.upgradePhase(storage.OperationPhaseStateUnstarted))\n\t})\n\n\tfsm, err := New(Config{Engine: engine})\n\tc.Assert(err, check.IsNil)\n\n\terr = fsm.RollbackPlan(context.TODO(), utils.DiscardProgress, false)\n\tc.Assert(err, check.IsNil)\n\n\tplan, err := fsm.GetPlan()\n\tc.Assert(err, check.IsNil)\n\t// Make sure plan is rolled back now.\n\tc.Assert(IsRolledBack(*plan), check.Equals, true)\n\t// Make sure unstarted/rolled back phases were skipped over.\n\ts.checkChangelog(c, engine.changelog, s.planner.newChangelog(\n\t\ts.planner.bootstrapSubChange(\"node-2\", storage.OperationPhaseStateInProgress),\n\t\ts.planner.bootstrapSubChange(\"node-2\", storage.OperationPhaseStateRolledBack),\n\t\ts.planner.initChange(storage.OperationPhaseStateInProgress),\n\t\ts.planner.initChange(storage.OperationPhaseStateRolledBack),\n\t))\n}",
"func (pr *CycleReporter) Skip() {\n\tpr.done()\n\tif pr == nil || pr.metric.skips == nil {\n\t\treturn\n\t}\n\tpr.metric.skips.Add(1, pr.metric.fields...)\n}",
"func (t *Test) Skipf(format string, args ...interface{}) {\n\tt.Skipped = true\n\tt.Err = fmt.Errorf(format, args...)\n}",
"func SkipWithPraefect(t testing.TB, reason string) {\n\tif IsPraefectEnabled() {\n\t\tt.Skipf(reason)\n\t}\n}",
"func (h *harnessTest) Skipf(format string, args ...interface{}) {\n\th.t.Skipf(format, args...)\n}",
"func AssertSkipped(t *testing.T, err error) {\n\tt.Helper()\n\trequire.True(t, errors.As(err, &pipe.ErrSkip{}), \"expected a pipe.ErrSkip but got %v\", err)\n}",
"func Test_VHost_Next_Skip(t *testing.T) {\n\twant := \"example.com\"\n\n\tapp := fiber.New()\n\tapp.Use(New(Config{\n\t\tNext: func(c *fiber.Ctx) bool {\n\t\t\tif c.Get(\"X-test-skip\") == \"yes\" {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t},\n\t\tHostname: want,\n\t\tHandler: func(c *fiber.Ctx) error {\n\t\t\tt.Error(\"Error: did not skip when Next returned true\")\n\t\t\treturn nil\n\t\t},\n\t\tHostnameRegexpString: \"\",\n\t}))\n\n\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\treturn c.SendString(\"test\")\n\t})\n\n\treq := httptest.NewRequest(\"GET\", \"http:\"+want, nil)\n\treq.Header.Add(\"X-test-skip\", \"yes\")\n\tapp.Test(req)\n}",
"func (m *MockGridBuilder) Skip(arg0 int) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Skip\", arg0)\n}",
"func (t *T) Skipped() bool {\n\treturn t.test.Skipped()\n}",
"func SkipTestdata(path, name string) bool { return name == \"testdata\" }",
"func skip(t *testing.T, tree *Tree) {\n\tt.Run(tree.name, func(t *testing.T) {\n\t\tfor _, child := range tree.children {\n\t\t\tskip(t, child)\n\t\t}\n\t\tt.Skip(\"tea skipped: dependency failed\")\n\t})\n}",
"func (o *IndexAPIKeysParams) SetSkip(skip *int32) {\n\to.Skip = skip\n}",
"func (l *Lexer) skip() {\n\tl.stepCursor()\n}",
"func (w *work) printSkip(t *tester, msg string) {\n\tif t.json {\n\t\ttype event struct {\n\t\t\tTime time.Time\n\t\t\tAction string\n\t\t\tPackage string\n\t\t\tOutput string `json:\",omitempty\"`\n\t\t}\n\t\tenc := json.NewEncoder(&w.out)\n\t\tev := event{Time: time.Now(), Package: w.dt.name, Action: \"start\"}\n\t\tenc.Encode(ev)\n\t\tev.Action = \"output\"\n\t\tev.Output = msg\n\t\tenc.Encode(ev)\n\t\tev.Action = \"skip\"\n\t\tev.Output = \"\"\n\t\tenc.Encode(ev)\n\t\treturn\n\t}\n\tfmt.Fprintln(&w.out, msg)\n}",
"func (expStatus *ExperimentStatus) SkippedExperimentStatus(expName, engineName string) {\n\texpStatus.Name = expName\n\texpStatus.Runner = engineName + \"-runner\"\n\texpStatus.ExpPod = \"N/A\"\n\texpStatus.Status = v1alpha1.ExperimentSkipped\n\texpStatus.Verdict = \"Fail\"\n\texpStatus.LastUpdateTime = metav1.Now()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ServerUrl returns the URL of the vertex server we are testing
|
func (t *TestContext) ServerUrl() string {
return t.serverURl
}
|
[
"func serverUrl() string {\n\treturn fmt.Sprintf(\"http://localhost:%d/\", *port)\n}",
"func (s *httpServer) GetServerURL() *url.URL { return s.serverURL }",
"func (s *MockServer) URL() string {\n\treturn s.testServer.URL\n}",
"func (c *GrpcClient) GetServerURL() string {\n\treturn c.connection.Target()\n}",
"func ServerURL() (string, error) {\n\tport, err := ConfigVal(\"httpd\", \"port\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\taddr, err := ConfigVal(\"httpd\", \"bind_address\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif addr == \"0.0.0.0\" {\n\t\taddr = \"127.0.0.1\"\n\t}\n\treturn \"http://\" + addr + \":\" + port + \"/\", nil\n}",
"func GetServerURL() string {\n\treturn fmt.Sprintf(\"%s://%s:%s\", getServerProtocol(), getServerAddr(), GetServerPort())\n}",
"func (m *MockHttpServer) Url() string {\n\treturn m.server.URL\n}",
"func (a *AccessClientSettings) GetServerUrl() string {\n\tif a == nil || a.ServerUrl == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.ServerUrl\n}",
"func (c *CrowdSettings) GetServerUrl() string {\n\tif c == nil || c.ServerUrl == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.ServerUrl\n}",
"func (s *Server) URL() string {\n\treturn s.url\n}",
"func (t *HTTPService) ServerAPIURL() string {\n\treturn t.serverURL\n}",
"func (a *acrAuthExtractor) ServerURL() string {\n\treturn fmt.Sprintf(\"%s.azurecr.io\", a.registryName)\n}",
"func (md *MdServer) URL() string {\n\treturn httpPrefix + md.BindAddr()\n}",
"func (c *ClientConfig) GetServerAddressURL() *url.URL {\n\tvar plunderURL url.URL\n\tplunderURL.Scheme = \"https\"\n\t// Build a url\n\tplunderURL.Host = fmt.Sprintf(\"%s:%d\", c.Address, +c.Port)\n\treturn &plunderURL\n}",
"func (c *Configuration) ServerPath() string {\n\tsrv, _ := url.Parse(c.context.GlobalString(\"server\"))\n\treturn strings.NewReplacer(\":\", \"_\", \"/\", string(os.PathSeparator)).Replace(srv.Host)\n}",
"func Server(serverUrl string, repoInfo *name.Registry) (string, error) {\n\tif serverUrl != \"\" {\n\t\turlObj, err := url.Parse(serverUrl)\n\t\tif err != nil || urlObj.Scheme != \"https\" {\n\t\t\treturn \"\", errors.Errorf(\"valid https URL required for trust server, got %s\", serverUrl)\n\t\t}\n\t\treturn serverUrl, nil\n\t}\n\tif repoInfo.RegistryStr() == name.DefaultRegistry {\n\t\treturn NotaryServer, nil\n\t}\n\n\treturn \"https://\" + repoInfo.Name(), nil\n}",
"func (config *Dendrite) RoomServerURL() string {\n\t// Hard code the roomserver to talk HTTP for now.\n\t// If we support HTTPS we need to think of a practical way to do certificate validation.\n\t// People setting up servers shouldn't need to get a certificate valid for the public\n\t// internet for an internal API.\n\treturn \"http://\" + string(config.Listen.RoomServer)\n}",
"func (s *Client) DFSURL() string {\n\treturn s.generatedServiceClientWithDFS().Endpoint()\n}",
"func Server() string {\n\treturn server\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FormatUrl returns a fully formatted URL for the context's route, with all path params replaced by their respective values in the pathParams map
|
func (t *TestContext) FormatUrl(pathParams Params) string {
u := fmt.Sprintf("%s%s", t.serverURl, t.api.FullPath(FormatPath(t.routePath, pathParams)))
logging.Debug("Formatted url: %s", u)
return u
}
|
[
"func FormatUrl(baseUrl, format, endpoint string, params ...string) string {\n\treturn fmt.Sprintf(\"%s%s\", baseUrl, fmt.Sprintf(format, endpoint, params))\n}",
"func formatURL(uri, client string) string {\n\turl, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn uri\n\t}\n\tv := url.Query()\n\tv.Set(\"application_name\", client)\n\turl.RawQuery = v.Encode()\n\treturn url.String()\n}",
"func (c Client) FormatURL(location string) string {\n\n\treturn fmt.Sprintf(\"%s/data/2.5/weather?q=%s&units=%s&appid=%s\", c.Base, location, c.Units, c.APIKey)\n\n}",
"func (fields *Fields) ToURL() string {\n\treturn toURL(fields.endpoint, fields.modifiers)\n}",
"func FormatURL(opts command.Options) (string, error) {\n\turl := opts.Url\n\n\tcfg, err := client.ParseDSN(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn cfg.FormatDSN(), nil\n}",
"func (r *HTTPRequest) generateUrlWithParameters() (string, error) {\n\turl, err := url.Parse(r.URL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tq := url.Query()\n\tif r.Parameters != nil && len(r.Parameters) > 0 {\n\t\tfor name, values := range r.Parameters {\n\t\t\tfor _, value := range values {\n\t\t\t\tq.Add(name, value)\n\t\t\t}\n\t\t}\n\t}\n\turl.RawQuery = q.Encode()\n\n\treturn url.String(), nil\n}",
"func (r *Router) URLFor(name string, args ...interface{}) string {\n\tif name == \"\" {\n\t\treturn \"\"\n\t}\n\tnode := r.nameNodes[name]\n\tif node == nil || len(node.format) == 0 {\n\t\treturn \"\"\n\t}\n\tformat := make([]byte, len(node.format))\n\tcopy(format, node.format)\n\tfor i := node.paramNum + 1; i <= len(args); i++ {\n\t\tformat = append(format, \"%v\"...)\n\t}\n\treturn fmt.Sprintf(string(format), args...)\n}",
"func FormatURL(url string) string {\n\treturn fmt.Sprintf(\"<a href=\\\"%s\\\">%s</a>\", html.EscapeString(url), html.EscapeString(url))\n}",
"func BuildURL(r *rest.Request, template string, params map[string]string) *url.URL {\n\turl := r.BaseUrl()\n\n\tpath := template\n\tfor k, v := range params {\n\t\tpath = strings.Replace(path, k, v, -1)\n\t}\n\turl.Path = path\n\n\treturn url\n}",
"func (broadcastController *BroadcastController) FormatAsRelativeLink(params ...httprouter.Param) string {\n\treturn formatURL(params, broadcastPath, channelIDPathParamKey)\n}",
"func URL(endpoint string, query string, args ...interface{}) string {\n\tif query != \"\" {\n\t\tendpoint = endpoint + query\n\t}\n\n\treturn fmt.Sprintf(endpoint, args...)\n}",
"func (conf config) ToURL() (string, error) {\n\tvv := url.Values{}\n\tif conf.Options != nil {\n\t\tfor k, v := range conf.Options {\n\t\t\tvv.Set(k, v)\n\t\t}\n\t}\n\n\tif conf.User == \"\" ||\n\t\tconf.Password == \"\" ||\n\t\tconf.Host == \"\" ||\n\t\tconf.Port == \"\" ||\n\t\tconf.Database == \"\" {\n\t\treturn \"\", fmt.Errorf(\"invalid connection url\")\n\t}\n\n\tu := url.URL{\n\t\tScheme: AdapterName,\n\t\tUser: url.UserPassword(conf.User, conf.Password),\n\t\tHost: fmt.Sprintf(\"%s:%s\", conf.Host, conf.Port),\n\t\tPath: conf.Database,\n\t\tForceQuery: false,\n\t\tRawQuery: vv.Encode(),\n\t}\n\treturn u.String(), nil\n}",
"func (this *ResourceURL) GenURL(context *Context, dependencies []interface{}) string {\n\tvar (\n\t\tparents []aorm.ID\n\t\tquery []string\n\t\te = url.QueryEscape\n\t)\n\n\tif len(dependencies) > 0 {\n\t\tfor _, dep := range dependencies {\n\t\t\tswitch dp := dep.(type) {\n\t\t\tcase *DependencyParent:\n\t\t\t\tif len(parents) == 0 {\n\t\t\t\t\tparents = make([]aorm.ID, this.Resource.PathLevel, this.Resource.PathLevel)\n\t\t\t\t}\n\t\t\t\tif dp.Value != nil {\n\t\t\t\t\tparents[dp.Meta.Resource.PathLevel] = dp.Value\n\t\t\t\t} else {\n\t\t\t\t\tparents[dp.Meta.Resource.PathLevel] = aorm.FakeID(\"{\" + dp.Meta.Name + \"}\")\n\t\t\t\t}\n\t\t\tcase *DependencyQuery:\n\t\t\t\tquery = append(query, dp.Param+\"={\"+dp.Meta.Name+\"}\")\n\t\t\tcase *DependencyValue:\n\t\t\t\tquery = append(query, dp.Param+\"=\"+e(fmt.Sprint(dp.Value)))\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(parents) > 0 {\n\t\tparent := this.Resource\n\t\tfor pathLevel := this.Resource.PathLevel - 1; pathLevel >= 0; pathLevel-- {\n\t\t\tparent = parent.ParentResource\n\t\t\tif parents[pathLevel].IsZero() {\n\t\t\t\tparents[pathLevel] = aorm.FakeID(context.URLParam(parent.ParamIDName()))\n\t\t\t}\n\t\t}\n\t}\n\n\tvar uri string\n\tif this.recorde {\n\t\turi = this.Resource.GetContextURI(context, aorm.FakeID(\"{ID}\"), parents...)\n\t} else {\n\t\turi = this.Resource.GetContextIndexURI(context, parents...)\n\t}\n\n\tif this.Scheme != \"\" {\n\t\ts := this.Resource.GetSchemeByName(this.Scheme)\n\t\turi += s.Path()\n\t}\n\n\turi += this.Suffix\n\n\tif this.FormatURI != nil {\n\t\turi = this.FormatURI(this, context, uri)\n\t}\n\n\tif this.Layout != \"\" {\n\t\tquery = append(query, P_LAYOUT+\"=\"+e(this.Layout))\n\t}\n\n\tif this.Display != \"\" {\n\t\tquery = append(query, P_DISPLAY+\"=\"+e(this.Display))\n\t}\n\n\tfor _, scope := range this.Scopes {\n\t\tquery = append(query, \"scope[]=\"+e(scope))\n\t}\n\n\tfor fname, fvalue := range this.Filters {\n\t\tquery = append(query, 
\"filter[\"+fname+\"].Value=\"+e(fvalue))\n\t}\n\n\tif this.FilterF != nil {\n\t\tfor fname, fvalue := range this.FilterF {\n\t\t\tquery = append(query, \"filter[\"+fname+\"].Value=\"+e(fvalue(context)))\n\t\t}\n\t}\n\n\tif this.DynamicFilters != nil {\n\t\tdynamicFilters := make(map[string]string)\n\t\tthis.DynamicFilters(context, dynamicFilters)\n\n\t\tfor fname, fvalue := range dynamicFilters {\n\t\t\tquery = append(query, \"filter[\"+fname+\"].Value=\"+e(fvalue))\n\t\t}\n\t}\n\n\tfor name, value := range this.Query {\n\t\tswitch t := value.(type) {\n\t\tcase func(ctx *Context) (string, bool):\n\t\t\tif value, ok := t(context); ok {\n\t\t\t\tquery = append(query, name+\"=\"+e(value))\n\t\t\t}\n\t\tdefault:\n\t\t\tquery = append(query, name+\"=\"+e(utils.ToString(value)))\n\t\t}\n\t}\n\n\tfor _, handler := range this.URLHandlers {\n\t\thandler(context, uri, &query)\n\t}\n\n\tif len(query) > 0 {\n\t\turi += \"?\" + strings.Join(query, \"&\")\n\t}\n\n\treturn uri\n}",
"func FormatAnswersURL(questions []Question) (string, error) {\n\tformatted, err := QuestionsIDToColonSeparateString(questions)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"https://api.stackexchange.com/2.2/questions/%s/answers?filter=withbody&site=stackoverflow\", formatted), nil\n}",
"func (m *Graph) generateURL(path string, paramQuery ParamQuery) string {\n\t// re-initialize paramQuery\n\tif paramQuery == nil {\n\t\tparamQuery = ParamQuery{}\n\t}\n\n\t// default automatically add secretProof\n\tparamQuery[\"appsecret_proof\"] = m.secretProof\n\n\tquery := paramQuery.EncodeURL(\"all\")\n\tif path != \"\" {\n\t\treturn fmt.Sprintf(\"%s/%s?%s\", m.url, path, query)\n\t}\n\n\treturn fmt.Sprintf(\"%s/%s\", m.url, path)\n}",
"func MakeURLToEndpoint(apiPrefix, endpoint string, args ...interface{}) string {\n\tre := regexp.MustCompile(`\\{[a-zA-Z_0-9]+\\}`)\n\tendpoint = re.ReplaceAllString(endpoint, \"%v\")\n\treturn apiPrefix + fmt.Sprintf(endpoint, args...)\n}",
"func (p *kvsParams) formatEndpoint() string {\n\ts := fmt.Sprintf(\"%s:%d\", p.host, p.port)\n\tif p.provider == \"etcd\" {\n\t\ts = \"http://\" + s\n\t}\n\treturn s\n}",
"func UrlWithParamsForRoomConnection(baseurl, roomID, userID string) string {\n\tif !strings.HasSuffix(baseurl, \"?\") {\n\t\tbaseurl += \"?\"\n\t}\n\treturn baseurl + \"room=\" + roomID + \"&user=\" + userID\n}",
"func (c AppStatsContext) URL() string {\n\tu := url.URL{\n\t\tPath: detailsURL,\n\t\tRawQuery: fmt.Sprintf(\"time=%v\", c.stats.Start.Nanosecond()),\n\t}\n\treturn u.String()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewRequest creates a new http request to the route we are testing now, with optional values for post/get, and optional path params
|
func (t *TestContext) NewRequest(method string, values url.Values, pathParams Params) (*http.Request, error) {
var body io.Reader
u := t.FormatUrl(pathParams)
if values != nil && len(values) > 0 {
if method == "POST" {
body = bytes.NewReader([]byte(values.Encode()))
} else {
u += "?" + values.Encode()
}
}
req, err := http.NewRequest(method, u, body)
// for POST requests we need to correctly set the content type
if err == nil && body != nil {
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
return req, err
}
|
[
"func (s *Wire) NewRequest(method string, url string, payload interface{}) (req *http.Request, err error) {\n\n var body []byte\n\n if payload == nil {\n payload = map[string]interface{}{}\n }\n\n if body, err = json.Marshal(payload); err == nil {\n\n if req, err = http.NewRequest(method, s.BuildFullUrl(url), bytes.NewBuffer(body)); err == nil {\n\n req.Header.Set(\"Accept\", \"application/json\")\n req.Header.Set(\"Accept-charset\", \"utf-8\")\n\n if method == \"POST\" || method == \"DELETE\" {\n req.Header.Add(\"Content-Type\", \"application/json;charset=utf-8\")\n }\n\n }\n }\n\n return req, err\n}",
"func (s *HttpConfigService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}",
"func CreateTestRequest(httpMethod string, urlString string) *http.Request {\n\trequest, _ := http.NewRequest(httpMethod, urlString, nil)\n\trequest.RequestURI = urlString\n\treturn request\n}",
"func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Reader) (*http.Request, error) {\n\treqPath, err := url.Parse(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turl := *c.dsn // Make a copy\n\turl.Path = reqPath.Path\n\turl.RawQuery = reqPath.RawQuery\n\treq, err := http.NewRequest(method, url.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req.WithContext(ctx), nil\n}",
"func GenerateNewRequest(url, payload string, options model.Options) *http.Request {\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\tif options.Data != \"\" {\n\t\td := []byte(payload)\n\t\treq, _ = http.NewRequest(\"POST\", url, bytes.NewBuffer(d))\n\t\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t}\n\n\tif options.Header != \"\" {\n\t\th := strings.Split(options.Header, \": \")\n\t\tif len(h) > 1 {\n\t\t\treq.Header.Add(h[0], h[1])\n\t\t}\n\t}\n\tif options.Cookie != \"\" {\n\t\treq.Header.Add(\"Cookie\", options.Cookie)\n\t}\n\tif options.UserAgent != \"\" {\n\t\treq.Header.Add(\"User-Agent\", options.UserAgent)\n\t} else {\n\t\treq.Header.Add(\"User-Agent\", \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0\")\n\t}\n\treturn req\n}",
"func (c *case_) newRequest(u string) (*http.Request, error) {\n\tbody := c.requestBody()\n\tr, err := http.NewRequest(c.method, u, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttyp := c.posttype\n\tif body != nil && typ == \"\" {\n\t\ttyp = \"application/x-www-form-urlencoded\"\n\t}\n\tif typ != \"\" {\n\t\tr.Header.Set(\"Content-Type\", typ)\n\t}\n\treturn r, nil\n}",
"func (c *Inspector) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\t// Run custom request initialization if present\n\tif initRequest != nil {\n\t\tinitRequest(req)\n\t}\n\n\treturn req\n}",
"func HttpNewRequest(method string, url string, body io.Reader) (*http.Request, error) {\n\treturn http.NewRequest(method, url, body)\n}",
"func (s *RulesService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}",
"func NewMockRequest(method, path string) *http.Request {\n\treturn &http.Request{\n\t\tMethod: method,\n\t\tProto: \"http\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHost: \"localhost:8080\",\n\t\tRemoteAddr: \"127.0.0.1:8080\",\n\t\tRequestURI: path,\n\t\tHeader: http.Header{\n\t\t\tHeaderUserAgent: []string{\"go-sdk test\"},\n\t\t},\n\t\tURL: &url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost: \"localhost\",\n\t\t\tPath: path,\n\t\t\tRawPath: path,\n\t\t},\n\t}\n}",
"func (c *Client) NewRequest(method, name string) (req *http.Request, err error) {\n\treq, err = http.NewRequest(method, c.root + name, nil)\n\t// TODO: auth\n\treturn\n}",
"func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {\n\trel, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Relative URLs should be specified without a preceding slash since baseURL will have the trailing slash\n\trel.Path = strings.TrimLeft(rel.Path, \"/\")\n\n\tu := c.baseURL.ResolveReference(rel)\n\n\tvar buf io.ReadWriter\n\tif body != nil {\n\t\tbuf = new(bytes.Buffer)\n\t\terr = json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, u.String(), buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\treturn req, nil\n}",
"func (c *kredivoHttpClient) newReq(method string, fullPath string, body io.Reader, headers map[string]string) (*http.Request, error) {\n\treq, err := http.NewRequest(method, fullPath, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor key, value := range headers {\n\t\treq.Header.Set(key, value)\n\t}\n\n\treturn req, nil\n}",
"func (s *SecurePass) NewRequest(method, path string, data *url.Values) (*http.Request, error) {\n\tvar err error\n\tvar req *http.Request\n\n\tURL, err := s.makeRequestURL(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif data != nil {\n\t\tDebugLogger.Printf(\"Data Payload is %s\", data.Encode())\n\t\treq, err = http.NewRequest(method, URL, bytes.NewBufferString(data.Encode()))\n\t} else {\n\t\treq, err = http.NewRequest(method, URL, nil)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.setupRequestFieds(req)\n\treturn req, nil\n}",
"func (c *OpsWorksCM) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\t// Run custom request initialization if present\n\tif initRequest != nil {\n\t\tinitRequest(req)\n\t}\n\n\treturn req\n}",
"func (s *KeyPairsService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *DynamoDBStreams) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\t// Run custom request initialization if present\n\tif initRequest != nil {\n\t\tinitRequest(req)\n\t}\n\n\treturn req\n}",
"func (h *RequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...Decorator) (*http.Request, error) {\n\treq, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// By default, a nil factory should work.\n\tif h == nil {\n\t\treturn req, nil\n\t}\n\tfor _, dec := range h.decorators {\n\t\treq, _ = dec.ChangeRequest(req)\n\t}\n\tfor _, dec := range d {\n\t\treq, _ = dec.ChangeRequest(req)\n\t}\n\tlogrus.Debugf(\"%v -- HEADERS: %v\", req.URL, req.Header)\n\treturn req, err\n}",
"func NewRequest(url string, method string, params interface{}) (*http.Request, error) {\n\n\tcall := &clientCall{\n\t\tVersion: \"2.0\",\n\t\tID: \"1\",\n\t\tMethod: method,\n\t\tParams: params,\n\t}\n\n\tdata, err := Marshal(call)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(data)\n\n\treq, err := http.NewRequest(http.MethodPost, url, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treturn req, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetJSON performs the given request, and tries to deserialize the response object to v. If we received an error or decoding is impossible, we return an error. The raw http response is also returned for inspection
|
func (t *TestContext) GetJSON(r *http.Request, v interface{}) (*http.Response, error) {
resp, err := http.DefaultClient.Do(r)
if err != nil {
return resp, err
}
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
// We replace the request's body with a fake one if the caller wants to peek inside
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
err = json.Unmarshal(b, v)
if err == nil && resp.StatusCode >= 400 {
err = fmt.Errorf("Bad HTTP response code: %s", resp.Status)
}
return resp, err
}
|
[
"func (s *BaseService) GetJSON(url string, res interface{}) (err error) {\n\n\t// Make request to service\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != 200 {\n\t\terr = fmt.Errorf(BadStatus, s.GetName(), response.StatusCode)\n\t\treturn\n\t}\n\n\t// Initialize decoder\n\tdecoder := json.NewDecoder(response.Body)\n\n\t// Try to parse response\n\terr = decoder.Decode(res)\n\treturn\n}",
"func GetJSON(w http.ResponseWriter, r *http.Request) {\n\tlogging.LogRequest(r)\n\n\tresponse := store.SimplestResponseStatus{Status: \"200 OK\"}\n\trespondJSON(w, http.StatusOK, response)\n}",
"func (c AcmeClient) get(url string, out interface{}, expectedStatus ...int) (*http.Response, error) {\n\tresp, body, err := c.getRaw(url, expectedStatus...)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\tif len(body) > 0 && out != nil {\n\t\tif err := json.Unmarshal(body, out); err != nil {\n\t\t\treturn resp, fmt.Errorf(\"acme: error parsing response body: %v\", err)\n\t\t}\n\t}\n\n\treturn resp, nil\n}",
"func JSONGet(r io.Reader, valPtr interface{}) (err error) {\n\tdec := json.NewDecoder(r)\n\terr = dec.Decode(valPtr)\n\treturn\n}",
"func GetJSON(ctx context.Context, httpClient *http.Client,\n\turl string, queryParams map[string]string) ([]byte, error) {\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create HTTP GET request\")\n\t}\n\n\tif queryParams != nil {\n\t\tfor k, v := range queryParams {\n\t\t\tutil.AddQueryParam(req, k, v)\n\t\t}\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\treturn Request(ctx, httpClient, req)\n}",
"func FetchJSON(url, kind string, out interface{}) error {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to build request: %s\\n\", err)\n\t\treturn results.Unavailable\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get %s: %s\\n\", kind, err)\n\t\treturn results.Unavailable\n\t}\n\tdefer resp.Body.Close()\n\t// Translate Status Code\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\tbreak\n\tcase 404:\n\t\treturn results.NotFound\n\tdefault:\n\t\treturn results.Unavailable\n\t}\n\t// Decode response\n\tdec := json.NewDecoder(resp.Body)\n\tif err = dec.Decode(out); err != nil {\n\t\tlog.Debugf(\"Failed to decode response: %s\\n\", err)\n\t\treturn results.Unavailable\n\t}\n\treturn nil\n}",
"func getJson(url string) []byte {\n\tres, err := http.Get(url)\n\t// If there is an error, print it and terminate the program.\n\tif err != nil {\n\t\tlog.Fatal(\"ERROR: sorry, something happened: \", err)\n\t}\n\n\tjsonBlob, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\t// If there is an error, print it and terminate the program.\n\t\tlog.Fatal(err)\n\t}\n\tif res.StatusCode != 200 {\n\t\tvar error Errors\n\t\terr := json.Unmarshal(jsonBlob, &error)\n\t\t// If there is an error, print it and terminate the program.\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"ERROR: \", err)\n\t\t}\n\t\tlog.Fatalf(\"ERROR: code: %v, message: %v\\n\", error.Cod, error.Message)\n\t}\n\treturn jsonBlob\n}",
"func decodeJsonResponse(url string, target interface{}) error {\n r, err := client.Get(url)\n if err != nil {\n return err\n }\n defer r.Body.Close()\n\n return json.NewDecoder(r.Body).Decode(target)\n}",
"func get(ctx context.Context, url string, data interface{}) error {\n\tvar err error\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tvar resp *http.Response\n\t\tresp, err = http.Get(url)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\terr = fmt.Errorf(\"got status %d when making the request\", resp.StatusCode)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.NewDecoder(resp.Body).Decode(&data)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}",
"func decodeJSON(r *http.Request, v interface{}) error {\n\treturn json.NewDecoder(r.Body).Decode(v)\n}",
"func (r *Request) GetJSON() ([]Response, error) {\n\tvar responses []Response\n\tvar errs []error\n\n\turls, err := r.URLs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, u := range urls {\n\t\tresponses, errs = getJSON(u, responses, errs)\n\t}\n\tif len(errs) > 0 {\n\t\terrStrings := make([]string, len(errs))\n\t\tfor i, err := range errs {\n\t\t\terrStrings[i] = err.Error()\n\t\t}\n\t\treturn nil, errors.New(strings.Join(errStrings, \"; \"))\n\t}\n\treturn responses, nil\n}",
"func getJSONData(requestURL string) interface{} {\n\tresp, err := http.Get(requestURL)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"There was an error making the request to the location info API: %v\\n\", err)\n\n\t\treturn nil\n\t}\n\tdefer resp.Body.Close()\n\n\tvar jsonData interface{}\n\n\terr = json.NewDecoder(resp.Body).Decode(&jsonData)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"There was an error processing JSON data: %v\\n\", err)\n\t\treturn nil\n\t}\n\n\treturn jsonData\n}",
"func RequestJSON(r Requester, method, url string, body io.Reader, responseStruct interface{}) error {\n\tres, err := r.Request(method, url, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\treturn json.NewDecoder(res.Body).Decode(&responseStruct)\n}",
"func JsonRequest(url string, method string, data interface{}) (string, int) {\n json, err := json.Marshal(data)\n if err != nil {\n panic(err)\n }\n req, err := http.NewRequest(method, url, bytes.NewBuffer(json))\n if err != nil {\n panic(err)\n }\n req.Header.Set(\"Content-Type\", \"application/json; charset=utf-8\")\n client := &http.Client{}\n resp, err := client.Do(req)\n if verbose {\n fmt.Printf(\"URL: %v\\n\", url)\n fmt.Printf(\"Request: %v\\n\", req)\n fmt.Println(\"HTTP Response Status:\", resp.StatusCode, http.StatusText(resp.StatusCode))\n }\n defer resp.Body.Close()\n body, err := ioutil.ReadAll(resp.Body)\n if err != nil {\n panic(err)\n }\n return string(body), resp.StatusCode\n}",
"func getResponse(target *url.URL, decTarget interface{}) (err error) {\n\tresp, err := http.Get(target.String())\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = respErrorMaker(resp.StatusCode, resp.Body)\n\t\treturn\n\t}\n\tdec := json.NewDecoder(resp.Body)\n\tdec.UseNumber()\n\terr = dec.Decode(decTarget)\n\treturn\n}",
"func (c *Client) CallJSONResponse(req *http.Request, v interface{}) error {\n\treturn c.Call(req, v, jsonResponseHandler)\n}",
"func GetJson(body []byte) (jsonSource interface{}, err error) {\n\tif string(body) != \"\" && body != nil {\n\t\terr := json.Unmarshal(body, &jsonSource)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn jsonSource, err\n}",
"func Get(cl *http.Client, s Signer, a API, host string, resp interface{}) error {\n\turl := host + \"?\" + s.Sign(a)\n\tr, err := cl.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\treturn HandleResp(r, json.Unmarshal, resp)\n}",
"func RPCGet(apiURL string, req interface{}, resp interface{}) error {\n\tb, e := json.Marshal(req)\n\tif e != nil {\n\t\treturn e\n\t}\n\tapiURL = apiURL + \"?json=\" + url.QueryEscape(string(b))\n\tres, err := http.Get(apiURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(body, resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determine whether a test shuold run based on the context
|
func (t *testRunner) shouldRun(tc Tester) bool {
if t.category == "" || t.category == AllTests {
return true
}
if tc == nil {
return false
}
return getTestCategory(tc) == t.category
}
|
[
"func (cfg *Config) OnTest() bool {\n return cfg.Env == env.Test\n}",
"func ShouldRun(testType string) bool {\n\tif *uncategorized {\n\t\treturn false\n\t}\n\n\t// Fallback if no test filter is specified.\n\tif !*small && !*medium && !*large && !*manual {\n\t\treturn DEFAULT_RUN[testType]\n\t}\n\n\tswitch testType {\n\tcase SMALL_TEST:\n\t\treturn *small\n\tcase MEDIUM_TEST:\n\t\treturn *medium\n\tcase LARGE_TEST:\n\t\treturn *large\n\tcase MANUAL_TEST:\n\t\treturn *manual\n\t}\n\treturn false\n}",
"func (job TestWorker) ShouldRun(key string) bool {\n\treturn key == \"test_worker\"\n}",
"func (asserter Asserter) isUnitTesting() bool {\n\treturn asserter&AsserterUnitTesting != 0 && tester() != nil\n}",
"func shouldRun(s *suite.Suite, sn, no int) bool {\n\tif testsToRun == \"\" {\n\t\treturn true\n\t}\n\tsp := strings.Split(testsToRun, \",\")\n\ttitle := s.Test[no-1].Title\n\tfor _, x := range sp {\n\t\tif x == fmt.Sprintf(\"%d.%d\", sn, no) {\n\t\t\treturn true\n\t\t}\n\t\tif sn == 1 && x == fmt.Sprintf(\"%d\", no) {\n\t\t\treturn true\n\t\t}\n\t\tmatches, err := tag.Match(x, title)\n\t\tif err != nil {\n\t\t\terrorf(\"Malformed pattern '%s' in -tests.\", x)\n\t\t\tcontinue\n\t\t}\n\t\tif matches {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (s *fakeSource) IsTestSource() bool { return true }",
"func ShouldTearDown() bool {\n\treturn IsEnvTruthy(TearDown)\n}",
"func AmRunningInTest() bool {\n\t_, runningUnderLocalTest := os.LookupEnv(\"CMA_SSH_DEV\")\n\treturn runningUnderLocalTest\n}",
"func InTest() bool {\n\treturn lazyInTest.Get(func() bool {\n\t\treturn flag.Lookup(\"test.v\") != nil\n\t})\n}",
"func (d DoneJob) IsTest() bool {\n\treturn strings.Contains(d.SMSID, \"test\")\n}",
"func canRunTest(test string, apis []graphics.APIType) (bool, error) {\n\ttestAPI, err := testNameToAPI(test)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, a := range apis {\n\t\tif testAPI == a {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (obj *instruction) IsTest() bool {\n\treturn obj.test != nil\n}",
"func isUserTestingMode(chainType ChainType) bool {\n\treturn UserTesting == chainType\n}",
"func (e *Env) IsTest() bool {\n\treturn e.Env == \"test\"\n}",
"func (c *controller) checkTestConfig(queue *s2hv1.Queue) (\n\tskipTest bool, testRunners []internal.StagingTestRunner, err error) {\n\n\ttestRunners = make([]internal.StagingTestRunner, 0)\n\n\tif queue.Spec.SkipTestRunner {\n\t\tif err = c.updateTestQueueCondition(\n\t\t\tqueue,\n\t\t\tv1.ConditionTrue,\n\t\t\t\"skip running test\"); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\treturn true, nil, nil\n\t}\n\n\ttestConfig := c.getTestConfiguration(queue)\n\tif testConfig == nil {\n\t\tif err = c.updateTestQueueCondition(\n\t\t\tqueue,\n\t\t\tv1.ConditionTrue,\n\t\t\t\"queue testing succeeded because no testing configuration\"); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\treturn true, nil, nil\n\t}\n\n\tskipTest = false\n\n\tif testConfig.Teamcity != nil {\n\t\ttestRunners = append(testRunners, c.testRunners[teamcity.TestRunnerName])\n\t}\n\tif testConfig.Gitlab != nil {\n\t\ttestRunners = append(testRunners, c.testRunners[gitlab.TestRunnerName])\n\t}\n\tif testConfig.TestMock != nil {\n\t\ttestRunners = append(testRunners, c.testRunners[testmock.TestRunnerName])\n\t}\n\n\tif len(testRunners) == 0 {\n\t\tif err = c.updateTestQueueCondition(queue, v1.ConditionFalse, \"test runner not found\"); err != nil {\n\t\t\treturn\n\t\t}\n\t\tlogger.Error(s2herrors.ErrTestRunnerNotFound, \"test runner not found (testRunner: nil)\")\n\t\terr = s2herrors.ErrTestRunnerNotFound\n\t\treturn\n\t}\n\n\tnow := metav1.Now()\n\tif queue.Status.StartTestingTime == nil {\n\t\tqueue.Status.StartTestingTime = &now\n\t\terr = c.updateQueue(queue)\n\t}\n\treturn\n}",
"func (a *action) runWithT(ctx context.Context, cfg *envconf.Config, t *testing.T) (context.Context, error) {\n\tswitch a.role {\n\tcase roleBeforeTest, roleAfterTest:\n\t\tif cfg.DryRunMode() {\n\t\t\tklog.V(2).Info(\"Skipping execution of roleBeforeTest and roleAfterTest due to framework being in dry-run mode\")\n\t\t\treturn ctx, nil\n\t\t}\n\t\tfor _, f := range a.testFuncs {\n\t\t\tif f == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\tctx, err = f(ctx, cfg, t)\n\t\t\tif err != nil {\n\t\t\t\treturn ctx, err\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn ctx, fmt.Errorf(\"runWithT() is only valid for actions roleBeforeTest and roleAfterTest\")\n\t}\n\n\treturn ctx, nil\n}",
"func (s Settings) IsUsingTestProcessHelper() bool {\n\treturn s.TestHelperProcessMethodName != \"\"\n}",
"func IsTestEnvironment() bool {\n\treturn env == \"test\"\n}",
"func TestRun_UsingTarget(t *testing.T) {\n\tlogger := logging.NewNoopCtxLogger(t)\n\tcases := []struct {\n\t\tcommentFlags []string\n\t\textraArgs []string\n\t\texpErr bool\n\t}{\n\t\t{\n\t\t\tcommentFlags: []string{\"-target\", \"mytarget\"},\n\t\t\texpErr: true,\n\t\t},\n\t\t{\n\t\t\tcommentFlags: []string{\"-target=mytarget\"},\n\t\t\texpErr: true,\n\t\t},\n\t\t{\n\t\t\textraArgs: []string{\"-target\", \"mytarget\"},\n\t\t\texpErr: true,\n\t\t},\n\t\t{\n\t\t\textraArgs: []string{\"-target=mytarget\"},\n\t\t\texpErr: true,\n\t\t},\n\t\t{\n\t\t\tcommentFlags: []string{\"-target\", \"mytarget\"},\n\t\t\textraArgs: []string{\"-target=mytarget\"},\n\t\t\texpErr: true,\n\t\t},\n\t\t// Test false positives.\n\t\t{\n\t\t\tcommentFlags: []string{\"-targethahagotcha\"},\n\t\t\texpErr: false,\n\t\t},\n\t\t{\n\t\t\textraArgs: []string{\"-targethahagotcha\"},\n\t\t\texpErr: false,\n\t\t},\n\t\t{\n\t\t\tcommentFlags: []string{\"-targeted=weird\"},\n\t\t\texpErr: false,\n\t\t},\n\t\t{\n\t\t\textraArgs: []string{\"-targeted=weird\"},\n\t\t\texpErr: false,\n\t\t},\n\t}\n\n\tRegisterMockTestingT(t)\n\n\tfor _, c := range cases {\n\t\tdescrip := fmt.Sprintf(\"comments flags: %s extra args: %s\",\n\t\t\tstrings.Join(c.commentFlags, \", \"), strings.Join(c.extraArgs, \", \"))\n\t\tt.Run(descrip, func(t *testing.T) {\n\t\t\ttmpDir, cleanup := TempDir(t)\n\t\t\tdefer cleanup()\n\t\t\tplanPath := filepath.Join(tmpDir, \"workspace.tfplan\")\n\t\t\terr := os.WriteFile(planPath, nil, 0600)\n\t\t\tOk(t, err)\n\t\t\tterraform := mocks.NewMockClient()\n\t\t\tstep := runtime.ApplyStepRunner{\n\t\t\t\tTerraformExecutor: terraform,\n\t\t\t}\n\n\t\t\tctx := context.Background()\n\t\t\toutput, err := step.Run(ctx, command.ProjectContext{\n\t\t\t\tLog: logger,\n\t\t\t\tWorkspace: \"workspace\",\n\t\t\t\tRepoRelDir: \".\",\n\t\t\t\tEscapedCommentArgs: c.commentFlags,\n\t\t\t\tRequestCtx: context.TODO(),\n\t\t\t}, c.extraArgs, tmpDir, map[string]string(nil))\n\t\t\tEquals(t, \"\", output)\n\t\t\tif 
c.expErr {\n\t\t\t\tErrEquals(t, \"cannot run apply with -target because we are applying an already generated plan. Instead, run -target with atlantis plan\", err)\n\t\t\t} else {\n\t\t\t\tOk(t, err)\n\t\t\t}\n\t\t})\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
setDifference returns the list of variables (wrapped in a resolver) which are in the first set, but not in the second one.
|
func setDifference(first, second map[string]*ssmvars.Variable) []*variableResolver {
var ret []*variableResolver
for key, variable := range first {
if _, exists := second[key]; !exists {
ret = append(ret, &variableResolver{wraps: variable})
}
}
return ret
}
|
[
"func Difference(set1, set2 Set, sets ...Set) Set {\n\ts := set1.Copy()\n\ts.Separate(set2)\n\tfor _, set := range sets {\n\t\ts.Separate(set) // seperate is thread safe\n\t}\n\treturn s\n}",
"func (es ExprSet) Difference(sets ...ExprSet) ExprSet {\n\tfor _, set := range sets {\n\t\tes = es.difference(set)\n\t}\n\treturn es\n}",
"func (ss StringSet) Difference(sets ...StringSet) StringSet {\n\tfor _, set := range sets {\n\t\tss = ss.difference(set)\n\t}\n\treturn ss\n}",
"func diff(left, right []string) []string {\n\trightSet := map[string]struct{}{}\n\tfor _, v := range right {\n\t\trightSet[v] = struct{}{}\n\t}\n\n\tvar diff []string\n\tfor _, v := range left {\n\t\tif _, ok := rightSet[v]; !ok {\n\t\t\tdiff = append(diff, v)\n\t\t}\n\t}\n\n\treturn diff\n}",
"func setDifference(V, S []string) []string {\n\tsPrime := make([]string, 0)\n\tfound := false\n\tfor _, v := range V {\n\t\tfor _, s := range S {\n\t\t\tif v == s {\n\t\t\t\tfound = true\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tsPrime = append(sPrime, v)\n\t\t}\n\t\tfound = false\n\t}\n\treturn sPrime\n}",
"func (setA ObjMetadataSet) Diff(setB ObjMetadataSet) ObjMetadataSet {\n\t// Create a map of the elements of A\n\tm := make(map[ObjMetadata]struct{}, len(setA))\n\tfor _, a := range setA {\n\t\tm[a] = struct{}{}\n\t}\n\t// Remove from A each element of B\n\tfor _, b := range setB {\n\t\tdelete(m, b) // OK to delete even if b not in m\n\t}\n\t// Create/return slice from the map of remaining items\n\tdiff := make(ObjMetadataSet, 0, len(m))\n\t// Iterate over setA to retain input order and have stable output\n\tfor _, id := range setA {\n\t\tif _, ok := m[id]; ok {\n\t\t\tdiff = append(diff, id)\n\t\t\tdelete(m, id)\n\t\t}\n\t}\n\treturn diff\n}",
"func enforcedSetDifference(result map[string]interface{}, keyPrefix string, a map[string]ruleset.EnforceChange, b map[string]interface{}) map[string]interface{} {\n\tfor k, v := range a {\n\t\tif keyPrefix != \"\" {\n\t\t\tk = fmt.Sprintf(\"%s.%s\", keyPrefix, k)\n\t\t}\n\t\tif v.EnforceChange != nil {\n\t\t\tresult = enforcedSetDifference(result, k, v.EnforceChange, b)\n\t\t} else if _, ok := b[k]; !ok {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\n\treturn result\n}",
"func SubtractSet(s1, s2 Set) Set {\n\tif s1.Count() == 0 {\n\t\treturn Set{}\n\t}\n\tif s2.Count() == 0 {\n\t\treturn s1\n\t}\n\n\tvar s Set\n\ts.Add(s1.Slice()...)\n\ts.Subtract(s2)\n\treturn s\n}",
"func compareReplicationSets(first, second ReplicationSet) (added, removed []string) {\n\tfor _, instance := range first.Instances {\n\t\tif !second.Includes(instance.Addr) {\n\t\t\tadded = append(added, instance.Addr)\n\t\t}\n\t}\n\n\tfor _, instance := range second.Instances {\n\t\tif !first.Includes(instance.Addr) {\n\t\t\tremoved = append(removed, instance.Addr)\n\t\t}\n\t}\n\n\treturn\n}",
"func (s *server) diffSets() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tparams := mux.Vars(r)\n\t\tvar leftSet depset.Set\n\t\tvar err error\n\t\tif !isZeroHash(params[\"leftSetId\"]) {\n\t\t\tleftSet, err = s.model.selectRawSet(params[\"orgId\"], params[\"appId\"], params[\"leftSetId\"])\n\t\t\tif err == ErrNotFound {\n\t\t\t\twriteAsJSON(w, http.StatusNotFound, fmt.Sprintf(`Set with ID \"%s\" not available in Application \"%s/%s\".`, params[\"leftSetId\"], params[\"orgId\"], params[\"appId\"]))\n\t\t\t\treturn\n\t\t\t} else if err != nil {\n\t\t\t\tw.WriteHeader(500)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tvar rightSet depset.Set\n\t\tif !isZeroHash(params[\"rightSetId\"]) {\n\t\t\trightSet, err = s.model.selectRawSet(params[\"orgId\"], params[\"appId\"], params[\"rightSetId\"])\n\t\t\tif err == ErrNotFound {\n\t\t\t\twriteAsJSON(w, http.StatusNotFound, fmt.Sprintf(`Set with ID \"%s\" not available in Application \"%s/%s\".`, params[\"rightSetId\"], params[\"orgId\"], params[\"appId\"]))\n\t\t\t\treturn\n\t\t\t} else if err != nil {\n\t\t\t\tw.WriteHeader(500)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tdelta := leftSet.Diff(rightSet)\n\n\t\twriteAsJSON(w, http.StatusOK, delta)\n\t}\n}",
"func Difference(slice []string, slice2 []string) []string {\n\tvar result []string\n\n\t// range over slice\n\tfor _, v := range slice {\n\t\tif !Contains(slice2, v) {\n\t\t\tresult = append(result, v)\n\t\t}\n\t}\n\n\treturn result\n}",
"func (ns Nodes) Difference(other Nodes) Nodes {\n\tsort.Sort(ns)\n\tsort.Sort(other)\n\ts := append(ns, other...)\n\tcount := set.Diff(s, len(ns))\n\treturn s[:count]\n}",
"func (s *FastIntSet) DifferenceWith(rhs FastIntSet) {\n\tif rhs.s == nil {\n\t\treturn\n\t}\n\ts.prepareForMutation()\n\ts.s.DifferenceWith(rhs.s)\n}",
"func Difference(array []interface{}, values ...interface{}) []interface{} {\n\thset := hashset.New(array...)\n\thset.Remove(values...)\n\treturn hset.Values()\n}",
"func (s1 *_set_Set_string) Subtract(s2 Set) {\n\tfor k := range *s1 {\n\t\tif s2.Has(k) {\n\t\t\tdelete(*s1, k)\n\t\t}\n\t}\n}",
"func difference(diskMigrations []types.Migration, flattenedMigrationDBs []types.Migration) []types.Migration {\n\t// key is Migration.File\n\texistsInDB := map[string]bool{}\n\tfor _, m := range flattenedMigrationDBs {\n\t\texistsInDB[m.File] = true\n\t}\n\tdiff := []types.Migration{}\n\tfor _, m := range diskMigrations {\n\t\tif _, ok := existsInDB[m.File]; !ok {\n\t\t\tdiff = append(diff, m)\n\t\t}\n\t}\n\treturn diff\n}",
"func Disjunction(a Set, b Set) Set {\n\treturn Union(a, b).Remove(Intersection(a, b).Elements()...)\n}",
"func Diff(l []interface{}, r []interface{}) (additions []interface{}, removals []interface{}) {\n\tadditions = Additions(l, r)\n\t// Reversing the direction gives us the removals\n\tremovals = Additions(r, l)\n\n\treturn\n}",
"func getExcept(source2 interface{}) stepAction {\r\n\treturn stepAction(func(src dataSource, option *ParallelOption, first bool) (dest dataSource, keep bool, e error) {\r\n\t\tkeep = option.KeepOrder\r\n\t\tdest, e = filterSet(src, source2, true, option)\r\n\t\treturn\r\n\t})\r\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DeployValidatorWalletCreator deploys a new Ethereum contract, binding an instance of ValidatorWalletCreator to it.
|
func DeployValidatorWalletCreator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ValidatorWalletCreator, error) {
parsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))
if err != nil {
return common.Address{}, nil, nil, err
}
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ValidatorWalletCreatorBin), backend)
if err != nil {
return common.Address{}, nil, nil, err
}
return address, tx, &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil
}
|
[
"func bindValidatorWalletCreator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func NewContractDeployerFacade(\n\tprivateKey string,\n\trpc string,\n\tcontractArgs []string,\n\tcontractType string,\n\tgasLimit int,\n\tgasPrice int,\n) (*contractDeployerFacade, error) {\n\tfmt.Println(\"Starting account and blockchain connection process.\")\n\t// Process the private key from the flag\n\tuserAccount, err := ethacc.CreateAccount(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Printf(\"Successfully accessed account for Public Key: %s\\n\",\n\t\tuserAccount.Account)\n\n\t// Connect to the RPC client with the give URL\n\tethClient, err1 := ethrpc.CreateClient(rpc)\n\tif err1 != nil {\n\t\treturn nil, fmt.Errorf(\"error: failed to connect to given \" +\n\t\t\t\"rpc url : %v \\n\", err1)\n\t}\n\tdefer ethClient.CloseClient()\n\n\t// Attempt to load data from the blockchain given the connected RPC Client\n\tcurrBlockchainState, err2 := ethClient.LoadBlockChainState(context.Background())\n\tif err2 != nil {\n\t\treturn nil, fmt.Errorf(\"error: failed to retrieve account using \" +\n\t\t\t\"provided private key : %v\\n\", err2)\n\t}\n\n\tfmt.Printf(\"Succesfully connected to RPC client %s. 
Current Block Height %d \"+\n\t\t\", for chain id: %d\\n\", ethClient.RawUrl, currBlockchainState.BlockNumber,\n\t\tcurrBlockchainState.ChainId)\n\n\t// Using the client and the account get data needed for contract deployment\n\tauth, err3 := ethClient.GetDataForTransaction(context.Background(),\n\t\tuserAccount, currBlockchainState.ChainId, gasLimit, gasPrice)\n\tif err3 != nil {\n\t\treturn nil, fmt.Errorf(\"error: failed to get data for transaction \" +\n\t\t\t\"processing: %v\\n\", err3)\n\t}\n\n\tcontractDeployerFacade := &contractDeployerFacade{\n\t\tbaseContractInteractorFacade{\n\t\t\tuserAccount,\n\t\t\tethClient,\n\t\t\tcurrBlockchainState,\n\t\t\tauth,\n\t\t\tcontractType,\n\t\t},\n\t\tcontractArgs,\n\t}\n\tfmt.Println(\"Successfully completed account and blockchain connection \" +\n\t\t\"process.\")\n\treturn contractDeployerFacade, nil\n}",
"func NewValidatorWalletCreatorCaller(address common.Address, caller bind.ContractCaller) (*ValidatorWalletCreatorCaller, error) {\n\tcontract, err := bindValidatorWalletCreator(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorCaller{contract: contract}, nil\n}",
"func NewValidatorWalletCreator(address common.Address, backend bind.ContractBackend) (*ValidatorWalletCreator, error) {\n\tcontract, err := bindValidatorWalletCreator(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func NewValidatorWalletCreatorTransactor(address common.Address, transactor bind.ContractTransactor) (*ValidatorWalletCreatorTransactor, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTransactor{contract: contract}, nil\n}",
"func (_Testproxyfactory *TestproxyfactoryTransactorSession) DeployProxyContract(admin common.Address, initializerData []byte) (*types.Transaction, error) {\n\treturn _Testproxyfactory.Contract.DeployProxyContract(&_Testproxyfactory.TransactOpts, admin, initializerData)\n}",
"func (_Testproxyfactory *TestproxyfactorySession) DeployProxyContract(admin common.Address, initializerData []byte) (*types.Transaction, error) {\n\treturn _Testproxyfactory.Contract.DeployProxyContract(&_Testproxyfactory.TransactOpts, admin, initializerData)\n}",
"func DeployMultiSigWalletFactoryContract(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *MultiSigWalletFactoryContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(MultiSigWalletFactoryContractABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(MultiSigWalletFactoryContractBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &MultiSigWalletFactoryContract{MultiSigWalletFactoryContractCaller: MultiSigWalletFactoryContractCaller{contract: contract}, MultiSigWalletFactoryContractTransactor: MultiSigWalletFactoryContractTransactor{contract: contract}, MultiSigWalletFactoryContractFilterer: MultiSigWalletFactoryContractFilterer{contract: contract}}, nil\n}",
"func NewValidatorWalletCreatorFilterer(address common.Address, filterer bind.ContractFilterer) (*ValidatorWalletCreatorFilterer, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorFilterer{contract: contract}, nil\n}",
"func (s *Simulator) TxStakeCreateValidator(simAcc *SimAccount, commissions staking.CommissionRates) {\n\trequire.NotNil(s.t, simAcc)\n\n\tmsg := staking.NewMsgCreateValidator(\n\t\tsimAcc.Address.Bytes(),\n\t\tsimAcc.PublicKey,\n\t\ts.minSelfDelegationCoin,\n\t\tstaking.NewDescription(simAcc.Address.String(), \"\", \"\", \"\", \"\"),\n\t\tcommissions,\n\t\ts.minSelfDelegationCoin.Amount,\n\t)\n\ts.DeliverTx(s.GenTx(msg, simAcc), nil)\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractTransactor) Create(opts *bind.TransactOpts, _owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.contract.Transact(opts, \"create\", _owners, _required)\n}",
"func DeployContractReceiver(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ContractReceiver, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContractReceiverABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ContractReceiverBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ContractReceiver{ContractReceiverCaller: ContractReceiverCaller{contract: contract}, ContractReceiverTransactor: ContractReceiverTransactor{contract: contract}, ContractReceiverFilterer: ContractReceiverFilterer{contract: contract}}, nil\n}",
"func CreateWallet(pubKey []byte) (*Wallet, error) {\n\tpublickHash, err := hashPublicKey(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversionedPayload := append(Version, publickHash...)\n\tchecksum := checksum(versionedPayload)\n\n\taddress := append(versionedPayload, checksum...)\n\n\treturn &Wallet{\n\t\tBase58Address: base58.Encode(address),\n\t\tAddress: address,\n\t}, nil\n}",
"func CreateWallet(ctx context.Context, g *libkb.GlobalContext) (created bool, err error) {\n\tdefer g.CTraceTimed(ctx, \"Stellar.CreateWallet\", func() error { return err })()\n\t// TODO: short-circuit if the user has a bundle already\n\tclearBundle, err := bundle.NewInitialBundle()\n\tif err != nil {\n\t\treturn created, err\n\t}\n\terr = remote.PostWithChainlink(ctx, g, clearBundle)\n\tswitch e := err.(type) {\n\tcase nil:\n\t\t// ok\n\tcase libkb.AppStatusError:\n\t\tswitch keybase1.StatusCode(e.Code) {\n\t\tcase keybase1.StatusCode_SCStellarWrongRevision:\n\t\t\t// Assume this happened because a bundle already existed.\n\t\t\t// And suppress the error.\n\t\t\tg.Log.CDebugf(ctx, \"suppressing error: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\tdefault:\n\t\treturn false, err\n\t}\n\treturn true, err\n}",
"func (_PMintyMultiToken *PMintyMultiTokenCaller) Deployer(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _PMintyMultiToken.contract.Call(opts, &out, \"deployer\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}",
"func DeployContract(ctx context.Context, client Client, privateKeyHex string, binHex, abiJSON string, params ...interface{}) (*Transaction, error) {\n\tif len(privateKeyHex) > 2 && privateKeyHex[:2] == \"0x\" {\n\t\tprivateKeyHex = privateKeyHex[2:]\n\t}\n\tprivateKey, err := crypto.HexToECDSA(privateKeyHex)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid private key: %v\", err)\n\t}\n\n\tgasPrice, err := client.GetGasPrice(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get gas price: %v\", err)\n\t}\n\n\tpublicKey := privateKey.Public()\n\tpublicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)\n\tif !ok {\n\t\treturn nil, errors.New(\"error casting public key to ECDSA\")\n\t}\n\n\tfromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)\n\tnonce, err := client.GetPendingTransactionCount(ctx, fromAddress)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get nonce: %v\", err)\n\t}\n\tbinData, err := hexutil.Decode(binHex)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot decode contract data: %v\", err)\n\t}\n\tif len(params) > 0 {\n\t\tabiData, err := abi.JSON(strings.NewReader(abiJSON))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse ABI: %v\", err)\n\t\t}\n\t\targs2, err := ConvertArguments(abiData.Constructor, params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tinput, err := abiData.Pack(\"\", args2...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot pack parameters: %v\", err)\n\t\t}\n\t\tbinData = append(binData, input...)\n\t}\n\t//TODO try to use web3.Transaction only; can't sign currently\n\ttx := types.NewContractCreation(nonce, big.NewInt(0), 2000000, gasPrice, binData)\n\tsignedTx, err := types.SignTx(tx, types.HomesteadSigner{}, privateKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot sign transaction: %v\", err)\n\t}\n\traw, err := rlp.EncodeToBytes(signedTx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = client.SendRawTransaction(ctx, raw)\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"cannot send transaction: %v\", err)\n\t}\n\n\treturn convertTx(signedTx, fromAddress), nil\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractSession) Create(_owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.Contract.Create(&_MultiSigWalletFactoryContract.TransactOpts, _owners, _required)\n}",
"func (_Testproxyfactory *TestproxyfactorySession) DeployInstance(admin common.Address, initializerData []byte) (*types.Transaction, error) {\n\treturn _Testproxyfactory.Contract.DeployInstance(&_Testproxyfactory.TransactOpts, admin, initializerData)\n}",
"func DeployChallengeTester(auth *bind.TransactOpts, backend bind.ContractBackend, challengeFactory_ common.Address) (common.Address, *types.Transaction, *ChallengeTester, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ChallengeTesterABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ChallengeTesterBin), backend, challengeFactory_)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ChallengeTester{ChallengeTesterCaller: ChallengeTesterCaller{contract: contract}, ChallengeTesterTransactor: ChallengeTesterTransactor{contract: contract}, ChallengeTesterFilterer: ChallengeTesterFilterer{contract: contract}}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewValidatorWalletCreator creates a new instance of ValidatorWalletCreator, bound to a specific deployed contract.
|
func NewValidatorWalletCreator(address common.Address, backend bind.ContractBackend) (*ValidatorWalletCreator, error) {
contract, err := bindValidatorWalletCreator(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil
}
|
[
"func bindValidatorWalletCreator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func DeployValidatorWalletCreator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ValidatorWalletCreator, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ValidatorWalletCreatorBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func NewValidatorWalletCreatorCaller(address common.Address, caller bind.ContractCaller) (*ValidatorWalletCreatorCaller, error) {\n\tcontract, err := bindValidatorWalletCreator(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorCaller{contract: contract}, nil\n}",
"func NewValidatorWalletCreatorTransactor(address common.Address, transactor bind.ContractTransactor) (*ValidatorWalletCreatorTransactor, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTransactor{contract: contract}, nil\n}",
"func (m *MangoPay) NewWallet(owners ConsumerList, desc string, currency string) (*Wallet, error) {\n\tall := []string{}\n\tfor k, o := range owners {\n\t\tid := consumerId(o)\n\t\tif id == \"\" {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Empty Id for owner %d. Unable to create wallet.\", k))\n\t\t}\n\t\tall = append(all, id)\n\t}\n\tw := &Wallet{\n\t\tOwners: all,\n\t\tDescription: desc,\n\t\tCurrency: currency,\n\t}\n\tw.service = m\n\treturn w, nil\n}",
"func NewValidatorWalletCreatorFilterer(address common.Address, filterer bind.ContractFilterer) (*ValidatorWalletCreatorFilterer, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorFilterer{contract: contract}, nil\n}",
"func NewWallet(username string) Wallet {\n\treturn Wallet{username, NewBlockchain()}\n}",
"func CreateWallet(pubKey []byte) (*Wallet, error) {\n\tpublickHash, err := hashPublicKey(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversionedPayload := append(Version, publickHash...)\n\tchecksum := checksum(versionedPayload)\n\n\taddress := append(versionedPayload, checksum...)\n\n\treturn &Wallet{\n\t\tBase58Address: base58.Encode(address),\n\t\tAddress: address,\n\t}, nil\n}",
"func newWalletDomainProxy(parent object.Parent, class object.Factory) (*walletDomainProxy, error) {\n\tinst, err := newWalletDomain(parent, class)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &walletDomainProxy{\n\t\tBaseSmartContractProxy: contract.BaseSmartContractProxy{\n\t\t\tInstance: inst,\n\t\t},\n\t}, nil\n}",
"func newWallet() *wallet {\n\treturn &wallet{\n\t\tindex: indexer.New(),\n\t\taccounts: make(map[uuid.UUID]*account),\n\t}\n}",
"func NewWallet(filename, label, seed, seedPassphrase string, options ...wallet.Option) (*Wallet, error) {\n\twlt := &Wallet{\n\t\tMeta: wallet.Meta{\n\t\t\twallet.MetaFilename: filename,\n\t\t\twallet.MetaLabel: label,\n\t\t\twallet.MetaSeed: seed,\n\t\t\twallet.MetaSeedPassphrase: seedPassphrase,\n\t\t\twallet.MetaEncrypted: \"false\",\n\t\t\twallet.MetaType: WalletType,\n\t\t\twallet.MetaVersion: wallet.Version,\n\t\t\twallet.MetaCoin: string(wallet.CoinTypeSkycoin),\n\t\t\twallet.MetaCryptoType: string(crypto.DefaultCryptoType),\n\t\t\twallet.MetaTimestamp: strconv.FormatInt(time.Now().Unix(), 10),\n\t\t},\n\t\taccountManager: &bip44Accounts{},\n\t\tdecoder: defaultWalletDecoder,\n\t}\n\n\tadvOpts := wallet.AdvancedOptions{}\n\t// applies options to wallet and AdvancedOptions\n\tfor _, opt := range options {\n\t\topt(wlt)\n\t\topt(&advOpts)\n\t}\n\n\tif wlt.Bip44Coin() == nil {\n\t\tswitch wlt.Coin() {\n\t\tcase wallet.CoinTypeSkycoin:\n\t\t\twlt.SetBip44Coin(bip44.CoinTypeSkycoin)\n\t\tcase wallet.CoinTypeBitcoin:\n\t\t\twlt.SetBip44Coin(bip44.CoinTypeBitcoin)\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"bip44 coin type not set\")\n\t\t}\n\t}\n\n\t// validateMeta wallet before encrypting\n\tif err := validateMeta(wlt.Meta); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// generates a default account\n\tvar accountName = DefaultAccountName\n\tif advOpts.DefaultBip44AccountName != \"\" {\n\t\taccountName = advOpts.DefaultBip44AccountName\n\t}\n\n\t_, err := wlt.NewAccount(accountName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"generate default account failed: %v\", err)\n\t}\n\n\t// Generate addresses if options.GenrateN > 0\n\tgenerateN := advOpts.GenerateN\n\tif generateN == 0 {\n\t\tgenerateN = 1\n\t}\n\n\tif _, err := wlt.GenerateAddresses(generateN); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate a default change address\n\tif _, err := wlt.GenerateAddresses(1, wallet.OptionChange(true)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanN := 
advOpts.ScanN\n\t// scans addresses if options.ScanN > 0\n\tif scanN > 0 {\n\t\tif advOpts.TF == nil {\n\t\t\treturn nil, errors.New(\"missing transaction finder for scanning addresses\")\n\t\t}\n\n\t\tif scanN > generateN {\n\t\t\tscanN = scanN - generateN\n\t\t}\n\n\t\t_, err := wlt.ScanAddresses(scanN, advOpts.TF)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// encrypts wallet if options.Encrypt is true\n\tif advOpts.Encrypt {\n\t\tif len(advOpts.Password) == 0 {\n\t\t\treturn nil, errors.New(\"missing password for encrypting wallet\")\n\t\t}\n\n\t\tif err := wlt.Lock(advOpts.Password); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// validateMeta the wallet again after encrypted\n\tif err := validateMeta(wlt.Meta); err != nil {\n\t\treturn nil, err\n\t}\n\treturn wlt, nil\n}",
"func NewWallet() *Wallet {\n\tw := Wallet{Transactions: make([]blockchain.Transaction, 0)}\n\n\t// generate private key\n\tpriv, err := crypto.GenerateKey()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tprivHash := blockchain.Hash(crypto.FromECDSA(priv))\n\tw.Priv = privHash\n\n\t// generate public key\n\tpub, ok := priv.Public().(*ecdsa.PublicKey)\n\tif !ok {\n\t\tlog.Fatal(\"cast to public key failed\")\n\t}\n\tpubHash := blockchain.Hash(crypto.FromECDSAPub(pub))\n\tw.Pub = pubHash\n\n\t// generate address\n\thash := sha3.New256()\n\thash.Write(pubHash)\n\taddr := hash.Sum(nil)\n\tw.Addr = addr\n\n\treturn &w\n}",
"func (c Creator) Create(filename, label, seed string, options wallet.Options) (wallet.Wallet, error) {\n\topts := convertOptions(options)\n\treturn NewWallet(\n\t\tfilename,\n\t\tlabel,\n\t\tseed,\n\t\toptions.SeedPassphrase,\n\t\topts...)\n}",
"func New(repo Repository) Wallet {\n\treturn &service{repo}\n}",
"func CreateWalletFromEthMnemonic(mnemonic, password string, statusCb WalletCallback) error {\n\tif len(_config.chain.Miners) < 1 || len(_config.chain.Sharders) < 1 {\n\t\treturn fmt.Errorf(\"SDK not initialized\")\n\t}\n\tgo func() {\n\t\tsigScheme := zcncrypto.NewSignatureScheme(_config.chain.SignatureScheme)\n\t\t_, err := sigScheme.GenerateKeysWithEth(mnemonic, password)\n\t\tif err != nil {\n\t\t\tstatusCb.OnWalletCreateComplete(StatusError, \"\", err.Error())\n\t\t\treturn\n\t\t}\n\t}()\n\treturn nil\n}",
"func New() *Wallet {\n\t_, priv := key.GenPair()\n\tprivkey, _, _ := crypto.ECDSAKeyPairFromKey(priv)\n\treturn &Wallet{\n\t\tpriv: priv,\n\t\tversion: Version1,\n\t\tprivkey: privkey,\n\t}\n}",
"func CreateWallet(ctx context.Context, name string, store e2wtypes.Store, encryptor e2wtypes.Encryptor) (e2wtypes.Wallet, error) {\n\t// First, try to open the wallet.\n\t_, err := OpenWallet(ctx, name, store, encryptor)\n\tif err == nil || !strings.Contains(err.Error(), \"wallet not found\") {\n\t\treturn nil, fmt.Errorf(\"wallet %q already exists\", name)\n\t}\n\n\tid, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate UUID\")\n\t}\n\n\tw := newWallet()\n\tw.id = id\n\tw.name = name\n\tw.version = version\n\tw.store = store\n\tw.encryptor = encryptor\n\n\treturn w, w.storeWallet()\n}",
"func CreateWallet(mnenomic string, password string) (*Wallet, error) {\n\n\tseed := pbkdf2.Key([]byte(mnenomic), []byte(\"mnemonic\"+password), 2048, 32, sha512.New)\n\tprivKey := ed25519.NewKeyFromSeed(seed)\n\tpubKey := privKey.Public().(ed25519.PublicKey)\n\tpubKeyBytes := []byte(pubKey)\n\tsignKp := keyPair{PrivKey: privKey, PubKey: pubKeyBytes}\n\n\taddress, err := generatePublicHash(pubKeyBytes)\n\tif err != nil {\n\t\treturn &Wallet{}, errors.Wrapf(err, \"could not create wallet\")\n\t}\n\n\twallet := Wallet{\n\t\tAddress: address,\n\t\tMnemonic: mnenomic,\n\t\tKp: signKp,\n\t\tSeed: seed,\n\t\tSk: b58cencode(privKey, edskprefix),\n\t\tPk: b58cencode(pubKeyBytes, edpkprefix),\n\t}\n\n\treturn &wallet, nil\n}",
"func NewWallet(path, password string) (*Wallet, error) {\n\t// We use 2,1 as scrypt parameters here for development because on an Android phone\n\t// it is quite slow to use the standard parameters. Do not to this in production.\n\tks := ethkeystore.NewKeyStore(path, 2, 1)\n\tw, err := keystore.NewWallet(ks, password)\n\treturn &Wallet{w: w, password: password}, errors.WithMessage(err, \"creating wallet\")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewValidatorWalletCreatorCaller creates a new readonly instance of ValidatorWalletCreator, bound to a specific deployed contract.
|
func NewValidatorWalletCreatorCaller(address common.Address, caller bind.ContractCaller) (*ValidatorWalletCreatorCaller, error) {
contract, err := bindValidatorWalletCreator(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &ValidatorWalletCreatorCaller{contract: contract}, nil
}
|
[
"func bindValidatorWalletCreator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func NewValidatorWalletCreator(address common.Address, backend bind.ContractBackend) (*ValidatorWalletCreator, error) {\n\tcontract, err := bindValidatorWalletCreator(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func DeployValidatorWalletCreator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ValidatorWalletCreator, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ValidatorWalletCreatorBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func NewValidatorWalletCreatorTransactor(address common.Address, transactor bind.ContractTransactor) (*ValidatorWalletCreatorTransactor, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTransactor{contract: contract}, nil\n}",
"func NewValidatorWalletCreatorFilterer(address common.Address, filterer bind.ContractFilterer) (*ValidatorWalletCreatorFilterer, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorFilterer{contract: contract}, nil\n}",
"func NewMultiSigWalletFactoryContractCaller(address common.Address, caller bind.ContractCaller) (*MultiSigWalletFactoryContractCaller, error) {\n\tcontract, err := bindMultiSigWalletFactoryContract(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &MultiSigWalletFactoryContractCaller{contract: contract}, nil\n}",
"func (m *MangoPay) NewWallet(owners ConsumerList, desc string, currency string) (*Wallet, error) {\n\tall := []string{}\n\tfor k, o := range owners {\n\t\tid := consumerId(o)\n\t\tif id == \"\" {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Empty Id for owner %d. Unable to create wallet.\", k))\n\t\t}\n\t\tall = append(all, id)\n\t}\n\tw := &Wallet{\n\t\tOwners: all,\n\t\tDescription: desc,\n\t\tCurrency: currency,\n\t}\n\tw.service = m\n\treturn w, nil\n}",
"func NewContractDeployerFacade(\n\tprivateKey string,\n\trpc string,\n\tcontractArgs []string,\n\tcontractType string,\n\tgasLimit int,\n\tgasPrice int,\n) (*contractDeployerFacade, error) {\n\tfmt.Println(\"Starting account and blockchain connection process.\")\n\t// Process the private key from the flag\n\tuserAccount, err := ethacc.CreateAccount(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Printf(\"Successfully accessed account for Public Key: %s\\n\",\n\t\tuserAccount.Account)\n\n\t// Connect to the RPC client with the give URL\n\tethClient, err1 := ethrpc.CreateClient(rpc)\n\tif err1 != nil {\n\t\treturn nil, fmt.Errorf(\"error: failed to connect to given \" +\n\t\t\t\"rpc url : %v \\n\", err1)\n\t}\n\tdefer ethClient.CloseClient()\n\n\t// Attempt to load data from the blockchain given the connected RPC Client\n\tcurrBlockchainState, err2 := ethClient.LoadBlockChainState(context.Background())\n\tif err2 != nil {\n\t\treturn nil, fmt.Errorf(\"error: failed to retrieve account using \" +\n\t\t\t\"provided private key : %v\\n\", err2)\n\t}\n\n\tfmt.Printf(\"Succesfully connected to RPC client %s. 
Current Block Height %d \"+\n\t\t\", for chain id: %d\\n\", ethClient.RawUrl, currBlockchainState.BlockNumber,\n\t\tcurrBlockchainState.ChainId)\n\n\t// Using the client and the account get data needed for contract deployment\n\tauth, err3 := ethClient.GetDataForTransaction(context.Background(),\n\t\tuserAccount, currBlockchainState.ChainId, gasLimit, gasPrice)\n\tif err3 != nil {\n\t\treturn nil, fmt.Errorf(\"error: failed to get data for transaction \" +\n\t\t\t\"processing: %v\\n\", err3)\n\t}\n\n\tcontractDeployerFacade := &contractDeployerFacade{\n\t\tbaseContractInteractorFacade{\n\t\t\tuserAccount,\n\t\t\tethClient,\n\t\t\tcurrBlockchainState,\n\t\t\tauth,\n\t\t\tcontractType,\n\t\t},\n\t\tcontractArgs,\n\t}\n\tfmt.Println(\"Successfully completed account and blockchain connection \" +\n\t\t\"process.\")\n\treturn contractDeployerFacade, nil\n}",
"func NewRuntimefactoryCaller(address common.Address, caller bind.ContractCaller) (*RuntimefactoryCaller, error) {\n\tcontract, err := bindRuntimefactory(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &RuntimefactoryCaller{contract: contract}, nil\n}",
"func (_Wrapper *WrapperCaller) NewOwner(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Wrapper.contract.Call(opts, out, \"newOwner\")\n\treturn *ret0, err\n}",
"func newWalletDomainProxy(parent object.Parent, class object.Factory) (*walletDomainProxy, error) {\n\tinst, err := newWalletDomain(parent, class)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &walletDomainProxy{\n\t\tBaseSmartContractProxy: contract.BaseSmartContractProxy{\n\t\t\tInstance: inst,\n\t\t},\n\t}, nil\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractTransactor) Create(opts *bind.TransactOpts, _owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.contract.Transact(opts, \"create\", _owners, _required)\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractSession) Create(_owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.Contract.Create(&_MultiSigWalletFactoryContract.TransactOpts, _owners, _required)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchWalletCreated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorWalletCreated, walletAddress []common.Address, userAddress []common.Address) (event.Subscription, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorWalletCreated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func NewValidator(adapters map[string]*adapter.Info, templates map[string]*template.Info, stateful bool) store.BackendValidator {\n\tout := &validator{\n\t\tkinds: runtimeConfig.KindMap(adapters, templates),\n\t}\n\tif stateful {\n\t\tout.config = runtimeConfig.NewEphemeral(templates, adapters)\n\t}\n\treturn out\n}",
"func NewContractReceiverCaller(address common.Address, caller bind.ContractCaller) (*ContractReceiverCaller, error) {\n\tcontract, err := bindContractReceiver(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ContractReceiverCaller{contract: contract}, nil\n}",
"func NewSwapMiningCaller(address common.Address, caller bind.ContractCaller) (*SwapMiningCaller, error) {\n\tcontract, err := bindSwapMining(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SwapMiningCaller{contract: contract}, nil\n}",
"func NewWallet(username string) Wallet {\n\treturn Wallet{username, NewBlockchain()}\n}",
"func newWallet() *wallet {\n\treturn &wallet{\n\t\tindex: indexer.New(),\n\t\taccounts: make(map[uuid.UUID]*account),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewValidatorWalletCreatorTransactor creates a new writeonly instance of ValidatorWalletCreator, bound to a specific deployed contract.
|
func NewValidatorWalletCreatorTransactor(address common.Address, transactor bind.ContractTransactor) (*ValidatorWalletCreatorTransactor, error) {
contract, err := bindValidatorWalletCreator(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &ValidatorWalletCreatorTransactor{contract: contract}, nil
}
|
[
"func DeployValidatorWalletCreator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ValidatorWalletCreator, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ValidatorWalletCreatorBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func bindValidatorWalletCreator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func NewValidatorWalletCreator(address common.Address, backend bind.ContractBackend) (*ValidatorWalletCreator, error) {\n\tcontract, err := bindValidatorWalletCreator(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func NewValidatorWalletCreatorCaller(address common.Address, caller bind.ContractCaller) (*ValidatorWalletCreatorCaller, error) {\n\tcontract, err := bindValidatorWalletCreator(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorCaller{contract: contract}, nil\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractTransactor) Create(opts *bind.TransactOpts, _owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.contract.Transact(opts, \"create\", _owners, _required)\n}",
"func (s *Simulator) TxStakeCreateValidator(simAcc *SimAccount, commissions staking.CommissionRates) {\n\trequire.NotNil(s.t, simAcc)\n\n\tmsg := staking.NewMsgCreateValidator(\n\t\tsimAcc.Address.Bytes(),\n\t\tsimAcc.PublicKey,\n\t\ts.minSelfDelegationCoin,\n\t\tstaking.NewDescription(simAcc.Address.String(), \"\", \"\", \"\", \"\"),\n\t\tcommissions,\n\t\ts.minSelfDelegationCoin.Amount,\n\t)\n\ts.DeliverTx(s.GenTx(msg, simAcc), nil)\n}",
"func (t *DefaultTransactor) NewTransactor(account accounts.Account) (*bind.TransactOpts, error) {\n\tif !t.wallet.Contains(account) {\n\t\treturn nil, errors.New(\"account not found in wallet\")\n\t}\n\treturn &bind.TransactOpts{\n\t\tFrom: account.Address,\n\t\tSigner: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) {\n\t\t\tif address != account.Address {\n\t\t\t\treturn nil, errors.New(\"not authorized to sign this account\")\n\t\t\t}\n\t\t\t// Last parameter (chainID) is only relevant when making EIP155 compliant signatures.\n\t\t\t// Since we use only non EIP155 signatures, set this to zero value.\n\t\t\t// For more details, see here (https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md).\n\t\t\treturn t.wallet.SignTx(account, tx, big.NewInt(0))\n\t\t},\n\t}, nil\n}",
"func NewValidatorWalletCreatorFilterer(address common.Address, filterer bind.ContractFilterer) (*ValidatorWalletCreatorFilterer, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorFilterer{contract: contract}, nil\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractSession) Create(_owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.Contract.Create(&_MultiSigWalletFactoryContract.TransactOpts, _owners, _required)\n}",
"func NewTransferCreator() TransferTransactionCreator {\n\treturn transferCreator{NewTextComposer(), nil, data.DBWrite}\n}",
"func newWalletDomainProxy(parent object.Parent, class object.Factory) (*walletDomainProxy, error) {\n\tinst, err := newWalletDomain(parent, class)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &walletDomainProxy{\n\t\tBaseSmartContractProxy: contract.BaseSmartContractProxy{\n\t\t\tInstance: inst,\n\t\t},\n\t}, nil\n}",
"func newWallet() *wallet {\n\treturn &wallet{\n\t\tindex: indexer.New(),\n\t\taccounts: make(map[uuid.UUID]*account),\n\t}\n}",
"func CreateWalletFromEthMnemonic(mnemonic, password string, statusCb WalletCallback) error {\n\tif len(_config.chain.Miners) < 1 || len(_config.chain.Sharders) < 1 {\n\t\treturn fmt.Errorf(\"SDK not initialized\")\n\t}\n\tgo func() {\n\t\tsigScheme := zcncrypto.NewSignatureScheme(_config.chain.SignatureScheme)\n\t\t_, err := sigScheme.GenerateKeysWithEth(mnemonic, password)\n\t\tif err != nil {\n\t\t\tstatusCb.OnWalletCreateComplete(StatusError, \"\", err.Error())\n\t\t\treturn\n\t\t}\n\t}()\n\treturn nil\n}",
"func (m *MangoPay) NewWallet(owners ConsumerList, desc string, currency string) (*Wallet, error) {\n\tall := []string{}\n\tfor k, o := range owners {\n\t\tid := consumerId(o)\n\t\tif id == \"\" {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Empty Id for owner %d. Unable to create wallet.\", k))\n\t\t}\n\t\tall = append(all, id)\n\t}\n\tw := &Wallet{\n\t\tOwners: all,\n\t\tDescription: desc,\n\t\tCurrency: currency,\n\t}\n\tw.service = m\n\treturn w, nil\n}",
"func NewMultiSigWalletFactoryContractTransactor(address common.Address, transactor bind.ContractTransactor) (*MultiSigWalletFactoryContractTransactor, error) {\n\tcontract, err := bindMultiSigWalletFactoryContract(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &MultiSigWalletFactoryContractTransactor{contract: contract}, nil\n}",
"func NewWallet(username string) Wallet {\n\treturn Wallet{username, NewBlockchain()}\n}",
"func New(cs modules.ConsensusSet, tpool modules.TransactionPool, saveDir string) (w *Wallet, err error) {\n\tif cs == nil {\n\t\terr = errors.New(\"wallet cannot use a nil state\")\n\t\treturn\n\t}\n\tif tpool == nil {\n\t\terr = errors.New(\"wallet cannot use a nil transaction pool\")\n\t\treturn\n\t}\n\n\tw = &Wallet{\n\t\tstate: cs,\n\t\ttpool: tpool,\n\n\t\tsaveDir: saveDir,\n\n\t\tage: AgeDelay + 100,\n\t\tkeys: make(map[types.UnlockHash]*key),\n\t\ttimelockedKeys: make(map[types.BlockHeight][]types.UnlockHash),\n\t\tvisibleAddresses: make(map[types.UnlockHash]struct{}),\n\t\tsiafundAddresses: make(map[types.UnlockHash]struct{}),\n\t\tsiafundOutputs: make(map[types.SiafundOutputID]types.SiafundOutput),\n\n\t\ttransactions: make(map[string]*openTransaction),\n\n\t\tmu: sync.New(modules.SafeMutexDelay, 1),\n\t}\n\n\t// Create the wallet folder.\n\terr = os.MkdirAll(saveDir, 0700)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Try to load a previously saved wallet file. If it doesn't exist, assume\n\t// that we're creating a new wallet file.\n\t// TODO: log warning if no file found?\n\terr = w.load()\n\tif os.IsNotExist(err) {\n\t\terr = nil\n\t\t// No wallet file exists... make a visible address for the user.\n\t\t_, _, err = w.coinAddress(true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"couldn't load wallet file %s: %v\", saveDir, err)\n\t\treturn\n\t}\n\n\tw.tpool.TransactionPoolSubscribe(w)\n\n\treturn\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchWalletCreated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorWalletCreated, walletAddress []common.Address, userAddress []common.Address) (event.Subscription, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorWalletCreated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func NewTenantCreator(t mockConstructorTestingTNewTenantCreator) *TenantCreator {\n\tmock := &TenantCreator{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewValidatorWalletCreatorFilterer creates a new log filterer instance of ValidatorWalletCreator, bound to a specific deployed contract.
|
func NewValidatorWalletCreatorFilterer(address common.Address, filterer bind.ContractFilterer) (*ValidatorWalletCreatorFilterer, error) {
contract, err := bindValidatorWalletCreator(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &ValidatorWalletCreatorFilterer{contract: contract}, nil
}
|
[
"func bindValidatorWalletCreator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterWalletCreated(opts *bind.FilterOpts, walletAddress []common.Address, userAddress []common.Address) (*ValidatorWalletCreatorWalletCreatedIterator, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorWalletCreatedIterator{contract: _ValidatorWalletCreator.contract, event: \"WalletCreated\", logs: logs, sub: sub}, nil\n}",
"func NewValidatorWalletCreatorCaller(address common.Address, caller bind.ContractCaller) (*ValidatorWalletCreatorCaller, error) {\n\tcontract, err := bindValidatorWalletCreator(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorCaller{contract: contract}, nil\n}",
"func NewMultiSigWalletFactoryContractFilterer(address common.Address, filterer bind.ContractFilterer) (*MultiSigWalletFactoryContractFilterer, error) {\n\tcontract, err := bindMultiSigWalletFactoryContract(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &MultiSigWalletFactoryContractFilterer{contract: contract}, nil\n}",
"func NewValidatorWalletCreator(address common.Address, backend bind.ContractBackend) (*ValidatorWalletCreator, error) {\n\tcontract, err := bindValidatorWalletCreator(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchWalletCreated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorWalletCreated, walletAddress []common.Address, userAddress []common.Address) (event.Subscription, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorWalletCreated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func DeployValidatorWalletCreator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ValidatorWalletCreator, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ValidatorWalletCreatorBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func NewTradeFilterer(address common.Address, filterer bind.ContractFilterer) (*TradeFilterer, error) {\n\tcontract, err := bindTrade(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TradeFilterer{contract: contract}, nil\n}",
"func NewValidatorWalletCreatorTransactor(address common.Address, transactor bind.ContractTransactor) (*ValidatorWalletCreatorTransactor, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTransactor{contract: contract}, nil\n}",
"func NewDropkitContractFilterer(address common.Address, filterer bind.ContractFilterer) (*DropkitContractFilterer, error) {\n\tcontract, err := bindDropkitContract(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DropkitContractFilterer{contract: contract}, nil\n}",
"func NewBlockFilterer(params *chaincfg.Params,\n\treq *FilterBlocksRequest) *BlockFilterer {\n\n\t// Construct a reverse index by address string for the requested\n\t// external addresses.\n\tnExAddrs := len(req.ExternalAddrs)\n\texReverseFilter := make(map[string]waddrmgr.ScopedIndex, nExAddrs)\n\tfor scopedIndex, addr := range req.ExternalAddrs {\n\t\texReverseFilter[addr.EncodeAddress()] = scopedIndex\n\t}\n\n\t// Construct a reverse index by address string for the requested\n\t// internal addresses.\n\tnInAddrs := len(req.InternalAddrs)\n\tinReverseFilter := make(map[string]waddrmgr.ScopedIndex, nInAddrs)\n\tfor scopedIndex, addr := range req.InternalAddrs {\n\t\tinReverseFilter[addr.EncodeAddress()] = scopedIndex\n\t}\n\n\tfoundExternal := make(map[waddrmgr.KeyScope]map[uint32]struct{})\n\tfoundInternal := make(map[waddrmgr.KeyScope]map[uint32]struct{})\n\tfoundOutPoints := make(map[wire.OutPoint]btcutil.Address)\n\n\treturn &BlockFilterer{\n\t\tParams: params,\n\t\tExReverseFilter: exReverseFilter,\n\t\tInReverseFilter: inReverseFilter,\n\t\tWatchedOutPoints: req.WatchedOutPoints,\n\t\tFoundExternal: foundExternal,\n\t\tFoundInternal: foundInternal,\n\t\tFoundOutPoints: foundOutPoints,\n\t}\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseWalletCreated(log types.Log) (*ValidatorWalletCreatorWalletCreated, error) {\n\tevent := new(ValidatorWalletCreatorWalletCreated)\n\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func NewPancakeswapFilterer(address common.Address, filterer bind.ContractFilterer) (*PancakeswapFilterer, error) {\n\tcontract, err := bindPancakeswap(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PancakeswapFilterer{contract: contract}, nil\n}",
"func NewRuntimefactoryFilterer(address common.Address, filterer bind.ContractFilterer) (*RuntimefactoryFilterer, error) {\n\tcontract, err := bindRuntimefactory(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &RuntimefactoryFilterer{contract: contract}, nil\n}",
"func NewComptrollerFilterer(address common.Address, filterer bind.ContractFilterer) (*ComptrollerFilterer, error) {\n\tcontract, err := bindComptroller(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ComptrollerFilterer{contract: contract}, nil\n}",
"func NewFilter(chain *core.BlockChain) func(id ID) error {\n\treturn newFilter(\n\t\tchain.Config(),\n\t\tchain.Genesis().Hash(),\n\t\tfunc() uint64 {\n\t\t\treturn chain.CurrentHeader().Number.Uint64()\n\t\t},\n\t)\n}",
"func (_LivroVisitas *LivroVisitasFilterer) FilterNewVisitor(opts *bind.FilterOpts) (*LivroVisitasNewVisitorIterator, error) {\n\n\tlogs, sub, err := _LivroVisitas.contract.FilterLogs(opts, \"NewVisitor\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LivroVisitasNewVisitorIterator{contract: _LivroVisitas.contract, event: \"NewVisitor\", logs: logs, sub: sub}, nil\n}",
"func NewContractReceiverFilterer(address common.Address, filterer bind.ContractFilterer) (*ContractReceiverFilterer, error) {\n\tcontract, err := bindContractReceiver(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ContractReceiverFilterer{contract: contract}, nil\n}",
"func NewTipcontractFilterer(address common.Address, filterer bind.ContractFilterer) (*TipcontractFilterer, error) {\n\tcontract, err := bindTipcontract(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TipcontractFilterer{contract: contract}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
bindValidatorWalletCreator binds a generic wrapper to an already deployed contract.
|
func bindValidatorWalletCreator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
}
|
[
"func DeployValidatorWalletCreator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ValidatorWalletCreator, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ValidatorWalletCreatorABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ValidatorWalletCreatorBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func NewValidatorWalletCreatorCaller(address common.Address, caller bind.ContractCaller) (*ValidatorWalletCreatorCaller, error) {\n\tcontract, err := bindValidatorWalletCreator(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorCaller{contract: contract}, nil\n}",
"func NewValidatorWalletCreator(address common.Address, backend bind.ContractBackend) (*ValidatorWalletCreator, error) {\n\tcontract, err := bindValidatorWalletCreator(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreator{ValidatorWalletCreatorCaller: ValidatorWalletCreatorCaller{contract: contract}, ValidatorWalletCreatorTransactor: ValidatorWalletCreatorTransactor{contract: contract}, ValidatorWalletCreatorFilterer: ValidatorWalletCreatorFilterer{contract: contract}}, nil\n}",
"func bindMultiSigWalletFactoryContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(MultiSigWalletFactoryContractABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func NewValidatorWrapper(client client.Client, kvConfigProvider kvConfig.KubeVirtConfigProvider) *ValidatorWrapper {\n\tnetAttachDefProvider := NetworkAttachmentDefinitions{\n\t\tClient: client,\n\t}\n\tstorageClassesProvider := StorageClasses{\n\t\tClient: client,\n\t}\n\treturn &ValidatorWrapper{\n\t\tnetworkMappingValidator: NewNetworkMappingValidator(&netAttachDefProvider),\n\t\tstorageMappingValidator: NewStorageMappingValidator(&storageClassesProvider),\n\t\tkvConfigProvider: kvConfigProvider,\n\t}\n}",
"func NewValidatorWrapper(client client.Client, kvConfigProvider config.KubeVirtConfigProvider) *ValidatorWrapper {\n\tnetAttachDefProvider := NetworkAttachmentDefinitions{\n\t\tClient: client,\n\t}\n\tstorageClassesProvider := StorageClasses{\n\t\tClient: client,\n\t}\n\treturn &ValidatorWrapper{\n\t\tnetworkMappingValidator: NewNetworkMappingValidator(&netAttachDefProvider),\n\t\tstorageMappingValidator: NewStorageMappingValidator(&storageClassesProvider),\n\t\tkvConfigProvider: kvConfigProvider,\n\t}\n}",
"func NewValidatorWalletCreatorTransactor(address common.Address, transactor bind.ContractTransactor) (*ValidatorWalletCreatorTransactor, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTransactor{contract: contract}, nil\n}",
"func makeWalletValidateHandler(endpoints endpoint.Endpoints, options []grpc.ServerOption) grpc.Handler {\n\treturn grpc.NewServer(endpoints.WalletValidateEndpoint, decodeWalletValidateRequest, encodeWalletValidateResponse, options...)\n}",
"func InitValidatorServiceContractInstance() {\n\tinstance, err := binding.NewValidatorService(common.HexToAddress(\n\t\tconstants.ContractDeploymentAddress[constants.NetActive][constants.ValidatorService]),\n\t\tconnection.ETHCLIENT)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tlog.Println(\"SKALE validator service contract is loaded\")\n\tvalidatorServiceContractInstance = instance\n}",
"func bindEthvault(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, ) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(EthvaultABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, nil), nil\n}",
"func NewValidatorWalletCreatorFilterer(address common.Address, filterer bind.ContractFilterer) (*ValidatorWalletCreatorFilterer, error) {\n\tcontract, err := bindValidatorWalletCreator(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorFilterer{contract: contract}, nil\n}",
"func (k Keeper) unbondValidator(ctx sdk.Context, validator types.Validator) types.Validator {\n\n\tstore := ctx.KVStore(k.storeKey)\n\tpool := k.GetPool(ctx)\n\n\t// sanity check\n\tif validator.Status == sdk.Unbonded {\n\t\tpanic(fmt.Sprintf(\"should not already be unbonded, validator: %v\\n\", validator))\n\t}\n\n\t// set the status\n\tvalidator, pool = validator.UpdateStatus(pool, sdk.Unbonded)\n\tk.SetPool(ctx, pool)\n\n\t// save the now unbonded validator record\n\tk.SetValidator(ctx, validator)\n\n\t// add to accumulated changes for tendermint\n\tbzABCI := k.cdc.MustMarshalBinary(validator.ABCIValidatorZero())\n\tstore.Set(GetTendermintUpdatesKey(validator.Owner), bzABCI)\n\n\t// also remove from the Bonded types.Validators Store\n\tstore.Delete(GetValidatorsBondedIndexKey(validator.Owner))\n\treturn validator\n}",
"func newWalletDomainProxy(parent object.Parent, class object.Factory) (*walletDomainProxy, error) {\n\tinst, err := newWalletDomain(parent, class)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &walletDomainProxy{\n\t\tBaseSmartContractProxy: contract.BaseSmartContractProxy{\n\t\t\tInstance: inst,\n\t\t},\n\t}, nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorCaller) Template(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _ValidatorWalletCreator.contract.Call(opts, &out, \"template\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}",
"func bindContractReceiver(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContractReceiverABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func bindSwapMining(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SwapMiningABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func bindParameterizerContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ParameterizerContractABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func bindDropkitContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(DropkitContractABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}",
"func bindComptroller(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ComptrollerABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Template is a free data retrieval call binding the contract method 0x6f2ddd93. Solidity: function template() view returns(address)
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorCaller) Template(opts *bind.CallOpts) (common.Address, error) {
var out []interface{}
err := _ValidatorWalletCreator.contract.Call(opts, &out, "template")
if err != nil {
return *new(common.Address), err
}
out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
return out0, err
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorSession) Template() (common.Address, error) {\n\treturn _ValidatorWalletCreator.Contract.Template(&_ValidatorWalletCreator.CallOpts)\n}",
"func Template(name string, data interface{}) Response {\n\treturn &templateResponse{name, data}\n}",
"func template(client *conjurapi.Client) ([]byte, error) {\n\tid := helper.ReadMandatoryArg(2, \"templateID\", \"\", \"any valid template id within the namespace\")\n\tid = \"templates/\" + id\n\tresult, err := camapi.GetSecret(client, id)\n\n\treturn []byte(string(result) + \"\\n\"), err\n}",
"func (storage *Storage) Template(ctx context.Context, id domain.ID) (domain.Template, error) {\n\tvar template domain.Template\n\n\tconn, closer, err := storage.connection(ctx)\n\tif err != nil {\n\t\treturn template, err\n\t}\n\tdefer closer()\n\n\tentity, err := storage.exec.TemplateReader(ctx, conn).ReadByID(id)\n\tif err != nil {\n\t\treturn template, err\n\t}\n\treturn entity.Definition, nil\n}",
"func runTemplate(w http.ResponseWriter, r *http.Request, name string) {\n\tbuf := new(bytes.Buffer)\n\tT(name).Execute(buf, nil) // TODO add correct data here\n\tbuf.WriteTo(w)\n}",
"func (e *email) executeTemplate(t *template.Template) (*bytes.Buffer, error) {\n var emailBody bytes.Buffer\n if err := t.ExecuteTemplate(&emailBody, nameWithHtmlExtension(constants.IntroduceTemplateName), e); err != nil {\n return nil, err\n }\n return &emailBody, nil\n}",
"func (fe *Feedback) GetTemplates(xQuery FeedbackGetTemplatesXQuery) (TemplateGetResponse, error){\n var (\n rawRequest *RawRequest\n response []byte\n err error\n getTemplatesResponse TemplateGetResponse\n\t )\n\n \n\n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n //API call\n rawRequest = NewRequest(\n fe.config,\n \"get\",\n \"/service/application/feedback/v1.0/template/\",\n nil,\n xQuery,\n nil)\n response, err = rawRequest.Execute()\n if err != nil {\n return TemplateGetResponse{}, err\n\t }\n \n err = json.Unmarshal(response, &getTemplatesResponse)\n if err != nil {\n return TemplateGetResponse{}, common.NewFDKError(err.Error())\n }\n return getTemplatesResponse, nil\n \n }",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactorSession) SetTemplate(_template common.Address) (*types.Transaction, error) {\n\treturn _ValidatorWalletCreator.Contract.SetTemplate(&_ValidatorWalletCreator.TransactOpts, _template)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactor) SetTemplate(opts *bind.TransactOpts, _template common.Address) (*types.Transaction, error) {\n\treturn _ValidatorWalletCreator.contract.Transact(opts, \"setTemplate\", _template)\n}",
"func (d *defaultTemplateBuilder) RawTemplate(name string) (string, error) {\n\tdata, err := d.execute(name)\n\treturn string(data.json), err\n}",
"func (t *Template) Lookup(name string) *Template {}",
"func TemplateApi(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method == http.MethodGet {\n\t\thtml_templates.ExecuteTemplate(w, \"display_template.tmpl\", nil)\n\t\treturn\n\t}\n\n\tif r.Method == protocol.MethodFetch {\n\t\t//create json data for the browser asnychronously\n\n\t\tid := r.FormValue(query.TEMPLATE_ID)\n\t\tfmt.Println(id)\n\t\ta, t, err := templates.CheckTemplateById(id)\n\t\tif !a || err != nil {\n\t\t\tm, err := templates.GetAllTemplates()\n\t\t\tif err != nil {\n\t\t\t\tm[query.SUCCESS] = false\n\t\t\t} else {\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t}\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"HAHAHA\")\n\t\teid := r.FormValue(query.ELEMENT_ID)\n\t\ti, j := t.CheckElement(eid)\n\t\tif i {\n\t\t\tm := templates.CreateAPIData(t)\n\t\t\tm = m[query.TEMPLATE_ELEMENTS].([]map[string]interface{})[j]\n\t\t\tm[query.SUCCESS] = true\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t} else {\n\t\t\tm := templates.CreateAPIData(t)\n\t\t\tfmt.Println(m)\n\t\t\tm[query.SUCCESS] = true\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Method == http.MethodPost {\n\t\t//create json data for the browser asnychronously\n\n\t\tt := r.FormValue(\"t\")\n\t\tif len(t) == 0 {\n\n\t\t} else {\n\t\t\tif t == \"t\" {\n\t\t\t\t//generate new template and return the json\n\t\t\t\tfmt.Println(\"Creating a new Template now\")\n\t\t\t\tm := templates.CreateAPIData(templates.CreateNew())\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif t == \"e\" {\n\t\t\t\tid := 
r.FormValue(query.TEMPLATE_ID)\n\t\t\t\tfmt.Println(id)\n\t\t\t\ttemp, _ := model.GetTemplateByTemplateID(id)\n\t\t\t\te, _ := temp.AddElement()\n\t\t\t\tfmt.Println(\">>>>>>>>>>>>>>>>>>>>The new element\", e)\n\t\t\t\tm := templates.CreateAPIDataElement(temp.GetID(), e)\n\t\t\t\t//generate new element for the template id and return the json\n\t\t\t\tfmt.Println(m)\n\t\t\t\tfmt.Println(temp)\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tif r.Method == http.MethodDelete {\n\t\ttid := r.FormValue(query.TEMPLATE_ID)\n\t\teid := r.FormValue(query.ELEMENT_ID)\n\t\tfmt.Println(\"Remove the Data\", tid, eid)\n\t\ta, t, err := templates.CheckTemplateById(tid)\n\t\tfmt.Println(a, t, err)\n\t\tif !a || err != nil {\n\t\t\tm := make(map[string]interface{})\n\t\t\tm[query.SUCCESS] = false\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t}\n\t\tif a {\n\t\t\tfmt.Println(\"TEMPLATES EXISTS\")\n\t\t\tif len(eid) == 0 {\n\t\t\t\tt.Remove()\n\t\t\t\tm, _ := templates.GetAllTemplates()\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti, j := t.CheckElement(eid)\n\t\t\tif i {\n\t\t\t\tt.RemoveElementByIndex(j)\n\t\t\t\tfmt.Println(t)\n\t\t\t\tm := templates.CreateAPIData(t)\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tm := make(map[string]interface{})\n\t\tm[query.SUCCESS] = false\n\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\t\tw.Write(b)\n\t\treturn\n\t}\n\n\tif r.Method == http.MethodPut {\n\t\tfmt.Println(\"\\n\\nSAVE NOW\")\n\t\ttid := r.FormValue(query.TEMPLATE_ID)\n\t\teid := r.FormValue(query.ELEMENT_ID)\n\t\tfmt.Println(tid, eid)\n\t\ta, t, err := templates.CheckTemplateById(tid)\n\t\tif !a || err != nil {\n\t\t\tm := make(map[string]interface{})\n\t\t\tm[query.SUCCESS] = false\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t} else {\n\t\t\ti, j := t.CheckElement(eid)\n\t\t\tif !i {\n\t\t\t\ttnm := r.FormValue(query.TEMPLATE_NAME)\n\t\t\t\tt.SetName(tnm)\n\t\t\t\tm := make(map[string]interface{})\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tq, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(q)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"HEHEHEHEHEHEHEHEHEHEH\")\n\t\t\t\tb, _ := strconv.ParseBool(r.FormValue(query.ELEMENT_CONTENT_STATIC))\n\t\t\t\tx_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_X))\n\t\t\t\ty_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_Y))\n\t\t\t\tw_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_W))\n\t\t\t\th_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_H))\n\t\t\t\tfs, _ := strconv.Atoi(r.FormValue(query.ELEMENT_FONT_SIZE))\n\t\t\t\tps, _ := strconv.Atoi(r.FormValue(query.ELEMENT_PIXEL_SIZE))\n\n\t\t\t\tfmt.Println(x_, y_, w_, h_)\n\t\t\t\tt.SaveSettings(\n\t\t\t\t\tj,\n\t\t\t\t\tr.FormValue(query.TEMPLATE_NAME),\n\t\t\t\t\tr.FormValue(query.ELEMENT_ID),\n\t\t\t\t\tr.FormValue(query.ELEMENT_CONTENT),\n\t\t\t\t\tb,\n\t\t\t\t\tx_,\n\t\t\t\t\ty_,\n\t\t\t\t\tw_,\n\t\t\t\t\th_,\n\t\t\t\t\tr.FormValue(query.ELEMENT_COLOR),\n\t\t\t\t\tr.FormValue(query.ELEMENT_FILL_COLOR),\n\t\t\t\t\tfs,\n\t\t\t\t\tps,\n\t\t\t\t\tr.FormValue(query.ELEMENT_PIXEL_STYLE),\n\t\t\t\t\tr.FormValue(query.ELEMENT_FORM))\n\n\t\t\t\tm := 
make(map[string]interface{})\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tq, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(q)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n}",
"func (p *partial) template() (*Template, error) {\n\tif p.tpl == nil {\n\t\tvar err error\n\n\t\tp.tpl, err = Parse(p.source)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn p.tpl, nil\n}",
"func (t AbstractTags) Template() string {\n\tvar buf bytes.Buffer\n\tfor i, tag := range t {\n\t\tif i == 0 {\n\t\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v-%%v,\", tag.Key, tag.Value)))\n\t\t} else {\n\t\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v,\", tag.Key, tag.Value)))\n\t\t}\n\t}\n\n\tb := buf.Bytes()\n\tb = b[0 : len(b)-1]\n\n\treturn string(b)\n}",
"func (w *GBTWorker) getResult(useCoinbaseValue bool, submitOld *bool) (*json.GetBlockTemplateResult, error) {\n\t// Ensure the timestamps are still in valid range for the template.\n\t// This should really only ever happen if the local clock is changed\n\t// after the template is generated, but it's important to avoid serving\n\t// invalid block templates.\n\ttemplate := w.miner.template\n\tif template == nil {\n\t\treturn nil, fmt.Errorf(\"No template\")\n\t}\n\tmsgBlock := template.Block\n\theader := &msgBlock.Header\n\tadjustedTime := w.miner.timeSource.AdjustedTime()\n\tmaxTime := adjustedTime.Add(time.Second * blockchain.MaxTimeOffsetSeconds)\n\tif header.Timestamp.After(maxTime) {\n\t\treturn nil, rpc.RpcInvalidError(\"The template time is after the maximum allowed time for a block - template time %v, maximum time %v\", adjustedTime, maxTime)\n\t}\n\t// Convert each transaction in the block template to a template result\n\t// transaction. The result does not include the coinbase, so notice\n\t// the adjustments to the various lengths and indices.\n\tnumTx := len(template.Block.Transactions)\n\ttransactions := make([]json.GetBlockTemplateResultTx, 0, numTx-1)\n\ttxIndex := make(map[hash.Hash]int64, numTx)\n\tfor i, tx := range template.Block.Transactions {\n\t\ttxHash := tx.TxHash()\n\t\ttxIndex[txHash] = int64(i)\n\n\t\t// Skip the coinbase transaction.\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Create an array of 1-based indices to transactions that come\n\t\t// before this one in the transactions list which this one\n\t\t// depends on. This is necessary since the created block must\n\t\t// ensure proper ordering of the dependencies. 
A map is used\n\t\t// before creating the final array to prevent duplicate entries\n\t\t// when multiple inputs reference the same transaction.\n\t\tdependsMap := make(map[int64]struct{})\n\t\tfor _, txIn := range tx.TxIn {\n\t\t\tif idx, ok := txIndex[txIn.PreviousOut.Hash]; ok {\n\t\t\t\tdependsMap[idx] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tdepends := make([]int64, 0, len(dependsMap))\n\t\tfor idx := range dependsMap {\n\t\t\tdepends = append(depends, idx)\n\t\t}\n\n\t\t// Serialize the transaction for later conversion to hex.\n\t\ttxBuf, err := tx.Serialize()\n\t\tif err != nil {\n\t\t\tcontext := \"Failed to serialize transaction\"\n\t\t\treturn nil, rpc.RpcInvalidError(err.Error(), context)\n\n\t\t}\n\n\t\t//TODO, bTx := btcutil.NewTx(tx)\n\t\tresultTx := json.GetBlockTemplateResultTx{\n\t\t\tData: hex.EncodeToString(txBuf),\n\t\t\tHash: txHash.String(),\n\t\t\tDepends: depends,\n\t\t\tFee: template.Fees[i],\n\t\t\tSigOps: template.SigOpCounts[i],\n\t\t\t//TODO, blockchain.GetTransactionWeight(bTx)\n\t\t\tWeight: 2000000,\n\t\t}\n\t\ttransactions = append(transactions, resultTx)\n\t}\n\n\t//parents\n\tparents := []json.GetBlockTemplateResultPt{}\n\tfor _, v := range template.Block.Parents {\n\t\tresultPt := json.GetBlockTemplateResultPt{\n\t\t\tData: hex.EncodeToString(v.Bytes()),\n\t\t\tHash: v.String(),\n\t\t}\n\t\tparents = append(parents, resultPt)\n\t}\n\t//TODO,submitOld\n\n\t// gbtMutableFields are the manipulations the server allows to be made\n\t// to block templates generated by the getblocktemplate RPC. 
It is\n\t// declared here to avoid the overhead of creating the slice on every\n\t// invocation for constant data.\n\tgbtMutableFields := []string{\n\t\t\"time\", \"transactions/add\", \"prevblock\", \"coinbase/append\",\n\t}\n\tgbtCapabilities := []string{\"proposal\"}\n\tdiffBig := pow.CompactToBig(template.Difficulty)\n\ttarget := fmt.Sprintf(\"%064x\", diffBig)\n\tlongPollID := encodeTemplateID(template.Block.Header.ParentRoot, w.miner.lastTemplate)\n\n\tblockFeeMap := map[int]int64{}\n\tfor coinid, val := range template.BlockFeesMap {\n\t\tblockFeeMap[int(coinid)] = val\n\t}\n\treply := json.GetBlockTemplateResult{\n\t\tStateRoot: template.Block.Header.StateRoot.String(),\n\t\tCurTime: template.Block.Header.Timestamp.Unix(),\n\t\tHeight: int64(template.Height),\n\t\tNodeInfo: version.String() + \":\" + w.miner.policy.CoinbaseGenerator.PeerID(),\n\t\tBlues: template.Blues,\n\t\tPreviousHash: template.Block.Header.ParentRoot.String(),\n\t\tWeightLimit: types.MaxBlockWeight,\n\t\tSigOpLimit: types.MaxBlockSigOpsCost,\n\t\tSizeLimit: types.MaxBlockPayload,\n\t\t//TODO,transactions\n\t\t// make([]json.GetBlockTemplateResultTx, 0, 1)\n\t\tParents: parents,\n\t\tTransactions: transactions,\n\t\tVersion: template.Block.Header.Version,\n\t\tLongPollID: longPollID,\n\t\t//TODO, submitOld\n\t\tSubmitOld: submitOld,\n\t\tPowDiffReference: json.PowDiffReference{\n\t\t\tTarget: target,\n\t\t\tNBits: strconv.FormatInt(int64(template.Difficulty), 16),\n\t\t},\n\t\tMinTime: w.miner.minTimestamp.Unix(),\n\t\tMaxTime: maxTime.Unix(),\n\t\t// gbtMutableFields\n\t\tMutable: gbtMutableFields,\n\t\tNonceRange: gbtNonceRange,\n\t\t// TODO, Capabilities\n\t\tCapabilities: gbtCapabilities,\n\t\tBlockFeesMap: blockFeeMap,\n\t\tCoinbaseVersion: params.ActiveNetParams.Params.CoinbaseConfig.GetCurrentVersion(int64(template.Height)),\n\t}\n\n\tif useCoinbaseValue {\n\t\treply.CoinbaseAux = w.coinbaseAux\n\t\tv := 
uint64(msgBlock.Transactions[0].TxOut[0].Amount.Value)\n\t\treply.CoinbaseValue = &v\n\t} else {\n\t\t// Ensure the template has a valid payment address associated\n\t\t// with it when a full coinbase is requested.\n\t\tif !template.ValidPayAddress {\n\t\t\treturn nil, rpc.RpcInvalidError(\"A coinbase transaction has been \" +\n\t\t\t\t\"requested, but the server has not \" +\n\t\t\t\t\"been configured with any payment \" +\n\t\t\t\t\"addresses via --miningaddr\")\n\t\t}\n\t\t// Serialize the transaction for conversion to hex.\n\t\ttx := msgBlock.Transactions[0]\n\t\ttxBuf, err := tx.Serialize()\n\t\tif err != nil {\n\t\t\tcontext := \"Failed to serialize transaction\"\n\t\t\treturn nil, rpc.RpcInvalidError(\"%s %s\", err.Error(), context)\n\t\t}\n\n\t\tresultTx := json.GetBlockTemplateResultTx{\n\t\t\tData: hex.EncodeToString(txBuf),\n\t\t\tHash: tx.TxHash().String(),\n\t\t\tDepends: []int64{},\n\t\t\tFee: template.Fees[0],\n\t\t\tSigOps: template.SigOpCounts[0],\n\t\t}\n\n\t\treply.CoinbaseTxn = &resultTx\n\t}\n\n\treturn &reply, nil\n}",
"func DALTemplate() (*template.Template, error) {\n\ttmpl, err := tmps.ReadFile(\"templates/dal.gotmp\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getTemplate(string(tmpl))\n}",
"func (co *PlatformAppCommunication) GetEmailTemplates(xQuery PlatformAppGetEmailTemplatesXQuery) (EmailTemplates, error) {\n var (\n rawRequest *RawRequest\n response []byte\n err error\n getEmailTemplatesResponse EmailTemplates\n\t )\n\n \n\n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n //API call\n rawRequest = NewRequest(\n co.config,\n \"get\",\n fmt.Sprintf(\"/service/platform/communication/v1.0/company/%s/application/%s/email/templates\",co.CompanyID, co.ApplicationID),\n nil,\n xQuery,\n nil)\n response, err = rawRequest.Execute()\n if err != nil {\n return EmailTemplates{}, err\n\t }\n \n err = json.Unmarshal(response, &getEmailTemplatesResponse)\n if err != nil {\n return EmailTemplates{}, common.NewFDKError(err.Error())\n }\n return getEmailTemplatesResponse, nil\n \n }",
"func (s *LoadBalancedWebService) Template() (string, error) {\n\trulePriorityLambda, err := s.parser.Read(lbWebSvcRulePriorityGeneratorPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\toutputs, err := s.addonsOutputs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsidecars, err := s.manifest.Sidecar.SidecarsOpts()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"convert the sidecar configuration for service %s: %w\", s.name, err)\n\t}\n\tcontent, err := s.parser.ParseLoadBalancedWebService(template.ServiceOpts{\n\t\tVariables: s.manifest.Variables,\n\t\tSecrets: s.manifest.Secrets,\n\t\tNestedStack: outputs,\n\t\tSidecars: sidecars,\n\t\tLogConfig: s.manifest.LogConfigOpts(),\n\t\tRulePriorityLambda: rulePriorityLambda.String(),\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn content.String(), nil\n}",
"func DBTemplate() (*template.Template, error) {\n\ttmpl, err := tmps.ReadFile(\"templates/db.gotmp\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getTemplate(string(tmpl))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Template is a free data retrieval call binding the contract method 0x6f2ddd93. Solidity: function template() view returns(address)
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorSession) Template() (common.Address, error) {
return _ValidatorWalletCreator.Contract.Template(&_ValidatorWalletCreator.CallOpts)
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorCaller) Template(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _ValidatorWalletCreator.contract.Call(opts, &out, \"template\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}",
"func Template(name string, data interface{}) Response {\n\treturn &templateResponse{name, data}\n}",
"func template(client *conjurapi.Client) ([]byte, error) {\n\tid := helper.ReadMandatoryArg(2, \"templateID\", \"\", \"any valid template id within the namespace\")\n\tid = \"templates/\" + id\n\tresult, err := camapi.GetSecret(client, id)\n\n\treturn []byte(string(result) + \"\\n\"), err\n}",
"func (storage *Storage) Template(ctx context.Context, id domain.ID) (domain.Template, error) {\n\tvar template domain.Template\n\n\tconn, closer, err := storage.connection(ctx)\n\tif err != nil {\n\t\treturn template, err\n\t}\n\tdefer closer()\n\n\tentity, err := storage.exec.TemplateReader(ctx, conn).ReadByID(id)\n\tif err != nil {\n\t\treturn template, err\n\t}\n\treturn entity.Definition, nil\n}",
"func runTemplate(w http.ResponseWriter, r *http.Request, name string) {\n\tbuf := new(bytes.Buffer)\n\tT(name).Execute(buf, nil) // TODO add correct data here\n\tbuf.WriteTo(w)\n}",
"func (e *email) executeTemplate(t *template.Template) (*bytes.Buffer, error) {\n var emailBody bytes.Buffer\n if err := t.ExecuteTemplate(&emailBody, nameWithHtmlExtension(constants.IntroduceTemplateName), e); err != nil {\n return nil, err\n }\n return &emailBody, nil\n}",
"func (fe *Feedback) GetTemplates(xQuery FeedbackGetTemplatesXQuery) (TemplateGetResponse, error){\n var (\n rawRequest *RawRequest\n response []byte\n err error\n getTemplatesResponse TemplateGetResponse\n\t )\n\n \n\n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n //API call\n rawRequest = NewRequest(\n fe.config,\n \"get\",\n \"/service/application/feedback/v1.0/template/\",\n nil,\n xQuery,\n nil)\n response, err = rawRequest.Execute()\n if err != nil {\n return TemplateGetResponse{}, err\n\t }\n \n err = json.Unmarshal(response, &getTemplatesResponse)\n if err != nil {\n return TemplateGetResponse{}, common.NewFDKError(err.Error())\n }\n return getTemplatesResponse, nil\n \n }",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactorSession) SetTemplate(_template common.Address) (*types.Transaction, error) {\n\treturn _ValidatorWalletCreator.Contract.SetTemplate(&_ValidatorWalletCreator.TransactOpts, _template)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactor) SetTemplate(opts *bind.TransactOpts, _template common.Address) (*types.Transaction, error) {\n\treturn _ValidatorWalletCreator.contract.Transact(opts, \"setTemplate\", _template)\n}",
"func (d *defaultTemplateBuilder) RawTemplate(name string) (string, error) {\n\tdata, err := d.execute(name)\n\treturn string(data.json), err\n}",
"func (t *Template) Lookup(name string) *Template {}",
"func TemplateApi(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method == http.MethodGet {\n\t\thtml_templates.ExecuteTemplate(w, \"display_template.tmpl\", nil)\n\t\treturn\n\t}\n\n\tif r.Method == protocol.MethodFetch {\n\t\t//create json data for the browser asnychronously\n\n\t\tid := r.FormValue(query.TEMPLATE_ID)\n\t\tfmt.Println(id)\n\t\ta, t, err := templates.CheckTemplateById(id)\n\t\tif !a || err != nil {\n\t\t\tm, err := templates.GetAllTemplates()\n\t\t\tif err != nil {\n\t\t\t\tm[query.SUCCESS] = false\n\t\t\t} else {\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t}\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"HAHAHA\")\n\t\teid := r.FormValue(query.ELEMENT_ID)\n\t\ti, j := t.CheckElement(eid)\n\t\tif i {\n\t\t\tm := templates.CreateAPIData(t)\n\t\t\tm = m[query.TEMPLATE_ELEMENTS].([]map[string]interface{})[j]\n\t\t\tm[query.SUCCESS] = true\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t} else {\n\t\t\tm := templates.CreateAPIData(t)\n\t\t\tfmt.Println(m)\n\t\t\tm[query.SUCCESS] = true\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Method == http.MethodPost {\n\t\t//create json data for the browser asnychronously\n\n\t\tt := r.FormValue(\"t\")\n\t\tif len(t) == 0 {\n\n\t\t} else {\n\t\t\tif t == \"t\" {\n\t\t\t\t//generate new template and return the json\n\t\t\t\tfmt.Println(\"Creating a new Template now\")\n\t\t\t\tm := templates.CreateAPIData(templates.CreateNew())\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif t == \"e\" {\n\t\t\t\tid := 
r.FormValue(query.TEMPLATE_ID)\n\t\t\t\tfmt.Println(id)\n\t\t\t\ttemp, _ := model.GetTemplateByTemplateID(id)\n\t\t\t\te, _ := temp.AddElement()\n\t\t\t\tfmt.Println(\">>>>>>>>>>>>>>>>>>>>The new element\", e)\n\t\t\t\tm := templates.CreateAPIDataElement(temp.GetID(), e)\n\t\t\t\t//generate new element for the template id and return the json\n\t\t\t\tfmt.Println(m)\n\t\t\t\tfmt.Println(temp)\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tif r.Method == http.MethodDelete {\n\t\ttid := r.FormValue(query.TEMPLATE_ID)\n\t\teid := r.FormValue(query.ELEMENT_ID)\n\t\tfmt.Println(\"Remove the Data\", tid, eid)\n\t\ta, t, err := templates.CheckTemplateById(tid)\n\t\tfmt.Println(a, t, err)\n\t\tif !a || err != nil {\n\t\t\tm := make(map[string]interface{})\n\t\t\tm[query.SUCCESS] = false\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t}\n\t\tif a {\n\t\t\tfmt.Println(\"TEMPLATES EXISTS\")\n\t\t\tif len(eid) == 0 {\n\t\t\t\tt.Remove()\n\t\t\t\tm, _ := templates.GetAllTemplates()\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti, j := t.CheckElement(eid)\n\t\t\tif i {\n\t\t\t\tt.RemoveElementByIndex(j)\n\t\t\t\tfmt.Println(t)\n\t\t\t\tm := templates.CreateAPIData(t)\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(b)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tm := make(map[string]interface{})\n\t\tm[query.SUCCESS] = false\n\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\t\tw.Write(b)\n\t\treturn\n\t}\n\n\tif r.Method == http.MethodPut {\n\t\tfmt.Println(\"\\n\\nSAVE NOW\")\n\t\ttid := r.FormValue(query.TEMPLATE_ID)\n\t\teid := r.FormValue(query.ELEMENT_ID)\n\t\tfmt.Println(tid, eid)\n\t\ta, t, err := templates.CheckTemplateById(tid)\n\t\tif !a || err != nil {\n\t\t\tm := make(map[string]interface{})\n\t\t\tm[query.SUCCESS] = false\n\t\t\tb, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(b)\n\t\t\treturn\n\t\t} else {\n\t\t\ti, j := t.CheckElement(eid)\n\t\t\tif !i {\n\t\t\t\ttnm := r.FormValue(query.TEMPLATE_NAME)\n\t\t\t\tt.SetName(tnm)\n\t\t\t\tm := make(map[string]interface{})\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tq, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(q)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"HEHEHEHEHEHEHEHEHEHEH\")\n\t\t\t\tb, _ := strconv.ParseBool(r.FormValue(query.ELEMENT_CONTENT_STATIC))\n\t\t\t\tx_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_X))\n\t\t\t\ty_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_Y))\n\t\t\t\tw_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_W))\n\t\t\t\th_, _ := strconv.Atoi(r.FormValue(query.ELEMENT_POSITION_H))\n\t\t\t\tfs, _ := strconv.Atoi(r.FormValue(query.ELEMENT_FONT_SIZE))\n\t\t\t\tps, _ := strconv.Atoi(r.FormValue(query.ELEMENT_PIXEL_SIZE))\n\n\t\t\t\tfmt.Println(x_, y_, w_, h_)\n\t\t\t\tt.SaveSettings(\n\t\t\t\t\tj,\n\t\t\t\t\tr.FormValue(query.TEMPLATE_NAME),\n\t\t\t\t\tr.FormValue(query.ELEMENT_ID),\n\t\t\t\t\tr.FormValue(query.ELEMENT_CONTENT),\n\t\t\t\t\tb,\n\t\t\t\t\tx_,\n\t\t\t\t\ty_,\n\t\t\t\t\tw_,\n\t\t\t\t\th_,\n\t\t\t\t\tr.FormValue(query.ELEMENT_COLOR),\n\t\t\t\t\tr.FormValue(query.ELEMENT_FILL_COLOR),\n\t\t\t\t\tfs,\n\t\t\t\t\tps,\n\t\t\t\t\tr.FormValue(query.ELEMENT_PIXEL_STYLE),\n\t\t\t\t\tr.FormValue(query.ELEMENT_FORM))\n\n\t\t\t\tm := 
make(map[string]interface{})\n\t\t\t\tm[query.SUCCESS] = true\n\t\t\t\tq, _ := json.MarshalIndent(m, \"\", \" \")\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.Write(q)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n}",
"func (p *partial) template() (*Template, error) {\n\tif p.tpl == nil {\n\t\tvar err error\n\n\t\tp.tpl, err = Parse(p.source)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn p.tpl, nil\n}",
"func (t AbstractTags) Template() string {\n\tvar buf bytes.Buffer\n\tfor i, tag := range t {\n\t\tif i == 0 {\n\t\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v-%%v,\", tag.Key, tag.Value)))\n\t\t} else {\n\t\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v,\", tag.Key, tag.Value)))\n\t\t}\n\t}\n\n\tb := buf.Bytes()\n\tb = b[0 : len(b)-1]\n\n\treturn string(b)\n}",
"func (w *GBTWorker) getResult(useCoinbaseValue bool, submitOld *bool) (*json.GetBlockTemplateResult, error) {\n\t// Ensure the timestamps are still in valid range for the template.\n\t// This should really only ever happen if the local clock is changed\n\t// after the template is generated, but it's important to avoid serving\n\t// invalid block templates.\n\ttemplate := w.miner.template\n\tif template == nil {\n\t\treturn nil, fmt.Errorf(\"No template\")\n\t}\n\tmsgBlock := template.Block\n\theader := &msgBlock.Header\n\tadjustedTime := w.miner.timeSource.AdjustedTime()\n\tmaxTime := adjustedTime.Add(time.Second * blockchain.MaxTimeOffsetSeconds)\n\tif header.Timestamp.After(maxTime) {\n\t\treturn nil, rpc.RpcInvalidError(\"The template time is after the maximum allowed time for a block - template time %v, maximum time %v\", adjustedTime, maxTime)\n\t}\n\t// Convert each transaction in the block template to a template result\n\t// transaction. The result does not include the coinbase, so notice\n\t// the adjustments to the various lengths and indices.\n\tnumTx := len(template.Block.Transactions)\n\ttransactions := make([]json.GetBlockTemplateResultTx, 0, numTx-1)\n\ttxIndex := make(map[hash.Hash]int64, numTx)\n\tfor i, tx := range template.Block.Transactions {\n\t\ttxHash := tx.TxHash()\n\t\ttxIndex[txHash] = int64(i)\n\n\t\t// Skip the coinbase transaction.\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Create an array of 1-based indices to transactions that come\n\t\t// before this one in the transactions list which this one\n\t\t// depends on. This is necessary since the created block must\n\t\t// ensure proper ordering of the dependencies. 
A map is used\n\t\t// before creating the final array to prevent duplicate entries\n\t\t// when multiple inputs reference the same transaction.\n\t\tdependsMap := make(map[int64]struct{})\n\t\tfor _, txIn := range tx.TxIn {\n\t\t\tif idx, ok := txIndex[txIn.PreviousOut.Hash]; ok {\n\t\t\t\tdependsMap[idx] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tdepends := make([]int64, 0, len(dependsMap))\n\t\tfor idx := range dependsMap {\n\t\t\tdepends = append(depends, idx)\n\t\t}\n\n\t\t// Serialize the transaction for later conversion to hex.\n\t\ttxBuf, err := tx.Serialize()\n\t\tif err != nil {\n\t\t\tcontext := \"Failed to serialize transaction\"\n\t\t\treturn nil, rpc.RpcInvalidError(err.Error(), context)\n\n\t\t}\n\n\t\t//TODO, bTx := btcutil.NewTx(tx)\n\t\tresultTx := json.GetBlockTemplateResultTx{\n\t\t\tData: hex.EncodeToString(txBuf),\n\t\t\tHash: txHash.String(),\n\t\t\tDepends: depends,\n\t\t\tFee: template.Fees[i],\n\t\t\tSigOps: template.SigOpCounts[i],\n\t\t\t//TODO, blockchain.GetTransactionWeight(bTx)\n\t\t\tWeight: 2000000,\n\t\t}\n\t\ttransactions = append(transactions, resultTx)\n\t}\n\n\t//parents\n\tparents := []json.GetBlockTemplateResultPt{}\n\tfor _, v := range template.Block.Parents {\n\t\tresultPt := json.GetBlockTemplateResultPt{\n\t\t\tData: hex.EncodeToString(v.Bytes()),\n\t\t\tHash: v.String(),\n\t\t}\n\t\tparents = append(parents, resultPt)\n\t}\n\t//TODO,submitOld\n\n\t// gbtMutableFields are the manipulations the server allows to be made\n\t// to block templates generated by the getblocktemplate RPC. 
It is\n\t// declared here to avoid the overhead of creating the slice on every\n\t// invocation for constant data.\n\tgbtMutableFields := []string{\n\t\t\"time\", \"transactions/add\", \"prevblock\", \"coinbase/append\",\n\t}\n\tgbtCapabilities := []string{\"proposal\"}\n\tdiffBig := pow.CompactToBig(template.Difficulty)\n\ttarget := fmt.Sprintf(\"%064x\", diffBig)\n\tlongPollID := encodeTemplateID(template.Block.Header.ParentRoot, w.miner.lastTemplate)\n\n\tblockFeeMap := map[int]int64{}\n\tfor coinid, val := range template.BlockFeesMap {\n\t\tblockFeeMap[int(coinid)] = val\n\t}\n\treply := json.GetBlockTemplateResult{\n\t\tStateRoot: template.Block.Header.StateRoot.String(),\n\t\tCurTime: template.Block.Header.Timestamp.Unix(),\n\t\tHeight: int64(template.Height),\n\t\tNodeInfo: version.String() + \":\" + w.miner.policy.CoinbaseGenerator.PeerID(),\n\t\tBlues: template.Blues,\n\t\tPreviousHash: template.Block.Header.ParentRoot.String(),\n\t\tWeightLimit: types.MaxBlockWeight,\n\t\tSigOpLimit: types.MaxBlockSigOpsCost,\n\t\tSizeLimit: types.MaxBlockPayload,\n\t\t//TODO,transactions\n\t\t// make([]json.GetBlockTemplateResultTx, 0, 1)\n\t\tParents: parents,\n\t\tTransactions: transactions,\n\t\tVersion: template.Block.Header.Version,\n\t\tLongPollID: longPollID,\n\t\t//TODO, submitOld\n\t\tSubmitOld: submitOld,\n\t\tPowDiffReference: json.PowDiffReference{\n\t\t\tTarget: target,\n\t\t\tNBits: strconv.FormatInt(int64(template.Difficulty), 16),\n\t\t},\n\t\tMinTime: w.miner.minTimestamp.Unix(),\n\t\tMaxTime: maxTime.Unix(),\n\t\t// gbtMutableFields\n\t\tMutable: gbtMutableFields,\n\t\tNonceRange: gbtNonceRange,\n\t\t// TODO, Capabilities\n\t\tCapabilities: gbtCapabilities,\n\t\tBlockFeesMap: blockFeeMap,\n\t\tCoinbaseVersion: params.ActiveNetParams.Params.CoinbaseConfig.GetCurrentVersion(int64(template.Height)),\n\t}\n\n\tif useCoinbaseValue {\n\t\treply.CoinbaseAux = w.coinbaseAux\n\t\tv := 
uint64(msgBlock.Transactions[0].TxOut[0].Amount.Value)\n\t\treply.CoinbaseValue = &v\n\t} else {\n\t\t// Ensure the template has a valid payment address associated\n\t\t// with it when a full coinbase is requested.\n\t\tif !template.ValidPayAddress {\n\t\t\treturn nil, rpc.RpcInvalidError(\"A coinbase transaction has been \" +\n\t\t\t\t\"requested, but the server has not \" +\n\t\t\t\t\"been configured with any payment \" +\n\t\t\t\t\"addresses via --miningaddr\")\n\t\t}\n\t\t// Serialize the transaction for conversion to hex.\n\t\ttx := msgBlock.Transactions[0]\n\t\ttxBuf, err := tx.Serialize()\n\t\tif err != nil {\n\t\t\tcontext := \"Failed to serialize transaction\"\n\t\t\treturn nil, rpc.RpcInvalidError(\"%s %s\", err.Error(), context)\n\t\t}\n\n\t\tresultTx := json.GetBlockTemplateResultTx{\n\t\t\tData: hex.EncodeToString(txBuf),\n\t\t\tHash: tx.TxHash().String(),\n\t\t\tDepends: []int64{},\n\t\t\tFee: template.Fees[0],\n\t\t\tSigOps: template.SigOpCounts[0],\n\t\t}\n\n\t\treply.CoinbaseTxn = &resultTx\n\t}\n\n\treturn &reply, nil\n}",
"func DALTemplate() (*template.Template, error) {\n\ttmpl, err := tmps.ReadFile(\"templates/dal.gotmp\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getTemplate(string(tmpl))\n}",
"func (co *PlatformAppCommunication) GetEmailTemplates(xQuery PlatformAppGetEmailTemplatesXQuery) (EmailTemplates, error) {\n var (\n rawRequest *RawRequest\n response []byte\n err error\n getEmailTemplatesResponse EmailTemplates\n\t )\n\n \n\n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n //API call\n rawRequest = NewRequest(\n co.config,\n \"get\",\n fmt.Sprintf(\"/service/platform/communication/v1.0/company/%s/application/%s/email/templates\",co.CompanyID, co.ApplicationID),\n nil,\n xQuery,\n nil)\n response, err = rawRequest.Execute()\n if err != nil {\n return EmailTemplates{}, err\n\t }\n \n err = json.Unmarshal(response, &getEmailTemplatesResponse)\n if err != nil {\n return EmailTemplates{}, common.NewFDKError(err.Error())\n }\n return getEmailTemplatesResponse, nil\n \n }",
"func (s *LoadBalancedWebService) Template() (string, error) {\n\trulePriorityLambda, err := s.parser.Read(lbWebSvcRulePriorityGeneratorPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\toutputs, err := s.addonsOutputs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsidecars, err := s.manifest.Sidecar.SidecarsOpts()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"convert the sidecar configuration for service %s: %w\", s.name, err)\n\t}\n\tcontent, err := s.parser.ParseLoadBalancedWebService(template.ServiceOpts{\n\t\tVariables: s.manifest.Variables,\n\t\tSecrets: s.manifest.Secrets,\n\t\tNestedStack: outputs,\n\t\tSidecars: sidecars,\n\t\tLogConfig: s.manifest.LogConfigOpts(),\n\t\tRulePriorityLambda: rulePriorityLambda.String(),\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn content.String(), nil\n}",
"func DBTemplate() (*template.Template, error) {\n\ttmpl, err := tmps.ReadFile(\"templates/db.gotmp\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getTemplate(string(tmpl))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetTemplate is a paid mutator transaction binding the contract method 0x89c716d1. Solidity: function setTemplate(address _template) returns()
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactor) SetTemplate(opts *bind.TransactOpts, _template common.Address) (*types.Transaction, error) {
return _ValidatorWalletCreator.contract.Transact(opts, "setTemplate", _template)
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactorSession) SetTemplate(_template common.Address) (*types.Transaction, error) {\n\treturn _ValidatorWalletCreator.Contract.SetTemplate(&_ValidatorWalletCreator.TransactOpts, _template)\n}",
"func (req *ReqStub) SetTemplate(template string) {\n\treq.Template = template\n}",
"func (_RollupCreator *RollupCreatorTransactor) SetTemplates(opts *bind.TransactOpts, _rollupTemplate common.Address, _challengeFactory common.Address, _nodeFactory common.Address) (*types.Transaction, error) {\n\treturn _RollupCreator.contract.Transact(opts, \"setTemplates\", _rollupTemplate, _challengeFactory, _nodeFactory)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorSession) Template() (common.Address, error) {\n\treturn _ValidatorWalletCreator.Contract.Template(&_ValidatorWalletCreator.CallOpts)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorCaller) Template(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _ValidatorWalletCreator.contract.Call(opts, &out, \"template\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}",
"func (_RollupCreator *RollupCreatorSession) SetTemplates(_rollupTemplate common.Address, _challengeFactory common.Address, _nodeFactory common.Address) (*types.Transaction, error) {\n\treturn _RollupCreator.Contract.SetTemplates(&_RollupCreator.TransactOpts, _rollupTemplate, _challengeFactory, _nodeFactory)\n}",
"func (qorWidgetSetting *QorWidgetSetting) SetTemplate(template string) {\n\tqorWidgetSetting.Template = template\n}",
"func (anka *ankaDB) SetQueryTemplate(templateName string, request string) error {\n\terr := anka.logic.SetQueryTemplate(templateName, request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *DeviceManagementConfigurationSettingTemplate) SetSettingInstanceTemplate(value DeviceManagementConfigurationSettingInstanceTemplateable)() {\n err := m.GetBackingStore().Set(\"settingInstanceTemplate\", value)\n if err != nil {\n panic(err)\n }\n}",
"func setConfigTemplate(customTemplate string) error {\n\ttmpl := template.New(\"clientConfigFileTemplate\")\n\tvar err error\n\tif configTemplate, err = tmpl.Parse(customTemplate); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *WorkOrderMutation) ResetTemplate() {\n\tm.template = nil\n\tm.clearedtemplate = false\n}",
"func SetTemplateBody(name, body string) InstallationChange {\n\treturn InstallationChange{Op: InstallationChangeReplace, Path: fmt.Sprintf(\"/templates/%s/body\", name), Value: body}\n}",
"func SetTemplateHeaders(name string, headers map[string]string) InstallationChange {\n\traw, _ := json.Marshal(headers)\n\treturn InstallationChange{Op: InstallationChangeReplace, Path: fmt.Sprintf(\"/templates/%s/headers\", name), Value: string(raw)}\n}",
"func (m *ManagementAction) SetReferenceTemplateVersion(value *int32)() {\n err := m.GetBackingStore().Set(\"referenceTemplateVersion\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *FlowInstanceMutation) ResetTemplate() {\n\tm.template = nil\n\tm.clearedtemplate = false\n}",
"func SetTemplates(templates map[string]InstallationTemplate) InstallationChange {\n\traw, _ := json.Marshal(templates)\n\treturn InstallationChange{Op: InstallationChangeReplace, Path: \"/templates\", Value: string(raw)}\n}",
"func (m *BusinessFlow) SetPolicyTemplateId(value *string)() {\n err := m.GetBackingStore().Set(\"policyTemplateId\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (e *TemplateEmail) SetTemplateContent(content ...string) error {\n\tif e.TemplateName == \"\" {\n\t\treturn ErrNoTemplateName\n\t}\n\tcontentArr, err := formatTemplateContent(content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.TemplateContent = contentArr\n\treturn nil\n}",
"func (h *TextHandler) SetTextTemplate(t *template.Template) {\n\th.mtx.Lock()\n\tdefer h.mtx.Unlock()\n\th.template = t\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetTemplate is a paid mutator transaction binding the contract method 0x89c716d1. Solidity: function setTemplate(address _template) returns()
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactorSession) SetTemplate(_template common.Address) (*types.Transaction, error) {
return _ValidatorWalletCreator.Contract.SetTemplate(&_ValidatorWalletCreator.TransactOpts, _template)
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactor) SetTemplate(opts *bind.TransactOpts, _template common.Address) (*types.Transaction, error) {\n\treturn _ValidatorWalletCreator.contract.Transact(opts, \"setTemplate\", _template)\n}",
"func (req *ReqStub) SetTemplate(template string) {\n\treq.Template = template\n}",
"func (_RollupCreator *RollupCreatorTransactor) SetTemplates(opts *bind.TransactOpts, _rollupTemplate common.Address, _challengeFactory common.Address, _nodeFactory common.Address) (*types.Transaction, error) {\n\treturn _RollupCreator.contract.Transact(opts, \"setTemplates\", _rollupTemplate, _challengeFactory, _nodeFactory)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorSession) Template() (common.Address, error) {\n\treturn _ValidatorWalletCreator.Contract.Template(&_ValidatorWalletCreator.CallOpts)\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorCaller) Template(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _ValidatorWalletCreator.contract.Call(opts, &out, \"template\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}",
"func (_RollupCreator *RollupCreatorSession) SetTemplates(_rollupTemplate common.Address, _challengeFactory common.Address, _nodeFactory common.Address) (*types.Transaction, error) {\n\treturn _RollupCreator.Contract.SetTemplates(&_RollupCreator.TransactOpts, _rollupTemplate, _challengeFactory, _nodeFactory)\n}",
"func (qorWidgetSetting *QorWidgetSetting) SetTemplate(template string) {\n\tqorWidgetSetting.Template = template\n}",
"func (anka *ankaDB) SetQueryTemplate(templateName string, request string) error {\n\terr := anka.logic.SetQueryTemplate(templateName, request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *DeviceManagementConfigurationSettingTemplate) SetSettingInstanceTemplate(value DeviceManagementConfigurationSettingInstanceTemplateable)() {\n err := m.GetBackingStore().Set(\"settingInstanceTemplate\", value)\n if err != nil {\n panic(err)\n }\n}",
"func setConfigTemplate(customTemplate string) error {\n\ttmpl := template.New(\"clientConfigFileTemplate\")\n\tvar err error\n\tif configTemplate, err = tmpl.Parse(customTemplate); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *WorkOrderMutation) ResetTemplate() {\n\tm.template = nil\n\tm.clearedtemplate = false\n}",
"func SetTemplateBody(name, body string) InstallationChange {\n\treturn InstallationChange{Op: InstallationChangeReplace, Path: fmt.Sprintf(\"/templates/%s/body\", name), Value: body}\n}",
"func SetTemplateHeaders(name string, headers map[string]string) InstallationChange {\n\traw, _ := json.Marshal(headers)\n\treturn InstallationChange{Op: InstallationChangeReplace, Path: fmt.Sprintf(\"/templates/%s/headers\", name), Value: string(raw)}\n}",
"func (m *ManagementAction) SetReferenceTemplateVersion(value *int32)() {\n err := m.GetBackingStore().Set(\"referenceTemplateVersion\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *FlowInstanceMutation) ResetTemplate() {\n\tm.template = nil\n\tm.clearedtemplate = false\n}",
"func SetTemplates(templates map[string]InstallationTemplate) InstallationChange {\n\traw, _ := json.Marshal(templates)\n\treturn InstallationChange{Op: InstallationChangeReplace, Path: \"/templates\", Value: string(raw)}\n}",
"func (m *BusinessFlow) SetPolicyTemplateId(value *string)() {\n err := m.GetBackingStore().Set(\"policyTemplateId\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (e *TemplateEmail) SetTemplateContent(content ...string) error {\n\tif e.TemplateName == \"\" {\n\t\treturn ErrNoTemplateName\n\t}\n\tcontentArr, err := formatTemplateContent(content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.TemplateContent = contentArr\n\treturn nil\n}",
"func (h *TextHandler) SetTextTemplate(t *template.Template) {\n\th.mtx.Lock()\n\tdefer h.mtx.Unlock()\n\th.template = t\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. Solidity: function transferOwnership(address newOwner) returns()
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) {
return _ValidatorWalletCreator.contract.Transact(opts, "transferOwnership", newOwner)
}
|
[
"func (_TrueUSD *TrueUSDTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) {\n\treturn _TrueUSD.contract.Transact(opts, \"transferOwnership\", newOwner)\n}",
"func (t *Token) TransferOwnership(stub shim.ChaincodeStubInterface,\n\targs []string,\n\tgetOwner func(shim.ChaincodeStubInterface) (string, error),\n) error {\n\tcallerID, err := GetCallerID(stub)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttokenOwnerID, err := getOwner(stub)\n\tif err := CheckCallerIsOwner(callerID, tokenOwnerID); err != nil {\n\t\treturn err\n\t}\n\n\tnewOwnerID := args[0]\n\n\terr = stub.PutState(\"owner\", []byte(newOwnerID))\n\treturn err\n}",
"func (_Berry *BerryTransactor) ProposeOwnership(opts *bind.TransactOpts, _pendingOwner common.Address) (*types.Transaction, error) {\n\treturn _Berry.contract.Transact(opts, \"proposeOwnership\", _pendingOwner)\n}",
"func (_Bank *BankTransactor) TransferOwner(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) {\n\treturn _Bank.contract.Transact(opts, \"transferOwner\", newOwner)\n}",
"func (_Wrapper *WrapperTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Wrapper.contract.Transact(opts, \"acceptOwnership\")\n}",
"func (_Bank *BankTransactorSession) TransferOwner(newOwner common.Address) (*types.Transaction, error) {\n\treturn _Bank.Contract.TransferOwner(&_Bank.TransactOpts, newOwner)\n}",
"func (chudrive *Chudrive) TransferOwnership(fileID, email string) error {\n\tperm := drive.Permission{\n\t\tEmailAddress: email,\n\t\tRole: \"owner\",\n\t\tType: \"user\",\n\t}\n\t_, err := chudrive.Drive.Permissions.Create(fileID, &perm).TransferOwnership(true).Do()\n\treturn err\n}",
"func (_Berry *BerryTransactorSession) ProposeOwnership(_pendingOwner common.Address) (*types.Transaction, error) {\n\treturn _Berry.Contract.ProposeOwnership(&_Berry.TransactOpts, _pendingOwner)\n}",
"func (_Registry *RegistryCaller) Ownership(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Registry.contract.Call(opts, out, \"ownership\", arg0)\n\treturn *ret0, err\n}",
"func (_Registry *RegistryTransactor) AssignOwnership(opts *bind.TransactOpts, owner common.Address, property common.Address) (*types.Transaction, error) {\n\treturn _Registry.contract.Transact(opts, \"assignOwnership\", owner, property)\n}",
"func (_CardOwnership *CardOwnershipTransactor) SetOwner(opts *bind.TransactOpts, _newOwner common.Address) (*types.Transaction, error) {\n\treturn _CardOwnership.contract.Transact(opts, \"setOwner\", _newOwner)\n}",
"func (_Asset *AssetCaller) Ownership(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Asset.contract.Call(opts, out, \"Ownership\")\n\treturn *ret0, err\n}",
"func (_CardOwnership *CardOwnershipTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _cardId *big.Int) (*types.Transaction, error) {\n\treturn _CardOwnership.contract.Transact(opts, \"transfer\", _to, _cardId)\n}",
"func (a *DefaultApiService) FundOwnership(ctx _context.Context) ApiFundOwnershipRequest {\n\treturn ApiFundOwnershipRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}",
"func (_Registry *RegistrySession) AssignOwnership(owner common.Address, property common.Address) (*types.Transaction, error) {\n\treturn _Registry.Contract.AssignOwnership(&_Registry.TransactOpts, owner, property)\n}",
"func (_Issuer *IssuerTransactor) ChangeOwner(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) {\n\treturn _Issuer.contract.Transact(opts, \"changeOwner\", newOwner)\n}",
"func DeployCardOwnership(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *CardOwnership, error) {\n\tparsed, err := abi.JSON(strings.NewReader(CardOwnershipABI))\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\taddress, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(CardOwnershipBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &CardOwnership{CardOwnershipCaller: CardOwnershipCaller{contract: contract}, CardOwnershipTransactor: CardOwnershipTransactor{contract: contract}, CardOwnershipFilterer: CardOwnershipFilterer{contract: contract}}, nil\n}",
"func (_Dai *DaiTransactor) SetOwner(opts *bind.TransactOpts, owner_ common.Address) (*types.Transaction, error) {\n\treturn _Dai.contract.Transact(opts, \"setOwner\", owner_)\n}",
"func (_CrossEther *CrossEtherTransactor) OwnerPermissions(opts *bind.TransactOpts, newOwner common.Address, isOwner bool) (*types.Transaction, error) {\n\treturn _CrossEther.contract.Transact(opts, \"ownerPermissions\", newOwner, isOwner)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FilterTemplateUpdated is a free log retrieval operation binding the contract event 0x6eb26f176dd9180849dd4874d3530de0e5c1f62a6e6798d34e3abfc11f1db2cc. Solidity: event TemplateUpdated()
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterTemplateUpdated(opts *bind.FilterOpts) (*ValidatorWalletCreatorTemplateUpdatedIterator, error) {
logs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, "TemplateUpdated")
if err != nil {
return nil, err
}
return &ValidatorWalletCreatorTemplateUpdatedIterator{contract: _ValidatorWalletCreator.contract, event: "TemplateUpdated", logs: logs, sub: sub}, nil
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchTemplateUpdated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorTemplateUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseTemplateUpdated(log types.Log) (*ValidatorWalletCreatorTemplateUpdated, error) {\n\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (_DNSResolverContract *DNSResolverContractFilterer) FilterUpdated(opts *bind.FilterOpts) (*DNSResolverContractUpdatedIterator, error) {\n\n\tlogs, sub, err := _DNSResolverContract.contract.FilterLogs(opts, \"Updated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DNSResolverContractUpdatedIterator{contract: _DNSResolverContract.contract, event: \"Updated\", logs: logs, sub: sub}, nil\n}",
"func (fe *PlatformAppFeedback) UpdateTemplateStatus(ID string, body UpdateTemplateStatusRequest) (UpdateResponse, error) {\n var (\n rawRequest *RawRequest\n response []byte\n err error\n updateTemplateStatusResponse UpdateResponse\n\t )\n\n \n \n \n \n \n\n \n\n \n \n \n \n \n \n \n //Parse req body to map\n var reqBody map[string]interface{}\n reqBodyJSON, err := json.Marshal(body)\n if err != nil {\n \n return UpdateResponse{}, common.NewFDKError(err.Error())\n }\n err = json.Unmarshal([]byte(reqBodyJSON), &reqBody)\n if err != nil {\n \n return UpdateResponse{}, common.NewFDKError(err.Error()) \n }\n \n //API call\n rawRequest = NewRequest(\n fe.config,\n \"patch\",\n fmt.Sprintf(\"/service/platform/feedback/v1.0/company/%s/application/%s/templates/%s/status/\",fe.CompanyID, fe.ApplicationID, ID),\n nil,\n nil,\n reqBody)\n response, err = rawRequest.Execute()\n if err != nil {\n return UpdateResponse{}, err\n\t }\n \n err = json.Unmarshal(response, &updateTemplateStatusResponse)\n if err != nil {\n return UpdateResponse{}, common.NewFDKError(err.Error())\n }\n return updateTemplateStatusResponse, nil\n \n }",
"func (s *Session) OnReadTemplate(fs *NFv9TemplateFlowSet) {\n\tfor _, t := range fs.Templates {\n\t\ttid := int(t.TemplateID)\n\t\tif _, ok := s.templates[tid]; !ok {\n\t\t\ts.templates[tid] = t\n\t\t}\n\t}\n}",
"func (c *Client) SourceRepositoryTemplateUpdate(r string, payload *SourceRepositoryTemplateStruct) error {\n\tvar err error\n\treturn err\n}",
"func (s FilterItem) TemplateString(tmpl string) (string, error) {\n\tvar b []byte\n\tvar err error\n\tif b, err = s.TemplateBytes(tmpl); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func (_DNSResolverContract *DNSResolverContractFilterer) WatchUpdated(opts *bind.WatchOpts, sink chan<- *DNSResolverContractUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _DNSResolverContract.contract.WatchLogs(opts, \"Updated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(DNSResolverContractUpdated)\n\t\t\t\tif err := _DNSResolverContract.contract.UnpackLog(event, \"Updated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (s *Status) HandlerTmpUpdate() int64 {\n\treturn s.pStatus.handlerTmpUpdate // Handler_tmp_update\n}",
"func (s *SyncStore) UpdateTemplate(category, fileName string, body []byte, mimeType string, meta map[string]interface{}) error {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.Store.UpdateTemplate(category, fileName, body, mimeType, meta)\n}",
"func (r *Reconciler) activeTemplateUpdateRequests(logger logr.Logger, tier *toolchainv1alpha1.NSTemplateTier) (int, bool, error) {\n\t// fetch the list of TemplateUpdateRequest owned by the NSTemplateTier tier\n\ttemplateUpdateRequests := toolchainv1alpha1.TemplateUpdateRequestList{}\n\tif err := r.Client.List(context.TODO(), &templateUpdateRequests, client.MatchingLabels{\n\t\ttoolchainv1alpha1.NSTemplateTierNameLabelKey: tier.Name,\n\t}); err != nil {\n\t\treturn -1, false, err\n\t}\n\n\t// count non-deleted templateUpdateRequest items\n\titems := make(map[string]*metav1.Time, len(templateUpdateRequests.Items))\n\tfor _, item := range templateUpdateRequests.Items {\n\t\titems[item.Name] = item.DeletionTimestamp\n\t}\n\tlogger.Info(\"checking templateUpdateRequests\", \"items\", items)\n\tcount := 0\n\tfor _, tur := range templateUpdateRequests.Items {\n\t\tlogger.Info(\"checking templateUpdateRequest\", \"name\", tur.Name, \"deleted\", util.IsBeingDeleted(&tur))\n\t\tif util.IsBeingDeleted(&tur) {\n\t\t\t// ignore when already being deleted\n\t\t\tlogger.Info(\"skipping TemplateUpdateRequest as it is already being deleted\", \"name\", tur.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\t// delete when in `complete=true` (reason=updated) or when in `complete=false/reason=failed` status conditions\n\t\tif condition.IsTrue(tur.Status.Conditions, toolchainv1alpha1.TemplateUpdateRequestComplete) ||\n\t\t\t(condition.IsFalseWithReason(tur.Status.Conditions, toolchainv1alpha1.TemplateUpdateRequestComplete, toolchainv1alpha1.TemplateUpdateRequestUnableToUpdateReason) &&\n\t\t\t\tmaxUpdateFailuresReached(tur, r.Config.GetMasterUserRecordUpdateFailureThreshold())) {\n\t\t\tif err := r.incrementCounters(logger, tier, tur); err != nil {\n\t\t\t\treturn -1, false, err\n\t\t\t}\n\t\t\tif err := r.Client.Delete(context.TODO(), &tur); err != nil {\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\tlogger.Info(\"skipping failed TemplateUpdateRequest as it was already deleted\", \"name\", 
tur.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn -1, false, errs.Wrapf(err, \"unable to delete the TemplateUpdateRequest resource '%s'\", tur.Name)\n\t\t\t}\n\t\t\t// will exit the reconcile loop\n\t\t\treturn -1, true, nil\n\t\t}\n\t\tcount++\n\t}\n\tlogger.Info(\"found active TemplateUpdateRequests for the current tier\", \"count\", count)\n\treturn count, false, nil\n}",
"func (_Permissioning *PermissioningFilterer) FilterRegistryUpdated(opts *bind.FilterOpts) (*PermissioningRegistryUpdatedIterator, error) {\n\n\tlogs, sub, err := _Permissioning.contract.FilterLogs(opts, \"RegistryUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PermissioningRegistryUpdatedIterator{contract: _Permissioning.contract, event: \"RegistryUpdated\", logs: logs, sub: sub}, nil\n}",
"func (filter *filter) LastTimeUpdated() time.Time {\n\tfilterFilePath := filter.Path()\n\ts, err := os.Stat(filterFilePath)\n\tif os.IsNotExist(err) {\n\t\t// if the filter file does not exist, return 0001-01-01\n\t\treturn time.Time{}\n\t}\n\n\tif err != nil {\n\t\t// if the filter file does not exist, return 0001-01-01\n\t\treturn time.Time{}\n\t}\n\n\t// filter file modified time\n\treturn s.ModTime()\n}",
"func (c *Softlayer) TemplatesByFilter(filter *Filter) (Templates, error) {\n\treq := &ResourceRequest{\n\t\tName: \"Template\",\n\t\tPath: \"SoftLayer_Account/getBlockDeviceTemplateGroups.json\",\n\t\tFilter: filter,\n\t\tObjectMask: templateMask,\n\t\tResource: &Templates{},\n\t}\n\tif err := c.get(req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn *req.Resource.(*Templates), nil\n}",
"func (g *tLogger) Update(t *testing.T) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tif !g.initialized {\n\t\tgrpclog.SetLoggerV2(TLogger)\n\t\tg.initialized = true\n\t}\n\tg.t = t\n\tg.start = time.Now()\n\tg.errors = map[*regexp.Regexp]int{}\n}",
"func (svc *SES) RawUpdateTemplate(ctx context.Context, in *SDK.UpdateTemplateInput) (*SDK.UpdateTemplateResponse, error) {\n\treturn svc.client.UpdateTemplateRequest(in).Send(ctx)\n}",
"func (_SystemConfig *SystemConfigFilterer) FilterConfigUpdate(opts *bind.FilterOpts, version []*big.Int, updateType []uint8) (*SystemConfigConfigUpdateIterator, error) {\n\n\tvar versionRule []interface{}\n\tfor _, versionItem := range version {\n\t\tversionRule = append(versionRule, versionItem)\n\t}\n\tvar updateTypeRule []interface{}\n\tfor _, updateTypeItem := range updateType {\n\t\tupdateTypeRule = append(updateTypeRule, updateTypeItem)\n\t}\n\n\tlogs, sub, err := _SystemConfig.contract.FilterLogs(opts, \"ConfigUpdate\", versionRule, updateTypeRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SystemConfigConfigUpdateIterator{contract: _SystemConfig.contract, event: \"ConfigUpdate\", logs: logs, sub: sub}, nil\n}",
"func (_Comptroller *ComptrollerFilterer) FilterCompSpeedUpdated(opts *bind.FilterOpts, cToken []common.Address) (*ComptrollerCompSpeedUpdatedIterator, error) {\n\n\tvar cTokenRule []interface{}\n\tfor _, cTokenItem := range cToken {\n\t\tcTokenRule = append(cTokenRule, cTokenItem)\n\t}\n\n\tlogs, sub, err := _Comptroller.contract.FilterLogs(opts, \"CompSpeedUpdated\", cTokenRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ComptrollerCompSpeedUpdatedIterator{contract: _Comptroller.contract, event: \"CompSpeedUpdated\", logs: logs, sub: sub}, nil\n}",
"func (c *Client) OperationTemplateUpdate(r string, payload *OperationTemplateStruct) error {\n\tvar err error\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WatchTemplateUpdated is a free log subscription operation binding the contract event 0x6eb26f176dd9180849dd4874d3530de0e5c1f62a6e6798d34e3abfc11f1db2cc. Solidity: event TemplateUpdated()
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchTemplateUpdated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorTemplateUpdated) (event.Subscription, error) {
logs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, "TemplateUpdated")
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(ValidatorWalletCreatorTemplateUpdated)
if err := _ValidatorWalletCreator.contract.UnpackLog(event, "TemplateUpdated", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterTemplateUpdated(opts *bind.FilterOpts) (*ValidatorWalletCreatorTemplateUpdatedIterator, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTemplateUpdatedIterator{contract: _ValidatorWalletCreator.contract, event: \"TemplateUpdated\", logs: logs, sub: sub}, nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseTemplateUpdated(log types.Log) (*ValidatorWalletCreatorTemplateUpdated, error) {\n\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (_DNSResolverContract *DNSResolverContractFilterer) WatchUpdated(opts *bind.WatchOpts, sink chan<- *DNSResolverContractUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _DNSResolverContract.contract.WatchLogs(opts, \"Updated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(DNSResolverContractUpdated)\n\t\t\t\tif err := _DNSResolverContract.contract.UnpackLog(event, \"Updated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (c *FakeHookTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(hooktemplatesResource, c.ns, opts))\n\n}",
"func (c *Client) JSDataTemplateUpdate(r string, payload *JSDataTemplateStruct) error {\n\tvar err error\n\treturn err\n}",
"func (s *SyncStore) UpdateTemplate(category, fileName string, body []byte, mimeType string, meta map[string]interface{}) error {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.Store.UpdateTemplate(category, fileName, body, mimeType, meta)\n}",
"func (c *Client) SourceRepositoryTemplateUpdate(r string, payload *SourceRepositoryTemplateStruct) error {\n\tvar err error\n\treturn err\n}",
"func WatchUpdate() <-chan *ChangeEvent {\n return defaultClient.WatchUpdate()\n}",
"func (_Comptroller *ComptrollerFilterer) WatchCompSpeedUpdated(opts *bind.WatchOpts, sink chan<- *ComptrollerCompSpeedUpdated, cToken []common.Address) (event.Subscription, error) {\n\n\tvar cTokenRule []interface{}\n\tfor _, cTokenItem := range cToken {\n\t\tcTokenRule = append(cTokenRule, cTokenItem)\n\t}\n\n\tlogs, sub, err := _Comptroller.contract.WatchLogs(opts, \"CompSpeedUpdated\", cTokenRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ComptrollerCompSpeedUpdated)\n\t\t\t\tif err := _Comptroller.contract.UnpackLog(event, \"CompSpeedUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (r *Reconciler) activeTemplateUpdateRequests(logger logr.Logger, tier *toolchainv1alpha1.NSTemplateTier) (int, bool, error) {\n\t// fetch the list of TemplateUpdateRequest owned by the NSTemplateTier tier\n\ttemplateUpdateRequests := toolchainv1alpha1.TemplateUpdateRequestList{}\n\tif err := r.Client.List(context.TODO(), &templateUpdateRequests, client.MatchingLabels{\n\t\ttoolchainv1alpha1.NSTemplateTierNameLabelKey: tier.Name,\n\t}); err != nil {\n\t\treturn -1, false, err\n\t}\n\n\t// count non-deleted templateUpdateRequest items\n\titems := make(map[string]*metav1.Time, len(templateUpdateRequests.Items))\n\tfor _, item := range templateUpdateRequests.Items {\n\t\titems[item.Name] = item.DeletionTimestamp\n\t}\n\tlogger.Info(\"checking templateUpdateRequests\", \"items\", items)\n\tcount := 0\n\tfor _, tur := range templateUpdateRequests.Items {\n\t\tlogger.Info(\"checking templateUpdateRequest\", \"name\", tur.Name, \"deleted\", util.IsBeingDeleted(&tur))\n\t\tif util.IsBeingDeleted(&tur) {\n\t\t\t// ignore when already being deleted\n\t\t\tlogger.Info(\"skipping TemplateUpdateRequest as it is already being deleted\", \"name\", tur.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\t// delete when in `complete=true` (reason=updated) or when in `complete=false/reason=failed` status conditions\n\t\tif condition.IsTrue(tur.Status.Conditions, toolchainv1alpha1.TemplateUpdateRequestComplete) ||\n\t\t\t(condition.IsFalseWithReason(tur.Status.Conditions, toolchainv1alpha1.TemplateUpdateRequestComplete, toolchainv1alpha1.TemplateUpdateRequestUnableToUpdateReason) &&\n\t\t\t\tmaxUpdateFailuresReached(tur, r.Config.GetMasterUserRecordUpdateFailureThreshold())) {\n\t\t\tif err := r.incrementCounters(logger, tier, tur); err != nil {\n\t\t\t\treturn -1, false, err\n\t\t\t}\n\t\t\tif err := r.Client.Delete(context.TODO(), &tur); err != nil {\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\tlogger.Info(\"skipping failed TemplateUpdateRequest as it was already deleted\", \"name\", 
tur.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn -1, false, errs.Wrapf(err, \"unable to delete the TemplateUpdateRequest resource '%s'\", tur.Name)\n\t\t\t}\n\t\t\t// will exit the reconcile loop\n\t\t\treturn -1, true, nil\n\t\t}\n\t\tcount++\n\t}\n\tlogger.Info(\"found active TemplateUpdateRequests for the current tier\", \"count\", count)\n\treturn count, false, nil\n}",
"func (_SystemConfig *SystemConfigFilterer) WatchConfigUpdate(opts *bind.WatchOpts, sink chan<- *SystemConfigConfigUpdate, version []*big.Int, updateType []uint8) (event.Subscription, error) {\n\n\tvar versionRule []interface{}\n\tfor _, versionItem := range version {\n\t\tversionRule = append(versionRule, versionItem)\n\t}\n\tvar updateTypeRule []interface{}\n\tfor _, updateTypeItem := range updateType {\n\t\tupdateTypeRule = append(updateTypeRule, updateTypeItem)\n\t}\n\n\tlogs, sub, err := _SystemConfig.contract.WatchLogs(opts, \"ConfigUpdate\", versionRule, updateTypeRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(SystemConfigConfigUpdate)\n\t\t\t\tif err := _SystemConfig.contract.UnpackLog(event, \"ConfigUpdate\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (c *Client) OperationTemplateUpdate(r string, payload *OperationTemplateStruct) error {\n\tvar err error\n\treturn err\n}",
"func (fe *PlatformAppFeedback) UpdateTemplateStatus(ID string, body UpdateTemplateStatusRequest) (UpdateResponse, error) {\n var (\n rawRequest *RawRequest\n response []byte\n err error\n updateTemplateStatusResponse UpdateResponse\n\t )\n\n \n \n \n \n \n\n \n\n \n \n \n \n \n \n \n //Parse req body to map\n var reqBody map[string]interface{}\n reqBodyJSON, err := json.Marshal(body)\n if err != nil {\n \n return UpdateResponse{}, common.NewFDKError(err.Error())\n }\n err = json.Unmarshal([]byte(reqBodyJSON), &reqBody)\n if err != nil {\n \n return UpdateResponse{}, common.NewFDKError(err.Error()) \n }\n \n //API call\n rawRequest = NewRequest(\n fe.config,\n \"patch\",\n fmt.Sprintf(\"/service/platform/feedback/v1.0/company/%s/application/%s/templates/%s/status/\",fe.CompanyID, fe.ApplicationID, ID),\n nil,\n nil,\n reqBody)\n response, err = rawRequest.Execute()\n if err != nil {\n return UpdateResponse{}, err\n\t }\n \n err = json.Unmarshal(response, &updateTemplateStatusResponse)\n if err != nil {\n return UpdateResponse{}, common.NewFDKError(err.Error())\n }\n return updateTemplateStatusResponse, nil\n \n }",
"func (g *tLogger) Update(t *testing.T) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tif !g.initialized {\n\t\tgrpclog.SetLoggerV2(TLogger)\n\t\tg.initialized = true\n\t}\n\tg.t = t\n\tg.start = time.Now()\n\tg.errors = map[*regexp.Regexp]int{}\n}",
"func (c *FakeHookTemplates) Update(ctx context.Context, hookTemplate *v1alpha1.HookTemplate, opts v1.UpdateOptions) (result *v1alpha1.HookTemplate, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(hooktemplatesResource, c.ns, hookTemplate), &v1alpha1.HookTemplate{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.HookTemplate), err\n}",
"func (dm *DataManager) UpdateTemplateVersion(ctx context.Context, req *pb.UpdateTemplateVersionReq) (*pb.UpdateTemplateVersionResp, error) {\n\trtime := time.Now()\n\tlogger.V(2).Infof(\"UpdateTemplateVersion[%d]| input[%+v]\", req.Seq, req)\n\tresponse := &pb.UpdateTemplateVersionResp{}\n\n\tdefer func() {\n\t\tcost := dm.collector.StatRequest(\"UpdateTemplateVersion\", response.ErrCode, rtime, time.Now())\n\t\tlogger.V(2).Infof(\"UpdateTemplateVersion[%d]| output[%dms][%+v]\", req.Seq, cost, response)\n\t}()\n\n\taction := templateversionaction.NewUpdateAction(dm.viper, dm.smgr, req, response)\n\tdm.executor.Execute(action)\n\n\treturn response, nil\n}",
"func SyncDevWorkspaceTemplate() error {\n\tclient, err := library.GetK8sClient()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get k8s client: %w\", err)\n\t}\n\n\tdwtPath, err := library.FindDevWorkspaceTemplate()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find DevWorkspaceTemplate: %w\", err)\n\t}\n\tlog.Printf(\"Found DevWorkspaceTemplate at %s\", dwtPath)\n\n\tdwt, err := library.ReadDevWorkspaceTemplateFromFile(dwtPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read DevWorkspaceTemplate: %w\", err)\n\t}\n\tlog.Printf(\"Successfully read DevWorkspaceTemplate\")\n\n\tif dwt.Namespace == \"\" {\n\t\tns, err := library.GetCurrentNamespace()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to get the current namespace and DevWorkspaceTemplate does not supply one\")\n\t\t}\n\t\tdwt.Namespace = ns\n\t}\n\n\tlog.Printf(\"Reading current DevWorkspace to ensure Template is compatible\")\n\t// Get current DevWorkspace to set ownerref correctly\n\tcurrDW, err := library.GetDevWorkspace(client)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get the current DevWorkspace: %w\", err)\n\t}\n\terr = library.SetControllerRef(currDW, dwt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to set controllerref on DevWorkspaceTemplate\")\n\t}\n\n\terr = client.Create(context.Background(), dwt)\n\tif err == nil {\n\t\tlog.Printf(\"Successfully created DevWorkspaceTemplate on cluster: name %q, namespace: %q\", dwt.Name, dwt.Namespace)\n\t\treturn nil\n\t}\n\tif !k8serrors.IsAlreadyExists(err) {\n\t\treturn fmt.Errorf(\"encountered unexpected error when trying to sync template to cluster (retry): %w\", err)\n\t}\n\n\t// template exists, need to update (if allowed)\n\tlog.Printf(\"DevWorkspaceTemplate with name %s already exists in current namespace; updating\", dwt.Name)\n\tclusterDWT := &v1alpha2.DevWorkspaceTemplate{}\n\terr = client.Get(context.Background(), types.NamespacedName{Name: dwt.Name, Namespace: dwt.Namespace}, 
clusterDWT)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error while trying to get devworkspace template from cluster (retry): %w\", err)\n\t}\n\tif !library.OwnerRefsMatch(metav1.GetControllerOf(dwt), metav1.GetControllerOf(clusterDWT)) {\n\t\treturn fmt.Errorf(\"template already exists on cluster and is not controlled by the current DevWorkspace\")\n\t}\n\tclusterDWT.Spec = dwt.Spec\n\terr = client.Update(context.Background(), clusterDWT)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update DevWorkspaceTemplate on cluster: %w\", err)\n\t}\n\tlog.Printf(\"Successfully updated DevWorkspaceTemplate on cluster\")\n\treturn nil\n}",
"func (s *Session) OnReadTemplate(fs *NFv9TemplateFlowSet) {\n\tfor _, t := range fs.Templates {\n\t\ttid := int(t.TemplateID)\n\t\tif _, ok := s.templates[tid]; !ok {\n\t\t\ts.templates[tid] = t\n\t\t}\n\t}\n}",
"func (_Stakinginfo *StakinginfoFilterer) WatchStakeUpdate(opts *bind.WatchOpts, sink chan<- *StakinginfoStakeUpdate, validatorId []*big.Int, nonce []*big.Int, newAmount []*big.Int) (event.Subscription, error) {\n\n\tvar validatorIdRule []interface{}\n\tfor _, validatorIdItem := range validatorId {\n\t\tvalidatorIdRule = append(validatorIdRule, validatorIdItem)\n\t}\n\tvar nonceRule []interface{}\n\tfor _, nonceItem := range nonce {\n\t\tnonceRule = append(nonceRule, nonceItem)\n\t}\n\tvar newAmountRule []interface{}\n\tfor _, newAmountItem := range newAmount {\n\t\tnewAmountRule = append(newAmountRule, newAmountItem)\n\t}\n\n\tlogs, sub, err := _Stakinginfo.contract.WatchLogs(opts, \"StakeUpdate\", validatorIdRule, nonceRule, newAmountRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(StakinginfoStakeUpdate)\n\t\t\t\tif err := _Stakinginfo.contract.UnpackLog(event, \"StakeUpdate\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ParseTemplateUpdated is a log parse operation binding the contract event 0x6eb26f176dd9180849dd4874d3530de0e5c1f62a6e6798d34e3abfc11f1db2cc. Solidity: event TemplateUpdated()
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseTemplateUpdated(log types.Log) (*ValidatorWalletCreatorTemplateUpdated, error) {
event := new(ValidatorWalletCreatorTemplateUpdated)
if err := _ValidatorWalletCreator.contract.UnpackLog(event, "TemplateUpdated", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterTemplateUpdated(opts *bind.FilterOpts) (*ValidatorWalletCreatorTemplateUpdatedIterator, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTemplateUpdatedIterator{contract: _ValidatorWalletCreator.contract, event: \"TemplateUpdated\", logs: logs, sub: sub}, nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchTemplateUpdated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorTemplateUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (s *SyncStore) UpdateTemplate(category, fileName string, body []byte, mimeType string, meta map[string]interface{}) error {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.Store.UpdateTemplate(category, fileName, body, mimeType, meta)\n}",
"func (o WorkflowTemplateOutput) UpdateTime() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *WorkflowTemplate) pulumi.StringOutput { return v.UpdateTime }).(pulumi.StringOutput)\n}",
"func (dm *DataManager) UpdateTemplateVersion(ctx context.Context, req *pb.UpdateTemplateVersionReq) (*pb.UpdateTemplateVersionResp, error) {\n\trtime := time.Now()\n\tlogger.V(2).Infof(\"UpdateTemplateVersion[%d]| input[%+v]\", req.Seq, req)\n\tresponse := &pb.UpdateTemplateVersionResp{}\n\n\tdefer func() {\n\t\tcost := dm.collector.StatRequest(\"UpdateTemplateVersion\", response.ErrCode, rtime, time.Now())\n\t\tlogger.V(2).Infof(\"UpdateTemplateVersion[%d]| output[%dms][%+v]\", req.Seq, cost, response)\n\t}()\n\n\taction := templateversionaction.NewUpdateAction(dm.viper, dm.smgr, req, response)\n\tdm.executor.Execute(action)\n\n\treturn response, nil\n}",
"func (client *SmnClient) UpdateMessageTemplate(request *UpdateMessageTemplateRequest) (response *UpdateMessageTemplateResponse, err error) {\n\tresponse = &UpdateMessageTemplateResponse{\n\t\tBaseResponse: &BaseResponse{},\n\t}\n\terr = client.SendRequest(request, response)\n\treturn\n}",
"func (g *tLogger) Update(t *testing.T) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tif !g.initialized {\n\t\tgrpclog.SetLoggerV2(TLogger)\n\t\tg.initialized = true\n\t}\n\tg.t = t\n\tg.start = time.Now()\n\tg.errors = map[*regexp.Regexp]int{}\n}",
"func (c *Client) SourceRepositoryTemplateUpdate(r string, payload *SourceRepositoryTemplateStruct) error {\n\tvar err error\n\treturn err\n}",
"func parseTemplate(source string) string {\n\treturn TempReplaceRegexp.ReplaceAllStringFunc(source, func(m string) string {\n\t\tbindstr := strings.TrimSpace(TempReplaceRegexp.FindStringSubmatch(m)[1])\n\t\treturn fmt.Sprintf(`<span bind-html=\"%v\"></span>`, bindstr)\n\t})\n}",
"func (svc *SES) RawUpdateTemplate(ctx context.Context, in *SDK.UpdateTemplateInput) (*SDK.UpdateTemplateResponse, error) {\n\treturn svc.client.UpdateTemplateRequest(in).Send(ctx)\n}",
"func ParseTemplateHandler(writer http.ResponseWriter, request *http.Request) {\n\ttemplateID := request.PostFormValue(\"template_id\")\n\tif len(templateID) == 0 {\n\t\tlogging.Error().Log(\"template_id is empty\")\n\t\treturn\n\t}\n\n\ttemplateStrings, err := getBrazeTemplateInfo(templateID)\n\tif err != nil {\n\t\thttp.Error(writer, \"unable to parse template\", http.StatusInternalServerError)\n\t\tlogging.Error().LogErr(\"faile to parse template\", err)\n\t\treturn\n\t}\n\n\tdata := map[string]interface{}{\n\t\t\"template_id\": templateID,\n\t\t\"strings\": templateStrings,\n\t}\n\n\tdataBytes, err := json.Marshal(data)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\tlogging.Error().LogErr(\"faile to marshal JSON\", err)\n\t\treturn\n\t}\n\n\twriter.Write(dataBytes)\n\twriter.Header().Add(utils.ContentTypeHeader, \"application/json\")\n}",
"func (s *Session) OnReadTemplate(fs *NFv9TemplateFlowSet) {\n\tfor _, t := range fs.Templates {\n\t\ttid := int(t.TemplateID)\n\t\tif _, ok := s.templates[tid]; !ok {\n\t\t\ts.templates[tid] = t\n\t\t}\n\t}\n}",
"func (c *Client) OperationTemplateUpdate(r string, payload *OperationTemplateStruct) error {\n\tvar err error\n\treturn err\n}",
"func UpdateTemplateManifest(ctx context.Context, query bson.D, update bson.D) error {\n\tupdateResult, err := mongodb.Operator.Update(ctx, mongodb.WorkflowTemplateCollection, query, update)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif updateResult.MatchedCount == 0 {\n\t\treturn errors.New(\"Template collection query didn't match\")\n\t}\n\treturn nil\n}",
"func (s *Service) Update(r *Request) (*JobTemplate, error) {\n\tjob_template := new(JobTemplate)\n\tapierr := new(errors.APIError)\n\t_, err := s.sling.New().Put(r.ID+\"/\").BodyJSON(r).Receive(job_template, apierr)\n\treturn job_template, errors.BuildError(err, apierr)\n}",
"func (s *Status) HandlerTmpUpdate() int64 {\n\treturn s.pStatus.handlerTmpUpdate // Handler_tmp_update\n}",
"func (fe *PlatformAppFeedback) UpdateTemplateStatus(ID string, body UpdateTemplateStatusRequest) (UpdateResponse, error) {\n var (\n rawRequest *RawRequest\n response []byte\n err error\n updateTemplateStatusResponse UpdateResponse\n\t )\n\n \n \n \n \n \n\n \n\n \n \n \n \n \n \n \n //Parse req body to map\n var reqBody map[string]interface{}\n reqBodyJSON, err := json.Marshal(body)\n if err != nil {\n \n return UpdateResponse{}, common.NewFDKError(err.Error())\n }\n err = json.Unmarshal([]byte(reqBodyJSON), &reqBody)\n if err != nil {\n \n return UpdateResponse{}, common.NewFDKError(err.Error()) \n }\n \n //API call\n rawRequest = NewRequest(\n fe.config,\n \"patch\",\n fmt.Sprintf(\"/service/platform/feedback/v1.0/company/%s/application/%s/templates/%s/status/\",fe.CompanyID, fe.ApplicationID, ID),\n nil,\n nil,\n reqBody)\n response, err = rawRequest.Execute()\n if err != nil {\n return UpdateResponse{}, err\n\t }\n \n err = json.Unmarshal(response, &updateTemplateStatusResponse)\n if err != nil {\n return UpdateResponse{}, common.NewFDKError(err.Error())\n }\n return updateTemplateStatusResponse, nil\n \n }",
"func (m *ProjectTemplateMutation) OldUpdateTime(ctx context.Context) (v time.Time, err error) {\n\tif !m.op.Is(OpUpdateOne) {\n\t\treturn v, fmt.Errorf(\"OldUpdateTime is allowed only on UpdateOne operations\")\n\t}\n\tif m.id == nil || m.oldValue == nil {\n\t\treturn v, fmt.Errorf(\"OldUpdateTime requires an ID field in the mutation\")\n\t}\n\toldValue, err := m.oldValue(ctx)\n\tif err != nil {\n\t\treturn v, fmt.Errorf(\"querying old value for OldUpdateTime: %w\", err)\n\t}\n\treturn oldValue.UpdateTime, nil\n}",
"func updateServiceTemplates(ctx *template.TemplateBuildContext) *axerror.AXError {\n\tvalidatedTemplates := ctx.GetServiceTemplates()\n\tutils.DebugLog.Printf(\"Updating %d validated service templates\", len(validatedTemplates))\n\n\tquery := map[string]interface{}{\n\t\tservice.TemplateRepo: ctx.Repo,\n\t\tservice.TemplateBranch: ctx.Branch,\n\t\taxdb.AXDBSelectColumns: []string{service.TemplateId, service.TemplateName, service.TemplateType,\n\t\t\tservice.TemplateCost, service.TemplateJobsSuccess, service.TemplateJobsFail},\n\t}\n\toldTempArray, axErr := service.GetTemplates(query)\n\tif axErr != nil {\n\t\treturn axErr\n\t}\n\tutils.DebugLog.Printf(\"Found %d existing templates in database (repo: %s, branch: %s)\", len(oldTempArray), ctx.Repo, ctx.Branch)\n\n\t// First iterate existing templates, and see if we need to delete any because either the template is gone, or template was invalid.\n\t// If there is an existing one, we update the entry in the database (will have a new revision)\n\tvar toBeDeleted []service.EmbeddedTemplateIf\n\tupdated := make(map[string]bool)\n\tfor _, existing := range oldTempArray {\n\t\tresult, ok := ctx.Results[existing.GetName()]\n\t\tif !ok {\n\t\t\tutils.DebugLog.Printf(\"Marking %v for deletion: no longer exists in branch\\n\", existing)\n\t\t\ttoBeDeleted = append(toBeDeleted, existing)\n\t\t\tcontinue\n\t\t}\n\t\tif result.AXErr != nil {\n\t\t\tutils.DebugLog.Printf(\"Marking %v for deletion: incoming template had error: %v\\n\", existing, result.AXErr)\n\t\t\ttoBeDeleted = append(toBeDeleted, existing)\n\t\t\tcontinue\n\t\t}\n\t\teTmpl, axErr := service.EmbedServiceTemplate(result.Template, ctx)\n\t\tif axErr != nil {\n\t\t\tutils.ErrorLog.Printf(\"Error generating embedded template %s: %v\", result.Template.GetName(), axErr)\n\t\t\tcontinue\n\t\t}\n\t\tupdated[eTmpl.GetName()] = true\n\t\t// preserve previous stats\n\t\tstats := existing.GetStats()\n\t\teTmpl.SetStats(stats.Cost, stats.JobsFail, 
stats.JobsSuccess)\n\t\tutils.DebugLog.Printf(\"Updating existing service template %v\\n\", eTmpl)\n\t\taxErr = UpdateTemplate(eTmpl)\n\t\tif axErr != nil {\n\t\t\tutils.ErrorLog.Printf(\"Failed to update template %v: %v\", eTmpl, axErr)\n\t\t}\n\t}\n\n\t// Iterate the validated templates, and insert any new ones\n\tfor _, st := range validatedTemplates {\n\t\tif updated[st.GetName()] {\n\t\t\t// skip the ones we just updated\n\t\t\tcontinue\n\t\t}\n\t\teTmpl, axErr := service.EmbedServiceTemplate(st, ctx)\n\t\tif axErr != nil {\n\t\t\tutils.ErrorLog.Printf(\"Error generating embedded template %s: %v\", st.GetName(), axErr)\n\t\t\tcontinue\n\t\t}\n\t\tutils.DebugLog.Printf(\"Inserting new service template %v\\n\", eTmpl)\n\t\taxErr = InsertTemplate(eTmpl)\n\t\tif axErr != nil {\n\t\t\tutils.ErrorLog.Printf(\"Failed to insert template %v: %v\", eTmpl, axErr)\n\t\t}\n\t}\n\n\tfor _, t := range toBeDeleted {\n\t\t_, e := utils.Dbcl.Delete(axdb.AXDBAppAXOPS, service.TemplateTable, []map[string]interface{}{{service.TemplateId: t.GetID()}})\n\t\tif e != nil {\n\t\t\tutils.ErrorLog.Printf(\"Failed to delete template %v: %v\", t, e)\n\t\t\taxErr = e\n\t\t}\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FilterWalletCreated is a free log retrieval operation binding the contract event 0xca0b7dde26052d34217ef1a0cee48085a07ca32da0a918609937a307d496bbf5. Solidity: event WalletCreated(address indexed walletAddress, address indexed userAddress, address adminProxy)
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterWalletCreated(opts *bind.FilterOpts, walletAddress []common.Address, userAddress []common.Address) (*ValidatorWalletCreatorWalletCreatedIterator, error) {
var walletAddressRule []interface{}
for _, walletAddressItem := range walletAddress {
walletAddressRule = append(walletAddressRule, walletAddressItem)
}
var userAddressRule []interface{}
for _, userAddressItem := range userAddress {
userAddressRule = append(userAddressRule, userAddressItem)
}
logs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, "WalletCreated", walletAddressRule, userAddressRule)
if err != nil {
return nil, err
}
return &ValidatorWalletCreatorWalletCreatedIterator{contract: _ValidatorWalletCreator.contract, event: "WalletCreated", logs: logs, sub: sub}, nil
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchWalletCreated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorWalletCreated, walletAddress []common.Address, userAddress []common.Address) (event.Subscription, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorWalletCreated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseWalletCreated(log types.Log) (*ValidatorWalletCreatorWalletCreated, error) {\n\tevent := new(ValidatorWalletCreatorWalletCreated)\n\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func walletCreate(gateway *daemon.Gateway) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger.Info(\"API request made to create a wallet\")\n\t\tseed := r.FormValue(\"seed\")\n\t\tlabel := r.FormValue(\"label\")\n\t\twltName := wallet.NewWalletFilename()\n\t\tvar wlt wallet.Wallet\n\t\tvar err error\n\t\t// the wallet name may dup, rename it till no conflict.\n\t\tfor {\n\t\t\twlt, err = Wg.CreateWallet(wltName, wallet.OptSeed(seed), wallet.OptLabel(label))\n\t\t\tif err != nil && strings.Contains(err.Error(), \"renaming\") {\n\t\t\t\twltName = wallet.NewWalletFilename()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif err := Wg.SaveWallet(wlt.GetID()); err != nil {\n\t\t\twh.Error400(w, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\trlt := wallet.NewReadableWallet(wlt)\n\t\twh.SendOr500(w, rlt)\n\t}\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterTemplateUpdated(opts *bind.FilterOpts) (*ValidatorWalletCreatorTemplateUpdatedIterator, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorTemplateUpdatedIterator{contract: _ValidatorWalletCreator.contract, event: \"TemplateUpdated\", logs: logs, sub: sub}, nil\n}",
"func (_Testproxyfactory *TestproxyfactoryFilterer) FilterProxyCreated(opts *bind.FilterOpts) (*TestproxyfactoryProxyCreatedIterator, error) {\n\n\tlogs, sub, err := _Testproxyfactory.contract.FilterLogs(opts, \"ProxyCreated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TestproxyfactoryProxyCreatedIterator{contract: _Testproxyfactory.contract, event: \"ProxyCreated\", logs: logs, sub: sub}, nil\n}",
"func (_Marketplace *MarketplaceFilterer) WatchAuctionCreated(opts *bind.WatchOpts, sink chan<- *MarketplaceAuctionCreated, _nftAddress []common.Address, _tokenId []*big.Int) (event.Subscription, error) {\n\n\tvar _nftAddressRule []interface{}\n\tfor _, _nftAddressItem := range _nftAddress {\n\t\t_nftAddressRule = append(_nftAddressRule, _nftAddressItem)\n\t}\n\tvar _tokenIdRule []interface{}\n\tfor _, _tokenIdItem := range _tokenId {\n\t\t_tokenIdRule = append(_tokenIdRule, _tokenIdItem)\n\t}\n\n\tlogs, sub, err := _Marketplace.contract.WatchLogs(opts, \"AuctionCreated\", _nftAddressRule, _tokenIdRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(MarketplaceAuctionCreated)\n\t\t\t\tif err := _Marketplace.contract.UnpackLog(event, \"AuctionCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_Marketplace *MarketplaceFilterer) FilterAuctionCreated(opts *bind.FilterOpts, _nftAddress []common.Address, _tokenId []*big.Int) (*MarketplaceAuctionCreatedIterator, error) {\n\n\tvar _nftAddressRule []interface{}\n\tfor _, _nftAddressItem := range _nftAddress {\n\t\t_nftAddressRule = append(_nftAddressRule, _nftAddressItem)\n\t}\n\tvar _tokenIdRule []interface{}\n\tfor _, _tokenIdItem := range _tokenId {\n\t\t_tokenIdRule = append(_tokenIdRule, _tokenIdItem)\n\t}\n\n\tlogs, sub, err := _Marketplace.contract.FilterLogs(opts, \"AuctionCreated\", _nftAddressRule, _tokenIdRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &MarketplaceAuctionCreatedIterator{contract: _Marketplace.contract, event: \"AuctionCreated\", logs: logs, sub: sub}, nil\n}",
"func (_Pancakeswap *PancakeswapFilterer) FilterPairCreated(opts *bind.FilterOpts, token0 []common.Address, token1 []common.Address) (*PancakeswapPairCreatedIterator, error) {\n\n\tvar token0Rule []interface{}\n\tfor _, token0Item := range token0 {\n\t\ttoken0Rule = append(token0Rule, token0Item)\n\t}\n\tvar token1Rule []interface{}\n\tfor _, token1Item := range token1 {\n\t\ttoken1Rule = append(token1Rule, token1Item)\n\t}\n\n\tlogs, sub, err := _Pancakeswap.contract.FilterLogs(opts, \"PairCreated\", token0Rule, token1Rule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PancakeswapPairCreatedIterator{contract: _Pancakeswap.contract, event: \"PairCreated\", logs: logs, sub: sub}, nil\n}",
"func (_RollupCreator *RollupCreatorFilterer) FilterRollupCreated(opts *bind.FilterOpts) (*RollupCreatorRollupCreatedIterator, error) {\n\n\tlogs, sub, err := _RollupCreator.contract.FilterLogs(opts, \"RollupCreated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &RollupCreatorRollupCreatedIterator{contract: _RollupCreator.contract, event: \"RollupCreated\", logs: logs, sub: sub}, nil\n}",
"func (_UniswapV2 *UniswapV2Filterer) WatchPairCreated(opts *bind.WatchOpts, sink chan<- *UniswapV2PairCreated, token0 []common.Address, token1 []common.Address) (event.Subscription, error) {\n\n\tvar token0Rule []interface{}\n\tfor _, token0Item := range token0 {\n\t\ttoken0Rule = append(token0Rule, token0Item)\n\t}\n\tvar token1Rule []interface{}\n\tfor _, token1Item := range token1 {\n\t\ttoken1Rule = append(token1Rule, token1Item)\n\t}\n\n\tlogs, sub, err := _UniswapV2.contract.WatchLogs(opts, \"PairCreated\", token0Rule, token1Rule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(UniswapV2PairCreated)\n\t\t\t\tif err := _UniswapV2.contract.UnpackLog(event, \"PairCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_Pancakeswap *PancakeswapFilterer) WatchPairCreated(opts *bind.WatchOpts, sink chan<- *PancakeswapPairCreated, token0 []common.Address, token1 []common.Address) (event.Subscription, error) {\n\n\tvar token0Rule []interface{}\n\tfor _, token0Item := range token0 {\n\t\ttoken0Rule = append(token0Rule, token0Item)\n\t}\n\tvar token1Rule []interface{}\n\tfor _, token1Item := range token1 {\n\t\ttoken1Rule = append(token1Rule, token1Item)\n\t}\n\n\tlogs, sub, err := _Pancakeswap.contract.WatchLogs(opts, \"PairCreated\", token0Rule, token1Rule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(PancakeswapPairCreated)\n\t\t\t\tif err := _Pancakeswap.contract.UnpackLog(event, \"PairCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchTemplateUpdated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorTemplateUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_Users *usersEvents) FilterTemporaryCreated(opts *bind.FilterOpts, proxy []common.Address, feePayer []common.Address, identityHash []common.Hash) (ablbind.EventIterator, error) {\n\n\tvar proxyRule []interface{}\n\tfor _, proxyItem := range proxy {\n\t\tproxyRule = append(proxyRule, proxyItem)\n\t}\n\tvar feePayerRule []interface{}\n\tfor _, feePayerItem := range feePayer {\n\t\tfeePayerRule = append(feePayerRule, feePayerItem)\n\t}\n\tvar identityHashRule []interface{}\n\tfor _, identityHashItem := range identityHash {\n\t\tidentityHashRule = append(identityHashRule, identityHashItem)\n\t}\n\n\tlogs, sub, err := _Users.contract.FilterLogs(opts, \"TemporaryCreated\", proxyRule, feePayerRule, identityHashRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &UsersTemporaryCreatedIterator{contract: _Users.contract, event: \"TemporaryCreated\", logs: logs, sub: sub}, nil\n}",
"func walletCreateND(name string) error {\n\t_, err := nd.CreateWallet(name, store, keystorev4.New())\n\treturn err\n}",
"func (_PancakeFactory *PancakeFactoryFilterer) FilterPairCreated(opts *bind.FilterOpts, token0 []common.Address, token1 []common.Address) (*PancakeFactoryPairCreatedIterator, error) {\n\n\tvar token0Rule []interface{}\n\tfor _, token0Item := range token0 {\n\t\ttoken0Rule = append(token0Rule, token0Item)\n\t}\n\tvar token1Rule []interface{}\n\tfor _, token1Item := range token1 {\n\t\ttoken1Rule = append(token1Rule, token1Item)\n\t}\n\n\tlogs, sub, err := _PancakeFactory.contract.FilterLogs(opts, \"PairCreated\", token0Rule, token1Rule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PancakeFactoryPairCreatedIterator{contract: _PancakeFactory.contract, event: \"PairCreated\", logs: logs, sub: sub}, nil\n}",
"func (_Testproxyfactory *TestproxyfactoryFilterer) WatchProxyCreated(opts *bind.WatchOpts, sink chan<- *TestproxyfactoryProxyCreated) (event.Subscription, error) {\n\n\tlogs, sub, err := _Testproxyfactory.contract.WatchLogs(opts, \"ProxyCreated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(TestproxyfactoryProxyCreated)\n\t\t\t\tif err := _Testproxyfactory.contract.UnpackLog(event, \"ProxyCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func CreateWallet(pubKey []byte) (*Wallet, error) {\n\tpublickHash, err := hashPublicKey(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversionedPayload := append(Version, publickHash...)\n\tchecksum := checksum(versionedPayload)\n\n\taddress := append(versionedPayload, checksum...)\n\n\treturn &Wallet{\n\t\tBase58Address: base58.Encode(address),\n\t\tAddress: address,\n\t}, nil\n}",
"func CreateWallet(ctx context.Context, g *libkb.GlobalContext) (created bool, err error) {\n\tdefer g.CTraceTimed(ctx, \"Stellar.CreateWallet\", func() error { return err })()\n\t// TODO: short-circuit if the user has a bundle already\n\tclearBundle, err := bundle.NewInitialBundle()\n\tif err != nil {\n\t\treturn created, err\n\t}\n\terr = remote.PostWithChainlink(ctx, g, clearBundle)\n\tswitch e := err.(type) {\n\tcase nil:\n\t\t// ok\n\tcase libkb.AppStatusError:\n\t\tswitch keybase1.StatusCode(e.Code) {\n\t\tcase keybase1.StatusCode_SCStellarWrongRevision:\n\t\t\t// Assume this happened because a bundle already existed.\n\t\t\t// And suppress the error.\n\t\t\tg.Log.CDebugf(ctx, \"suppressing error: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\tdefault:\n\t\treturn false, err\n\t}\n\treturn true, err\n}",
"func (_SuperCoin *SuperCoinFilterer) WatchWhitelistedAddressAdded(opts *bind.WatchOpts, sink chan<- *SuperCoinWhitelistedAddressAdded) (event.Subscription, error) {\n\n\tlogs, sub, err := _SuperCoin.contract.WatchLogs(opts, \"WhitelistedAddressAdded\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(SuperCoinWhitelistedAddressAdded)\n\t\t\t\tif err := _SuperCoin.contract.UnpackLog(event, \"WhitelistedAddressAdded\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WatchWalletCreated is a free log subscription operation binding the contract event 0xca0b7dde26052d34217ef1a0cee48085a07ca32da0a918609937a307d496bbf5. Solidity: event WalletCreated(address indexed walletAddress, address indexed userAddress, address adminProxy)
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchWalletCreated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorWalletCreated, walletAddress []common.Address, userAddress []common.Address) (event.Subscription, error) {
var walletAddressRule []interface{}
for _, walletAddressItem := range walletAddress {
walletAddressRule = append(walletAddressRule, walletAddressItem)
}
var userAddressRule []interface{}
for _, userAddressItem := range userAddress {
userAddressRule = append(userAddressRule, userAddressItem)
}
logs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, "WalletCreated", walletAddressRule, userAddressRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(ValidatorWalletCreatorWalletCreated)
if err := _ValidatorWalletCreator.contract.UnpackLog(event, "WalletCreated", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterWalletCreated(opts *bind.FilterOpts, walletAddress []common.Address, userAddress []common.Address) (*ValidatorWalletCreatorWalletCreatedIterator, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorWalletCreatedIterator{contract: _ValidatorWalletCreator.contract, event: \"WalletCreated\", logs: logs, sub: sub}, nil\n}",
"func walletCreate(gateway *daemon.Gateway) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger.Info(\"API request made to create a wallet\")\n\t\tseed := r.FormValue(\"seed\")\n\t\tlabel := r.FormValue(\"label\")\n\t\twltName := wallet.NewWalletFilename()\n\t\tvar wlt wallet.Wallet\n\t\tvar err error\n\t\t// the wallet name may dup, rename it till no conflict.\n\t\tfor {\n\t\t\twlt, err = Wg.CreateWallet(wltName, wallet.OptSeed(seed), wallet.OptLabel(label))\n\t\t\tif err != nil && strings.Contains(err.Error(), \"renaming\") {\n\t\t\t\twltName = wallet.NewWalletFilename()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif err := Wg.SaveWallet(wlt.GetID()); err != nil {\n\t\t\twh.Error400(w, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\trlt := wallet.NewReadableWallet(wlt)\n\t\twh.SendOr500(w, rlt)\n\t}\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchTemplateUpdated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorTemplateUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"TemplateUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_Marketplace *MarketplaceFilterer) WatchAuctionCreated(opts *bind.WatchOpts, sink chan<- *MarketplaceAuctionCreated, _nftAddress []common.Address, _tokenId []*big.Int) (event.Subscription, error) {\n\n\tvar _nftAddressRule []interface{}\n\tfor _, _nftAddressItem := range _nftAddress {\n\t\t_nftAddressRule = append(_nftAddressRule, _nftAddressItem)\n\t}\n\tvar _tokenIdRule []interface{}\n\tfor _, _tokenIdItem := range _tokenId {\n\t\t_tokenIdRule = append(_tokenIdRule, _tokenIdItem)\n\t}\n\n\tlogs, sub, err := _Marketplace.contract.WatchLogs(opts, \"AuctionCreated\", _nftAddressRule, _tokenIdRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(MarketplaceAuctionCreated)\n\t\t\t\tif err := _Marketplace.contract.UnpackLog(event, \"AuctionCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_Testproxyfactory *TestproxyfactoryFilterer) WatchProxyCreated(opts *bind.WatchOpts, sink chan<- *TestproxyfactoryProxyCreated) (event.Subscription, error) {\n\n\tlogs, sub, err := _Testproxyfactory.contract.WatchLogs(opts, \"ProxyCreated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(TestproxyfactoryProxyCreated)\n\t\t\t\tif err := _Testproxyfactory.contract.UnpackLog(event, \"ProxyCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_Pancakeswap *PancakeswapFilterer) WatchPairCreated(opts *bind.WatchOpts, sink chan<- *PancakeswapPairCreated, token0 []common.Address, token1 []common.Address) (event.Subscription, error) {\n\n\tvar token0Rule []interface{}\n\tfor _, token0Item := range token0 {\n\t\ttoken0Rule = append(token0Rule, token0Item)\n\t}\n\tvar token1Rule []interface{}\n\tfor _, token1Item := range token1 {\n\t\ttoken1Rule = append(token1Rule, token1Item)\n\t}\n\n\tlogs, sub, err := _Pancakeswap.contract.WatchLogs(opts, \"PairCreated\", token0Rule, token1Rule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(PancakeswapPairCreated)\n\t\t\t\tif err := _Pancakeswap.contract.UnpackLog(event, \"PairCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_UniswapV2 *UniswapV2Filterer) WatchPairCreated(opts *bind.WatchOpts, sink chan<- *UniswapV2PairCreated, token0 []common.Address, token1 []common.Address) (event.Subscription, error) {\n\n\tvar token0Rule []interface{}\n\tfor _, token0Item := range token0 {\n\t\ttoken0Rule = append(token0Rule, token0Item)\n\t}\n\tvar token1Rule []interface{}\n\tfor _, token1Item := range token1 {\n\t\ttoken1Rule = append(token1Rule, token1Item)\n\t}\n\n\tlogs, sub, err := _UniswapV2.contract.WatchLogs(opts, \"PairCreated\", token0Rule, token1Rule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(UniswapV2PairCreated)\n\t\t\t\tif err := _UniswapV2.contract.UnpackLog(event, \"PairCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseWalletCreated(log types.Log) (*ValidatorWalletCreatorWalletCreated, error) {\n\tevent := new(ValidatorWalletCreatorWalletCreated)\n\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (_Users *usersEvents) WatchTemporaryCreated(opts *bind.WatchOpts, sink chan<- *UsersTemporaryCreated, proxy []common.Address, feePayer []common.Address, identityHash []common.Hash) (event.Subscription, error) {\n\n\tvar proxyRule []interface{}\n\tfor _, proxyItem := range proxy {\n\t\tproxyRule = append(proxyRule, proxyItem)\n\t}\n\tvar feePayerRule []interface{}\n\tfor _, feePayerItem := range feePayer {\n\t\tfeePayerRule = append(feePayerRule, feePayerItem)\n\t}\n\tvar identityHashRule []interface{}\n\tfor _, identityHashItem := range identityHash {\n\t\tidentityHashRule = append(identityHashRule, identityHashItem)\n\t}\n\n\tlogs, sub, err := _Users.contract.WatchLogs(opts, \"TemporaryCreated\", proxyRule, feePayerRule, identityHashRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevt := new(UsersTemporaryCreated)\n\t\t\t\tif err := _Users.contract.UnpackLog(evt, \"TemporaryCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevt.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- evt:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func walletCreateND(name string) error {\n\t_, err := nd.CreateWallet(name, store, keystorev4.New())\n\treturn err\n}",
"func (_RollupCreator *RollupCreatorFilterer) WatchRollupCreated(opts *bind.WatchOpts, sink chan<- *RollupCreatorRollupCreated) (event.Subscription, error) {\n\n\tlogs, sub, err := _RollupCreator.contract.WatchLogs(opts, \"RollupCreated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(RollupCreatorRollupCreated)\n\t\t\t\tif err := _RollupCreator.contract.UnpackLog(event, \"RollupCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func CreateWallet(ctx context.Context, g *libkb.GlobalContext) (created bool, err error) {\n\tdefer g.CTraceTimed(ctx, \"Stellar.CreateWallet\", func() error { return err })()\n\t// TODO: short-circuit if the user has a bundle already\n\tclearBundle, err := bundle.NewInitialBundle()\n\tif err != nil {\n\t\treturn created, err\n\t}\n\terr = remote.PostWithChainlink(ctx, g, clearBundle)\n\tswitch e := err.(type) {\n\tcase nil:\n\t\t// ok\n\tcase libkb.AppStatusError:\n\t\tswitch keybase1.StatusCode(e.Code) {\n\t\tcase keybase1.StatusCode_SCStellarWrongRevision:\n\t\t\t// Assume this happened because a bundle already existed.\n\t\t\t// And suppress the error.\n\t\t\tg.Log.CDebugf(ctx, \"suppressing error: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\tdefault:\n\t\treturn false, err\n\t}\n\treturn true, err\n}",
"func (_SuperCoin *SuperCoinFilterer) WatchWhitelistedAddressAdded(opts *bind.WatchOpts, sink chan<- *SuperCoinWhitelistedAddressAdded) (event.Subscription, error) {\n\n\tlogs, sub, err := _SuperCoin.contract.WatchLogs(opts, \"WhitelistedAddressAdded\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(SuperCoinWhitelistedAddressAdded)\n\t\t\t\tif err := _SuperCoin.contract.UnpackLog(event, \"WhitelistedAddressAdded\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func lnCreateWallet(net string) error {\n\t// create wallet\n\tlog.Printf(\"Creating lightning wallet: %v\", net)\n\n\tdcrwalletExe := filepath.Join(destination,\n\t\t\"decred-\"+tuple+\"-\"+manifestDecredVersion, \"dcrlncli\")\n\targs := []string{\"create\"}\n\tswitch net {\n\tcase \"testnet\":\n\t\targs = append(args, \"--testnet\")\n\tcase \"simnet\":\n\t\targs = append(args, \"--simnet\")\n\t}\n\tcmd := exec.Command(dcrwalletExe, args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractFilterer) WatchContractInstantiation(opts *bind.WatchOpts, sink chan<- *MultiSigWalletFactoryContractContractInstantiation) (event.Subscription, error) {\n\n\tlogs, sub, err := _MultiSigWalletFactoryContract.contract.WatchLogs(opts, \"ContractInstantiation\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(MultiSigWalletFactoryContractContractInstantiation)\n\t\t\t\tif err := _MultiSigWalletFactoryContract.contract.UnpackLog(event, \"ContractInstantiation\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func CreateWallet(pubKey []byte) (*Wallet, error) {\n\tpublickHash, err := hashPublicKey(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversionedPayload := append(Version, publickHash...)\n\tchecksum := checksum(versionedPayload)\n\n\taddress := append(versionedPayload, checksum...)\n\n\treturn &Wallet{\n\t\tBase58Address: base58.Encode(address),\n\t\tAddress: address,\n\t}, nil\n}",
"func (s *State) InsertWallet(w Wallet, newWallet bool) (err error) {\n\twn := s.walletNode(w.ID)\n\tif wn != nil {\n\t\terr = errors.New(\"wallet of that id already exists in quorum\")\n\t\treturn\n\t}\n\n\twn = new(walletNode)\n\twn.id = w.ID\n\twn.weight = int(w.Sector.Atoms)\n\ts.insertWalletNode(wn)\n\n\tif w.KnownScripts == nil {\n\t\tw.KnownScripts = make(map[string]ScriptInputEvent)\n\t} else {\n\t\tfor _, scriptEvent := range w.KnownScripts {\n\t\t\ts.InsertEvent(&scriptEvent, newWallet)\n\t\t}\n\t}\n\n\tif w.Sector.ActiveUpdates == nil {\n\t\tw.Sector.ActiveUpdates = make([]SectorUpdate, 0)\n\t} else {\n\t\tfor _, update := range w.Sector.ActiveUpdates {\n\t\t\ts.InsertEvent(&update.Event, newWallet)\n\t\t}\n\t}\n\n\ts.SaveWallet(w)\n\treturn\n}",
"func CreateWallet(ctx context.Context, name string, store e2wtypes.Store, encryptor e2wtypes.Encryptor) (e2wtypes.Wallet, error) {\n\t// First, try to open the wallet.\n\t_, err := OpenWallet(ctx, name, store, encryptor)\n\tif err == nil || !strings.Contains(err.Error(), \"wallet not found\") {\n\t\treturn nil, fmt.Errorf(\"wallet %q already exists\", name)\n\t}\n\n\tid, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate UUID\")\n\t}\n\n\tw := newWallet()\n\tw.id = id\n\tw.name = name\n\tw.version = version\n\tw.store = store\n\tw.encryptor = encryptor\n\n\treturn w, w.storeWallet()\n}",
"func (s SkyNode) CreateWallet(name string, seed string, csrf string) (*Wallet, error) {\n\tform := url.Values{\n\t\t\"label\": {name},\n\t\t\"seed\": {seed},\n\t}\n\n\treq, err := http.NewRequest(\"POST\", (fmt.Sprintf(\"%s/wallet/create\", s.baseURL)), strings.NewReader(form.Encode()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.Header.Set(\"X-CSRF-Token\", csrf)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw := &Wallet{}\n\terr = json.NewDecoder(resp.Body).Decode(w)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn w, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ParseWalletCreated is a log parse operation binding the contract event 0xca0b7dde26052d34217ef1a0cee48085a07ca32da0a918609937a307d496bbf5. Solidity: event WalletCreated(address indexed walletAddress, address indexed userAddress, address adminProxy)
|
func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseWalletCreated(log types.Log) (*ValidatorWalletCreatorWalletCreated, error) {
event := new(ValidatorWalletCreatorWalletCreated)
if err := _ValidatorWalletCreator.contract.UnpackLog(event, "WalletCreated", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
|
[
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) WatchWalletCreated(opts *bind.WatchOpts, sink chan<- *ValidatorWalletCreatorWalletCreated, walletAddress []common.Address, userAddress []common.Address) (event.Subscription, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.WatchLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ValidatorWalletCreatorWalletCreated)\n\t\t\t\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"WalletCreated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) FilterWalletCreated(opts *bind.FilterOpts, walletAddress []common.Address, userAddress []common.Address) (*ValidatorWalletCreatorWalletCreatedIterator, error) {\n\n\tvar walletAddressRule []interface{}\n\tfor _, walletAddressItem := range walletAddress {\n\t\twalletAddressRule = append(walletAddressRule, walletAddressItem)\n\t}\n\tvar userAddressRule []interface{}\n\tfor _, userAddressItem := range userAddress {\n\t\tuserAddressRule = append(userAddressRule, userAddressItem)\n\t}\n\n\tlogs, sub, err := _ValidatorWalletCreator.contract.FilterLogs(opts, \"WalletCreated\", walletAddressRule, userAddressRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ValidatorWalletCreatorWalletCreatedIterator{contract: _ValidatorWalletCreator.contract, event: \"WalletCreated\", logs: logs, sub: sub}, nil\n}",
"func walletCreate(gateway *daemon.Gateway) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger.Info(\"API request made to create a wallet\")\n\t\tseed := r.FormValue(\"seed\")\n\t\tlabel := r.FormValue(\"label\")\n\t\twltName := wallet.NewWalletFilename()\n\t\tvar wlt wallet.Wallet\n\t\tvar err error\n\t\t// the wallet name may dup, rename it till no conflict.\n\t\tfor {\n\t\t\twlt, err = Wg.CreateWallet(wltName, wallet.OptSeed(seed), wallet.OptLabel(label))\n\t\t\tif err != nil && strings.Contains(err.Error(), \"renaming\") {\n\t\t\t\twltName = wallet.NewWalletFilename()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif err := Wg.SaveWallet(wlt.GetID()); err != nil {\n\t\t\twh.Error400(w, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\trlt := wallet.NewReadableWallet(wlt)\n\t\twh.SendOr500(w, rlt)\n\t}\n}",
"func (_Pancakeswap *PancakeswapFilterer) ParsePairCreated(log types.Log) (*PancakeswapPairCreated, error) {\n\tevent := new(PancakeswapPairCreated)\n\tif err := _Pancakeswap.contract.UnpackLog(event, \"PairCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (_Testproxyfactory *TestproxyfactoryFilterer) ParseProxyCreated(log types.Log) (*TestproxyfactoryProxyCreated, error) {\n\tevent := new(TestproxyfactoryProxyCreated)\n\tif err := _Testproxyfactory.contract.UnpackLog(event, \"ProxyCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (_UniswapV2 *UniswapV2Filterer) ParsePairCreated(log types.Log) (*UniswapV2PairCreated, error) {\n\tevent := new(UniswapV2PairCreated)\n\tif err := _UniswapV2.contract.UnpackLog(event, \"PairCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func CreateWallet(pubKey []byte) (*Wallet, error) {\n\tpublickHash, err := hashPublicKey(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversionedPayload := append(Version, publickHash...)\n\tchecksum := checksum(versionedPayload)\n\n\taddress := append(versionedPayload, checksum...)\n\n\treturn &Wallet{\n\t\tBase58Address: base58.Encode(address),\n\t\tAddress: address,\n\t}, nil\n}",
"func (e *Engine) CreateWallet(w *state.Wallet, childID state.WalletID, childBalance state.Balance, childScript []byte) (err error) {\n\t// Check that the wallet making the call has enough funds to deposit into the\n\t// wallet being created, and then subtract the funds from the parent wallet.\n\tif w.Balance.Compare(childBalance) < 0 {\n\t\terr = errInsufficientBalance\n\t\treturn\n\t}\n\tw.Balance.Subtract(childBalance)\n\n\t// Create a new wallet based on the inputs.\n\tchildWallet := state.Wallet{\n\t\tID: childID,\n\t\tBalance: childBalance,\n\t\tScript: childScript,\n\t}\n\n\t// Insert the child wallet.\n\terr = e.state.InsertWallet(childWallet, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func CreateWallet(ctx context.Context, g *libkb.GlobalContext) (created bool, err error) {\n\tdefer g.CTraceTimed(ctx, \"Stellar.CreateWallet\", func() error { return err })()\n\t// TODO: short-circuit if the user has a bundle already\n\tclearBundle, err := bundle.NewInitialBundle()\n\tif err != nil {\n\t\treturn created, err\n\t}\n\terr = remote.PostWithChainlink(ctx, g, clearBundle)\n\tswitch e := err.(type) {\n\tcase nil:\n\t\t// ok\n\tcase libkb.AppStatusError:\n\t\tswitch keybase1.StatusCode(e.Code) {\n\t\tcase keybase1.StatusCode_SCStellarWrongRevision:\n\t\t\t// Assume this happened because a bundle already existed.\n\t\t\t// And suppress the error.\n\t\t\tg.Log.CDebugf(ctx, \"suppressing error: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\tdefault:\n\t\treturn false, err\n\t}\n\treturn true, err\n}",
"func walletCreateND(name string) error {\n\t_, err := nd.CreateWallet(name, store, keystorev4.New())\n\treturn err\n}",
"func (_ValidatorWalletCreator *ValidatorWalletCreatorFilterer) ParseTemplateUpdated(log types.Log) (*ValidatorWalletCreatorTemplateUpdated, error) {\n\tevent := new(ValidatorWalletCreatorTemplateUpdated)\n\tif err := _ValidatorWalletCreator.contract.UnpackLog(event, \"TemplateUpdated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func CreateWallet(mnenomic string, password string) (*Wallet, error) {\n\n\tseed := pbkdf2.Key([]byte(mnenomic), []byte(\"mnemonic\"+password), 2048, 32, sha512.New)\n\tprivKey := ed25519.NewKeyFromSeed(seed)\n\tpubKey := privKey.Public().(ed25519.PublicKey)\n\tpubKeyBytes := []byte(pubKey)\n\tsignKp := keyPair{PrivKey: privKey, PubKey: pubKeyBytes}\n\n\taddress, err := generatePublicHash(pubKeyBytes)\n\tif err != nil {\n\t\treturn &Wallet{}, errors.Wrapf(err, \"could not create wallet\")\n\t}\n\n\twallet := Wallet{\n\t\tAddress: address,\n\t\tMnemonic: mnenomic,\n\t\tKp: signKp,\n\t\tSeed: seed,\n\t\tSk: b58cencode(privKey, edskprefix),\n\t\tPk: b58cencode(pubKeyBytes, edpkprefix),\n\t}\n\n\treturn &wallet, nil\n}",
"func (s SkyNode) CreateWallet(name string, seed string, csrf string) (*Wallet, error) {\n\tform := url.Values{\n\t\t\"label\": {name},\n\t\t\"seed\": {seed},\n\t}\n\n\treq, err := http.NewRequest(\"POST\", (fmt.Sprintf(\"%s/wallet/create\", s.baseURL)), strings.NewReader(form.Encode()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.Header.Set(\"X-CSRF-Token\", csrf)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw := &Wallet{}\n\terr = json.NewDecoder(resp.Body).Decode(w)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn w, nil\n}",
"func (wallet *WalletAPI) CreateWallet(\n\tfilename string,\n\tpassword string) error {\n\terr := wallet.check()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif filename == \"\" {\n\t\treturn errors.New(\"Filename of the wallet is required\")\n\t}\n\tif password == \"\" {\n\t\treturn errors.New(\"Password of the wallet is required\")\n\t}\n\tparams := make(map[string]interface{})\n\tparams[\"daemonHost\"] = wallet.DaemonURL\n\tparams[\"daemonPort\"] = wallet.DaemonPort\n\tparams[\"daemonSSL\"] = wallet.DaemonSSL\n\tparams[\"filename\"] = filename\n\tparams[\"password\"] = password\n\n\t_, err = wallet.makePostRequest(\"wallet/create\", params)\n\treturn err\n}",
"func (b *BTSE) CreateWalletAddress(ctx context.Context, currency string) (WalletAddress, error) {\n\tvar resp WalletAddress\n\treq := make(map[string]interface{}, 1)\n\treq[\"currency\"] = currency\n\terr := b.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, btseWalletAddress, true, nil, req, &resp, queryFunc)\n\tif err != nil {\n\t\terrResp := ErrorResponse{}\n\t\terrResponseStr := strings.Split(err.Error(), \"raw response: \")\n\t\terr := json.Unmarshal([]byte(errResponseStr[1]), &errResp)\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t\tif errResp.ErrorCode == 3528 {\n\t\t\twalletAddress := strings.Split(errResp.Message, \"BADREQUEST: \")\n\t\t\treturn WalletAddress{\n\t\t\t\t{\n\t\t\t\t\tAddress: walletAddress[1],\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\treturn resp, err\n\t}\n\n\treturn resp, nil\n}",
"func CreateWalletFromEthMnemonic(mnemonic, password string, statusCb WalletCallback) error {\n\tif len(_config.chain.Miners) < 1 || len(_config.chain.Sharders) < 1 {\n\t\treturn fmt.Errorf(\"SDK not initialized\")\n\t}\n\tgo func() {\n\t\tsigScheme := zcncrypto.NewSignatureScheme(_config.chain.SignatureScheme)\n\t\t_, err := sigScheme.GenerateKeysWithEth(mnemonic, password)\n\t\tif err != nil {\n\t\t\tstatusCb.OnWalletCreateComplete(StatusError, \"\", err.Error())\n\t\t\treturn\n\t\t}\n\t}()\n\treturn nil\n}",
"func lnCreateWallet(net string) error {\n\t// create wallet\n\tlog.Printf(\"Creating lightning wallet: %v\", net)\n\n\tdcrwalletExe := filepath.Join(destination,\n\t\t\"decred-\"+tuple+\"-\"+manifestDecredVersion, \"dcrlncli\")\n\targs := []string{\"create\"}\n\tswitch net {\n\tcase \"testnet\":\n\t\targs = append(args, \"--testnet\")\n\tcase \"simnet\":\n\t\targs = append(args, \"--simnet\")\n\t}\n\tcmd := exec.Command(dcrwalletExe, args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func (_Rollup *RollupFilterer) ParseRollupCreated(log types.Log) (*RollupRollupCreated, error) {\n\tevent := new(RollupRollupCreated)\n\tif err := _Rollup.contract.UnpackLog(event, \"RollupCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (_MultiSigWalletFactoryContract *MultiSigWalletFactoryContractTransactor) Create(opts *bind.TransactOpts, _owners []common.Address, _required *big.Int) (*types.Transaction, error) {\n\treturn _MultiSigWalletFactoryContract.contract.Transact(opts, \"create\", _owners, _required)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
BuildSearchQuery : Builds search query for the Identify endpoint on the copernicus MapServer
|
func BuildSearchQuery(coords Coordinates) string {
geometry := coords.GetBoundsInMeters()
json, err := json.Marshal(geometry)
if err != nil {
log.Fatal(err)
}
geometryString := string(json)
identifySearchQuery := IdentifySearchQuery{
Geometry: geometryString,
GeometryType: "esriGeometryEnvelope",
Tolerance: 1,
MapExtent: []float64{geometry.XMin, geometry.YMin, geometry.XMax, geometry.YMax},
ReturnGeometry: false,
ImageDisplay: []int{10, 10},
Format: "pjson",
}
v, _ := query.Values(identifySearchQuery)
return v.Encode()
}
|
[
"func (sc *SearchCriteria) BuildQuery(tableName string) string {\n\twhere := \"\"\n\tif len(sc.conditions) > 0 {\n\t\twhere = \" WHERE \" + strings.Join(sc.conditions, \" AND \")\n\t}\n\n\treturn \"SELECT id, source, category, level, message, trace, payload, created_at FROM \" + tableName + where + \" ORDER BY created_at DESC LIMIT ?\"\n}",
"func buildHostSearchQuery(tx *gorm.DB, criteria *models.HostFilterCriteria) *gorm.DB {\n\tdefaultLog.Trace(\"postgres/host_store:buildHostSearchQuery() Entering\")\n\tdefer defaultLog.Trace(\"postgres/host_store:buildHostSearchQuery() Leaving\")\n\n\tif tx == nil {\n\t\treturn nil\n\t}\n\n\ttx = tx.Model(&host{})\n\n\tif criteria == nil || reflect.DeepEqual(*criteria, models.HostFilterCriteria{}) {\n\t\ttx = tx.Order(\"name asc\")\n\t\treturn tx\n\t}\n\n\tif criteria.Id != uuid.Nil {\n\t\ttx = tx.Where(\"id = ?\", criteria.Id)\n\t} else if criteria.NameEqualTo != \"\" {\n\t\ttx = tx.Where(\"name = ?\", criteria.NameEqualTo)\n\t} else if criteria.NameContains != \"\" {\n\t\ttx = tx.Where(\"name like ? \", \"%\"+criteria.NameContains+\"%\")\n\t} else if criteria.HostHardwareId != uuid.Nil {\n\t\ttx = tx.Where(\"hardware_uuid = ?\", criteria.HostHardwareId)\n\t} else if criteria.IdList != nil {\n\t\ttx = tx.Where(\"id IN (?)\", criteria.IdList)\n\t} else if criteria.Trusted != nil {\n\t\ttx = tx.Joins(\"join report on report.host_id = host.id AND report.trusted = ?\", criteria.Trusted)\n\t}\n\n\tif criteria.OrderBy == models.Descending {\n\t\ttx = tx.Order(\"name desc\")\n\t} else {\n\t\ttx = tx.Order(\"name asc\")\n\t}\n\treturn tx\n}",
"func PrepareClientSearchQuery(req *restful.Request) (*Query, error) {\n\tquery := Query{\n\t\tPageNumber: 1,\n\t\tPageSize: 20,\n\t\tSortOrder: -1,\n\t\tStatus: common.Active,\n\t}\n\n\tval := req.QueryParameter(\"pageNumber\")\n\tif val != \"\" {\n\t\ti, err := strconv.Atoi(val)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error occurred during conversion: error: %v\\n\", err)\n\t\t\treturn nil, errors.CreateError(400, \"invalid_data\")\n\t\t}\n\n\t\tquery.PageNumber = i\n\t}\n\n\tval = req.QueryParameter(\"pageSize\")\n\tif val != \"\" {\n\t\ti, err := strconv.Atoi(val)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error occurred during conversion: error: %v\\n\", err)\n\t\t\treturn nil, errors.CreateError(400, \"invalid_data\")\n\t\t}\n\n\t\tquery.PageSize = i\n\t}\n\n\tquery.SortBy = req.QueryParameter(\"sortBy\")\n\tquery.Status = req.QueryParameter(\"status\")\n\tquery.Keyword = req.QueryParameter(\"keyword\")\n\tval = req.QueryParameter(\"sortOrder\")\n\tif val != \"\" {\n\t\tif val == \"asc\" {\n\t\t\tquery.SortOrder = 1\n\t\t}\n\t}\n\n\treturn &query, nil\n}",
"func SearchHandler(c *gin.Context) {\n\tval := \"\"\n\tpage := 1\n\tlimit := 10\n\toffset := 0\n\n\t// build up the final query in this var\n\tq := bson.M{}\n\n\tif val = c.Query(\"name\"); val != \"\" {\n\t\tq[\"name\"] = val\n\t}\n\n\tif val = c.Query(\"zones\"); val != \"\" {\n\t\tq[\"zones\"] = bson.M{\"$in\": strings.Split(val, \",\")}\n\t}\n\n\tif val = c.Query(\"lines\"); val != \"\" {\n\t\tq[\"lines\"] = bson.M{\"$in\": strings.Split(val, \",\")}\n\t}\n\n\t// geospatial search\n\tif val = c.Query(\"near\"); val != \"\" {\n\t\tcoords := strings.Split(val, \",\")\n\t\tlon, lonErr := strconv.ParseFloat(coords[0], 64)\n\t\tlat, latErr := strconv.ParseFloat(coords[1], 64)\n\t\tif latErr == nil && lonErr == nil {\n\t\t\tgeoPoint := bson.M{\"type\": \"Point\", \"coordinates\": []float64{lon, lat}}\n\t\t\tq[\"location\"] = bson.M{\"$near\": bson.M{\"$geometry\": geoPoint}}\n\t\t}\n\t}\n\n\tif val = c.Query(\"page\"); val != \"\" {\n\t\tpage, _ = strconv.Atoi(val)\n\t}\n\n\tcount, err := Count(q) // total number of documents returned\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar previousPage string\n\tvar nextPage string\n\ttotalPages := math.Ceil(float64(count) / float64(limit))\n\n\tif page > 1 {\n\t\tqs := c.Request.URL.Query()\n\t\tqs.Set(\"page\", strconv.Itoa(page-1))\n\t\tpreviousPage = fmt.Sprintf(\"/station/search?%s\", qs.Encode())\n\t}\n\n\tif (float64(page + 1)) <= totalPages {\n\t\tqs := c.Request.URL.Query()\n\t\tqs.Set(\"page\", strconv.Itoa(page+1))\n\t\tnextPage = fmt.Sprintf(\"/stations/search?%s\", qs.Encode())\n\t}\n\n\toffset = (page * limit) - limit\n\tstations, err := FindMany(q, limit, offset)\n\tresponse := &ListResponse{count, nextPage, previousPage, stations}\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.JSON(200, response)\n}",
"func (p *Provider) BuildURI(q string) string {\n\treturn fmt.Sprintf(\"https://500px.com/search?q=%s&type=photos\", url.QueryEscape(q))\n}",
"func (r *AddOnsListServerRequest) Search() string {\n\tif r != nil && r.search != nil {\n\t\treturn *r.search\n\t}\n\treturn \"\"\n}",
"func (trello Trello) buildQuery(endpoint string) (trelloApi *url.URL) {\n\ttrelloApi, _ = url.Parse(trello.Domain)\n\ttrelloApi.Path = endpoint\n\tvar q = trelloApi.Query()\n\tq.Add(\"key\", trello.AppKey)\n\tq.Add(\"token\", trello.ApiToken)\n\ttrelloApi.RawQuery = q.Encode()\n\n\treturn\n}",
"func buildSearchInput(qs url.Values) (*hub.SearchPackageInput, error) {\n\t// Limit\n\tvar limit int\n\tif qs.Get(\"limit\") != \"\" {\n\t\tvar err error\n\t\tlimit, err = strconv.Atoi(qs.Get(\"limit\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid limit: %s\", qs.Get(\"limit\"))\n\t\t}\n\t} else {\n\t\tlimit = searchDefaultLimit\n\t}\n\n\t// Offset\n\tvar offset int\n\tif qs.Get(\"offset\") != \"\" {\n\t\tvar err error\n\t\toffset, err = strconv.Atoi(qs.Get(\"offset\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid offset: %s\", qs.Get(\"offset\"))\n\t\t}\n\t}\n\n\t// Facets\n\tvar facets bool\n\tif qs.Get(\"facets\") != \"\" {\n\t\tvar err error\n\t\tfacets, err = strconv.ParseBool(qs.Get(\"facets\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid facets: %s\", qs.Get(\"facets\"))\n\t\t}\n\t}\n\n\t// Kinds\n\tkinds := make([]hub.RepositoryKind, 0, len(qs[\"kind\"]))\n\tfor _, kindStr := range qs[\"kind\"] {\n\t\tkind, err := strconv.Atoi(kindStr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid kind: %s\", kindStr)\n\t\t}\n\t\tkinds = append(kinds, hub.RepositoryKind(kind))\n\t}\n\n\t// Categories\n\tcategories := make([]hub.PackageCategory, 0, len(qs[\"category\"]))\n\tfor _, categoryStr := range qs[\"category\"] {\n\t\tcategory, err := strconv.Atoi(categoryStr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid category: %s\", categoryStr)\n\t\t}\n\t\tcategories = append(categories, hub.PackageCategory(category))\n\t}\n\n\t// Only display content from verified publishers\n\tvar verifiedPublisher bool\n\tif qs.Get(\"verified_publisher\") != \"\" {\n\t\tvar err error\n\t\tverifiedPublisher, err = strconv.ParseBool(qs.Get(\"verified_publisher\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid verified publisher: %s\", qs.Get(\"verified_publisher\"))\n\t\t}\n\t}\n\n\t// Only display official packages\n\tvar official bool\n\tif qs.Get(\"official\") != \"\" {\n\t\tvar err 
error\n\t\tofficial, err = strconv.ParseBool(qs.Get(\"official\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid official: %s\", qs.Get(\"official\"))\n\t\t}\n\t}\n\n\t// Only display packages published by CNCF projects\n\tvar cncf bool\n\tif qs.Get(\"cncf\") != \"\" {\n\t\tvar err error\n\t\tcncf, err = strconv.ParseBool(qs.Get(\"cncf\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid cncf: %s\", qs.Get(\"cncf\"))\n\t\t}\n\t}\n\n\t// Only display operators\n\tvar operators bool\n\tif qs.Get(\"operators\") != \"\" {\n\t\tvar err error\n\t\toperators, err = strconv.ParseBool(qs.Get(\"operators\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid operators: %s\", qs.Get(\"operators\"))\n\t\t}\n\t}\n\n\t// Include deprecated packages\n\tvar deprecated bool\n\tif qs.Get(\"deprecated\") != \"\" {\n\t\tvar err error\n\t\tdeprecated, err = strconv.ParseBool(qs.Get(\"deprecated\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid deprecated: %s\", qs.Get(\"deprecated\"))\n\t\t}\n\t}\n\n\treturn &hub.SearchPackageInput{\n\t\tLimit: limit,\n\t\tOffset: offset,\n\t\tFacets: facets,\n\t\tTSQueryWeb: qs.Get(\"ts_query_web\"),\n\t\tTSQuery: qs.Get(\"ts_query\"),\n\t\tUsers: qs[\"user\"],\n\t\tOrgs: qs[\"org\"],\n\t\tRepositories: qs[\"repo\"],\n\t\tRepositoryKinds: kinds,\n\t\tCategories: categories,\n\t\tVerifiedPublisher: verifiedPublisher,\n\t\tOfficial: official,\n\t\tCNCF: cncf,\n\t\tOperators: operators,\n\t\tDeprecated: deprecated,\n\t\tLicenses: qs[\"license\"],\n\t\tCapabilities: qs[\"capabilities\"],\n\t\tSort: qs.Get(\"sort\"),\n\t}, nil\n}",
"func (q *CommentsQuery) Search() map[string][]Comment {\n\tsearchParams, _ := json.Marshal(q)\n\n\tv := url.Values{}\n\tv.Set(\"params\", string(searchParams))\n\tv.Set(\"output\", \"json\")\n\n\tresp, _ := http.PostForm(q.Conduit.Host+\"/api/differential.getrevisioncomments\", v)\n\n\tresult := struct {\n\t\tResults map[string][]Comment `json:\"result\"`\n\t}{}\n\n\tjson.NewDecoder(resp.Body).Decode(&result)\n\n\treturn result.Results\n}",
"func (p *SearchProxyServiceClient) Search(query *SearchQuery, provider string) (r *SearchResult_, err error) {\n if err = p.sendSearch(query, provider); err != nil { return }\n return p.recvSearch()\n}",
"func (p *Provider) BuildURI(q string) string {\n\treturn fmt.Sprintf(\"https://en.wikipedia.org/wiki/Special:Search?search=%s\", url.QueryEscape(q))\n}",
"func rawSearchQuery(r *http.Request) string {\n\treturn strings.TrimSpace(r.FormValue(\"q\"))\n}",
"func (q *DataQuery) Search(query string) *DataQuery {\n\tq.params.Add(\"search\", query)\n\treturn q\n}",
"func SearchQueryToCQL(query string) string {\n\tif query == \"\" {\n\t\treturn \"SELECT snapshot FROM latest_snapshots_by_tenant WHERE tenant = ? LIMIT 100\"\n\t}\n\tqueries := strings.Split(query, \",\")\n\tfor i, q := range queries {\n\t\tqueries[i] = strings.Trim(q, \" \")\n\t\tif isNum(queries[i][0]) { // is a sernum\n\t\t\tqueries[i] = fmt.Sprintf(\"serial_string LIKE '%%%v%%'\", queries[i])\n\t\t} else { // is a company name\n\t\t\tqueries[i] = fmt.Sprintf(\"company_name LIKE '%%%v%%'\", strings.ToLower(queries[i]))\n\t\t}\n\t}\n\taddend := strings.Join(queries, \" AND \")\n\treturn fmt.Sprintf(\"SELECT snapshot FROM latest_snapshots_by_tenant WHERE tenant = ? AND %v LIMIT 100 ALLOW FILTERING\", addend)\n}",
"func (st *Storage) BuildSearchIndicies() (err error) {\n\n\tconst recordIndexQuery = `\n\t\tINSERT INTO vrecord \n\t\t\tSELECT record.id, record.title, record.abstract, rs.subjects, ra.authors\n\t\t\tFROM record, \n\t\t\t\t(SELECT creator.record_id AS record_id,\n\t\t\t\t\tGROUP_CONCAT(creator.first_name || ' ' || creator.last_name, ' ') AS authors\n\t\t\t\t\tFROM creator GROUP BY creator.record_id) AS ra,\n\t\t\t\t(SELECT rsl.record_id, GROUP_CONCAT(subject.keyword, ' ') AS subjects\n\t\t\t\t\tFROM record_subject_link AS rsl, subject\n\t\t\t\t\tWHERE rsl.subject_id=subject.id\n\t\t\t\t\tGROUP BY rsl.record_id) AS rs\n\t\t\tWHERE ra.record_id=record.id AND rs.record_id=record.id`\n\n\tconst expertIndexQuery = `\n\t\tINSERT INTO vexpert \n\t\t\tSELECT e.id, e.first_name || ' ' || e.last_name AS full_name, et.titles, es.subjects\n\t\t\tFROM expert AS e,\n\t\t\t\t(SELECT creator.expert_id, GROUP_CONCAT(record.title,' ') AS titles\n\t\t\t\t\tFROM creator, record\n\t\t\t\t\tWHERE creator.record_id=record.id\n\t\t\t\t\tGROUP BY creator.expert_id) AS et,\n\t\t\t\t(SELECT esl.expert_id, GROUP_CONCAT(subject.keyword, ' ') AS subjects\n\t\t\t\t\tFROM expert_subject_link AS esl, subject\n\t\t\t\t\tWHERE esl.subject_id=subject.id\n\t\t\t\t\tGROUP BY esl.expert_id) AS es\n\t\t\tWHERE e.id=et.expert_id\n\t\t\t\tAND e.id=es.expert_id`\n\n\ttx, err := st.db.Begin()\n\n\tif err != nil {\n\t\tlog.Printf(\"Database error. Could not initialize transaction for building record and expert search index. %s\", err)\n\t\treturn\n\t}\n\n\ttx.Exec(\"DELETE from vrecord\")\n\ttx.Exec(recordIndexQuery)\n\ttx.Exec(\"DELETE from vexpert\")\n\ttx.Exec(expertIndexQuery)\n\n\terr = tx.Commit()\n\n\tif err != nil {\n\t\tlog.Printf(\"Database error. Could not commit transaction for for building record and expert search index. %s\", err)\n\t\treturn\n\t}\n\n\treturn\n}",
"func Build(q *Query) (query string, params []interface{}) {\n\tquery += q.padSpace(q.buildQuery())\n\tif q.typ == queryTypeRawQuery || q.typ == queryTypeExists {\n\t\treturn q.trim(query), q.params\n\t}\n\tquery += q.padSpace(q.buildAs())\n\tquery += q.padSpace(q.buildDuplicate())\n\tquery += q.padSpace(q.buildUnion())\n\tquery += q.padSpace(q.buildJoin())\n\tquery += q.padSpace(q.buildWhere())\n\tquery += q.padSpace(q.buildHaving())\n\tquery += q.padSpace(q.buildOrderBy())\n\tquery += q.padSpace(q.buildGroupBy())\n\tquery += q.padSpace(q.buildLimit())\n\tquery += q.padSpace(q.buildOffset())\n\tquery += q.padSpace(q.buildAfterQueryOptions())\n\treturn q.trim(query), q.params\n}",
"func (s *SearchParameters) queryString() string {\n\tqs := \"\"\n\tseparator := \"\"\n\n\tfor k, v := range s.parameters {\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tqs = qs + fmt.Sprintf(\"%s%s=%s\", separator, k, v)\n\t\tseparator = \"&\"\n\t}\n\treturn qs\n}",
"func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (out *pb.SearchReply, err error) {\n\tout = &pb.SearchReply{}\n\tquery := make(map[string]interface{})\n\tquery[\"name\"] = in.Name\n\tquery[\"country\"] = in.Country\n\n\tvar vasps []pb.VASP\n\tif vasps, err = s.db.Search(query); err != nil {\n\t\tout.Error = &pb.Error{\n\t\t\tCode: 400,\n\t\t\tMessage: err.Error(),\n\t\t}\n\t}\n\n\tout.Vasps = make([]*pb.VASP, len(vasps))\n\tfor i := 0; i < len(vasps); i++ {\n\t\t// avoid pointer errors from range\n\t\tout.Vasps[i] = &vasps[i]\n\n\t\t// return only entities, remove certificate info until lookup\n\t\tout.Vasps[i].VaspTRISACertification = nil\n\t}\n\n\tentry := log.With().\n\t\tStrs(\"name\", in.Name).\n\t\tStrs(\"country\", in.Country).\n\t\tInt(\"results\", len(out.Vasps)).\n\t\tLogger()\n\n\tif out.Error != nil {\n\t\tentry.Warn().Err(out.Error).Msg(\"unsuccessful search\")\n\t} else {\n\t\tentry.Info().Msg(\"search succeeded\")\n\t}\n\treturn out, nil\n}",
"func parseSearchQuery(w http.ResponseWriter, r *http.Request) (*search_query.Search, bool) {\n\tq := search_query.Search{Limit: 50}\n\tif err := search_query.ParseSearch(r, &q); err != nil {\n\t\thttputils.ReportError(w, err, \"Search for digests failed.\", http.StatusInternalServerError)\n\t\treturn nil, false\n\t}\n\treturn &q, true\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Identify : Fetches the land cover information for the coordinate
|
func (f Fetcher) Identify(coords Coordinates) (IdentifyResult, error) {
searchQuery := BuildSearchQuery(coords)
url := fmt.Sprintf("https://copernicus.discomap.eea.europa.eu/arcgis/rest/services/Corine/CLC2012_WM/MapServer/identify?%s", searchQuery)
spaceClient := http.Client{}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
log.Fatal(err)
}
res, getErr := spaceClient.Do(req)
if getErr != nil {
log.Fatal(getErr)
}
body, readErr := ioutil.ReadAll(res.Body)
if readErr != nil {
log.Fatal(readErr)
}
result := IdentifyResult{}
jsonErr := json.Unmarshal(body, &result)
return result, jsonErr
}
|
[
"func (h *Handler) FetchRegionHouseInfo(c echo.Context) (err error) {\n\t// Retrieve house info from database\n\tneLat, _ := strconv.ParseFloat(c.QueryParam(\"ne_lat\"), 64)\n\tswLat, _ := strconv.ParseFloat(c.QueryParam(\"sw_lat\"), 64)\n\tneLng, _ := strconv.ParseFloat(c.QueryParam(\"ne_lng\"), 64)\n\tswLng, _ := strconv.ParseFloat(c.QueryParam(\"sw_lng\"), 64)\n\tcount, _ := strconv.Atoi(c.QueryParam(\"count\"))\n\n\tquery :=\n\t\t`SELECT *\n\t\tFROM (\n\t\t\tSELECT h_id, latitude, longitude\n\t\t\tFROM house \n\t\t\tWHERE latitude < &var1 and latitude > &var2 and longitude < &var3 and longitude > &var4 \n\t\t\tORDER BY DBMS_RANDOM.VALUE) \n\t\tWHERE ROWNUM <= &var5`\n\n\trows, err := h.db.Query(query, neLat, swLat, neLng, swLng, count)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\ttype HouseC struct {\n\t\tHID string `json:\"h_id\"`\n\t\tLatitude float32 `json:\"latitude\"`\n\t\tLongitude float32 `json:\"longitude\"`\n\t}\n\n\tvar houses []*HouseC\n\n\tfor rows.Next() {\n\t\thouseC := new(HouseC)\n\t\terr = rows.Scan(&houseC.HID, &houseC.Latitude, &houseC.Longitude)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thouses = append(houses, houseC)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn err\n\t}\n\n\treturn c.JSON(http.StatusOK, houses)\n}",
"func (p *parser) GetDetails(a *goquery.Selection) *model.Club {\n\td := detail{area: a, cleanImg: p.cleanImg}\n\td.setID()\n\td.setName()\n\td.setImage()\n\td.setInformation()\n\td.setMember()\n\td.setPicture()\n\td.setCategory()\n\td.setDate()\n\td.setDetail()\n\td.setType()\n\treturn &d.data\n}",
"func (in *ImageRef) ExtractArea(left int, top int, width int, height int, options ...*Option) error {\n\tout, err := ExtractArea(in.image, left, top, width, height, options...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tin.SetImage(out)\n\treturn nil\n}",
"func (detector CountryDetector) getInfo(countryInfo locations.Place, code string) PlaceInfo {\n\tplace := PlaceInfo{\n\t\tName: countryInfo.Name,\n\t\tCode: code,\n\t\tLongitude: countryInfo.Longitude,\n\t\tLatitude: countryInfo.Latitude,\n\t}\n\treturn place\n}",
"func (api *APIStub) Identify(w http.ResponseWriter, r *http.Request) {\n\tauthToken := r.Header.Get(authHeaderKey)\n\txFlorenceToken := r.Header.Get(userAuthHeaderKey)\n\tlog.Info(\"headers\", log.Data{\"auth_token\": authToken, \"florence_token\": xFlorenceToken})\n\n\tfor _, identity := range api.scenarios.Identities {\n\t\tif identity.AuthorizationToken == authToken && identity.XFlorenceToken == xFlorenceToken {\n\t\t\tlog.Info(\"identity profile match\", log.Data{\n\t\t\t\t\"scenario\": identity.Scenario,\n\t\t\t\t\"status\": identity.Status,\n\t\t\t})\n\t\t\twriteResponse(\n\t\t\t\tResponse{\n\t\t\t\t\tIdentifier: identity.Identifier,\n\t\t\t\t\tMessage: identity.Message,\n\t\t\t\t\tStatus: identity.Status,\n\t\t\t\t}, w)\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Info(\"no matching identity profile found, returning default not authenticated response\", nil)\n\twriteResponse(unauthorizedError, w)\n}",
"func captureEverquest(bounds image.Rectangle) (img image.Image, err error) {\n\tx, y, width, height, err := getEqClientArea()\n\tif err != nil {\n\t\treturn\n\t}\n\terr = raiseEverquest()\n\tif err != nil {\n\t\treturn\n\t}\n\tif width > bounds.Dx() {\n\t\twidth = bounds.Dx()\n\t}\n\tif height > bounds.Dy() {\n\t\theight = bounds.Dy()\n\t}\n\timg, err = captureImage(x+bounds.Min.X, y+bounds.Min.Y, width, height)\n\treturn\n}",
"func GetBasicInfo(img Image) (info BasicInfo) {\r\n\twidth, err := img.ReadTagValue(\"SOF0\", SOF0ImageWidth)\r\n\tif err == nil {\r\n\t\tinfo.Width = width\r\n\t} else {\r\n\t\tfmt.Println(err.Error())\r\n\t}\r\n\theight, err := img.ReadTagValue(\"SOF0\", SOF0ImageHeight)\r\n\tif err == nil {\r\n\t\tinfo.Height = height.(uint32)\r\n\t} else {\r\n\t\tfmt.Println(err.Error())\r\n\t}\r\n\tkeyword, err := img.ReadTagValue(\"IPTC\", IptcTagApplication2Keywords)\r\n\tif err == nil {\r\n\t\tinfo.Keywords = []string{keyword.(string)}\r\n\t}\r\n\tdatetime, err := img.ReadTagValue(\"EXIF\", ExifTagDateTimeOriginal)\r\n\tif err == nil {\r\n\t\tfmt.Printf(\"datetime:%v\\n\", datetime)\r\n\t}\r\n\t//height, err := img.ReadTagValue(\"IPTC\", IptcTagApplication2Keywords)\r\n\t//if err == nil {\r\n\t//\tinfo.Height = height.(float64)\r\n\t//} else {\r\n\t//\tfmt.Println(err.Error())\r\n\t//}\r\n\treturn\r\n}",
"func (c Client) LocationDetails(ctx context.Context,\n\tentityID int64, entityType EntityType) (resp LocationDetailsResp, err error) {\n\tif c.Auth == nil {\n\t\treturn resp, ErrNoAuth\n\t}\n\n\terr = c.Do(c.Auth(WithCtx(ctx, LocationDetailsReq{\n\t\tEntityID: entityID,\n\t\tEntityType: entityType,\n\t})), &resp)\n\treturn resp, errors.Wrap(err, \"Client.Do failed\")\n}",
"func censusApi(latitude, longitude float64) (outer, int, error) {\n\turl := \"https://geo.fcc.gov/api/census/area?lat=\" + FloatToString(latitude) + \"0&lon=\" + FloatToString(longitude) + \"&format=json\"\n\tresponse, err := httpClient.Get(url)\n\tif err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tvar census outer\n\terr = json.Unmarshal(body, &census)\n\tif err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tfmt.Printf(\"%+v\\n\", census)\n\treturn census, response.StatusCode, err\n}",
"func DetailedCanteenInformation(c echo.Context) error {\n\n\tdb, ok := c.Get(\"db\").(*gorm.DB)\n\n\tif !ok {\n\t\treturn c.NoContent(http.StatusInternalServerError)\n\t}\n\n\t_schoolID, _ := strconv.Atoi(c.Param(\"id\"))\n\n\t_canteenID, _ := strconv.Atoi(c.Param(\"id2\"))\n\n\t_canteen := canteen.Canteen{}\n\n\t_canteen.SchoolID = uint(_schoolID)\n\n\t_canteen.ID = uint(_canteenID)\n\n\terr := db.Where(&_canteen).First(&_canteen).Error\n\n\tif err != nil {\n\t\treturn c.NoContent(http.StatusNotFound)\n\t}\n\n\t_location := canteen.Location{}\n\n\t_location.CanteenID = _canteen.ID\n\n\tdb.Where(&_location).First(&_location)\n\n\t_canteen.Location = _location\n\n\tmodelview := view.ToGetDetailedCanteenInformationModelView(_canteen)\n\n\treturn c.JSON(http.StatusOK, modelview)\n\n}",
"func getBranchDetails(branch string, city string, limit int, offset int) []*Branch {\n\t//br := &Branch{}\n\tbr := make([]*Branch, 0)\n\n\tif limit > 0 {\n\t\tGetDB().Table(\"bank_branches\").Where(\"branch = ? AND city = ?\", branch, city).Limit(limit).Offset(offset).Find(&br)\n\t} else {\n\t\tGetDB().Table(\"bank_branches\").Where(\"branch = ? AND city = ?\", branch, city).Find(&br)\n\t}\n\t//GetDB().Table(\"banks\").Where(\"id = ?\", br.BankID).First(bk)\n\t//fmt.Println(\"br:\", br)\n\n\treturn br\n}",
"func (o *Object) Locate() [16]float32 {\n\treturn o.location\n}",
"func (c *Cafe) Detail(r *http.Request, args *CafeEmptyArgs, reply *CafeDetailReply) error {\n\treply.Message = \"Cafe Detail\"\n\treturn nil\n}",
"func (e *Identity) Region() (string, error) { return fetchRegion() }",
"func (m topoMap) getContour(x, y int) *contour {\n\treturn m.sets[x*m.stride+y].Find().Aux.(*contour)\n}",
"func GetSpecificNatricon(badgeType spc.BadgeType, outline bool, outlineColor *color.RGB, bodyColor *color.RGB, hairColor *color.RGB, bodyAsset int, hairAsset int, mouthAsset int, eyeAsset int) Accessories {\n\tvar accessories = Accessories{}\n\n\t// Set colors\n\taccessories.BodyColor = *bodyColor\n\taccessories.HairColor = *hairColor\n\n\t// Assets\n\taccessories.BodyAsset = GetBodyAssetWithID(bodyAsset)\n\taccessories.HairAsset = GetHairAssetWithID(hairAsset)\n\taccessories.BackHairAsset = GetBackHairAsset(accessories.HairAsset)\n\n\t// Get badge\n\tif badgeType != \"\" && badgeType != spc.BTNone {\n\t\taccessories.BadgeAsset = GetBadgeAsset(accessories.BodyAsset, badgeType)\n\t}\n\n\t// Eyes and mouth\n\taccessories.MouthAsset = GetMouthAssetWithID(mouthAsset)\n\taccessories.EyeAsset = GetEyeAssetWithID(eyeAsset)\n\n\t// Get outlines\n\tif outline {\n\t\taccessories.BodyOutlineAsset = GetBodyOutlineAsset(accessories.BodyAsset)\n\t\taccessories.HairOutlineAsset = GetHairOutlineAsset(accessories.HairAsset)\n\t\taccessories.MouthOutlineAsset = GetMouthOutlineAsset(accessories.MouthAsset)\n\t\tif outlineColor != nil {\n\t\t\taccessories.OutlineColor = *outlineColor\n\t\t} else {\n\t\t\taccessories.OutlineColor = color.RGB{R: 0, G: 0, B: 0}\n\t\t}\n\t}\n\n\treturn accessories\n}",
"func (o *CaptivePortalProfile) Fetch() *bambou.Error {\n\n\treturn bambou.CurrentSession().FetchEntity(o)\n}",
"func (s *SliceStackResource) Identify() (string, uint32) {\n\treturn s.ModelPath, s.ID\n}",
"func CalPrimaryCoilInfo(D, N, W, S string) [4]string {\n\tvar output [4]string //用于存放输出结果\n\tH := FormHeight(N, S, W)\n\toutput[0] = H\n\toutput[1] = WireLong(N, D)\n\toutput[2] = CalcInductance(N, D, H)\n\toutput[3] = CalcCap(H, D)\n\n\treturn output\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlan implements the LogicalPlan convert2PhysicalPlan interface. If there is no index that matches the required property, the returned physicalPlanInfo will be table scan and has the cost of MaxInt64. But this can be ignored because the parent will call convert2PhysicalPlan again with an empty requiredProperty, so the plan with the lowest cost will be chosen.
|
func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {
info, err := p.getPlanInfo(prop)
if err != nil {
return nil, errors.Trace(err)
}
if info != nil {
return info, nil
}
info, err = p.tryToConvert2DummyScan(prop)
if info != nil || err != nil {
return info, errors.Trace(err)
}
client := p.ctx.GetClient()
memDB := infoschema.IsMemoryDB(p.DBName.L)
isDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)
if !isDistReq {
memTable := PhysicalMemTable{
DBName: p.DBName,
Table: p.tableInfo,
Columns: p.Columns,
TableAsName: p.TableAsName,
}.init(p.allocator, p.ctx)
memTable.SetSchema(p.schema)
rb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}
memTable.Ranges = rb.BuildTableRanges(ranger.FullRange)
info = &physicalPlanInfo{p: memTable}
info = enforceProperty(prop, info)
p.storePlanInfo(prop, info)
return info, nil
}
indices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)
if includeTableScan {
info, err = p.convert2TableScan(prop)
if err != nil {
return nil, errors.Trace(err)
}
}
if !includeTableScan || p.need2ConsiderIndex(prop) {
for _, index := range indices {
indexInfo, err := p.convert2IndexScan(prop, index)
if err != nil {
return nil, errors.Trace(err)
}
if info == nil || indexInfo.cost < info.cost {
info = indexInfo
}
}
}
return info, errors.Trace(p.storePlanInfo(prop, info))
}
|
[
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func enforceProperty(prop *requiredProperty, info *physicalPlanInfo) *physicalPlanInfo {\n\tif info.p == nil {\n\t\treturn info\n\t}\n\tif len(prop.props) != 0 {\n\t\titems := make([]*ByItems, 0, len(prop.props))\n\t\tfor _, col := range prop.props {\n\t\t\titems = append(items, &ByItems{Expr: col.col, Desc: col.desc})\n\t\t}\n\t\tsort := Sort{\n\t\t\tByItems: items,\n\t\t\tExecLimit: prop.limit,\n\t\t}.init(info.p.Allocator(), info.p.context())\n\t\tsort.SetSchema(info.p.Schema())\n\t\tinfo = addPlanToResponse(sort, info)\n\n\t\tcount := info.count\n\t\tif prop.limit != nil {\n\t\t\tcount = float64(prop.limit.Offset + prop.limit.Count)\n\t\t\tinfo.reliable = true\n\t\t}\n\t\tinfo.cost += sortCost(count)\n\t} else if prop.limit != nil {\n\t\tlimit := Limit{Offset: prop.limit.Offset, Count: prop.limit.Count}.init(info.p.Allocator(), info.p.context())\n\t\tlimit.SetSchema(info.p.Schema())\n\t\tinfo = addPlanToResponse(limit, info)\n\t\tinfo.reliable = true\n\t}\n\tif prop.limit != nil && float64(prop.limit.Count) < info.count {\n\t\tinfo.count = float64(prop.limit.Count)\n\t}\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != 
nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func (p *DataSource) tryToConvert2DummyScan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tsel, isSel := p.parents[0].(*Selection)\n\tif !isSel {\n\t\treturn nil, nil\n\t}\n\n\tfor _, cond := range sel.Conditions {\n\t\tif con, ok := cond.(*expression.Constant); ok {\n\t\t\tresult, err := expression.EvalBool([]expression.Expression{con}, nil, p.ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif !result {\n\t\t\t\tdual := TableDual{}.init(p.allocator, p.ctx)\n\t\t\t\tdual.SetSchema(p.schema)\n\t\t\t\tinfo := &physicalPlanInfo{p: dual}\n\t\t\t\tp.storePlanInfo(prop, info)\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func NewPhysicalPlanner(options ...PhysicalOption) PhysicalPlanner {\n\tpp := &physicalPlanner{\n\t\theuristicPlannerPhysical: newHeuristicPlanner(),\n\t\theuristicPlannerParallel: newHeuristicPlanner(),\n\t\tdefaultMemoryLimit: math.MaxInt64,\n\t}\n\n\trulesPhysical := make([]Rule, len(ruleNameToPhysicalRule))\n\ti := 0\n\tfor _, v := range ruleNameToPhysicalRule {\n\t\trulesPhysical[i] = v\n\t\ti++\n\t}\n\n\trulesParallel := make([]Rule, len(ruleNameToParallelizeRules))\n\ti = 0\n\tfor _, v := range ruleNameToParallelizeRules {\n\t\trulesParallel[i] = v\n\t\ti++\n\t}\n\n\tpp.heuristicPlannerPhysical.addRules(rulesPhysical...)\n\n\tpp.heuristicPlannerPhysical.addRules(physicalConverterRule{})\n\n\tpp.heuristicPlannerParallel.addRules(rulesParallel...)\n\n\t// Options may add or remove rules, so process them after we've\n\t// added registered rules.\n\tfor _, opt := range options {\n\t\topt.apply(pp)\n\t}\n\n\treturn pp\n}",
"func (q *Query) Plan(fps ...FieldPath) (string, error) {\n\tif err := q.initGet(fps); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn q.coll.driver.QueryPlan(q.dq)\n}",
"func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) {\n\tspan, _ := trace.NewSpan(ctx, \"QueryEngine.GetPlan\")\n\tdefer span.Finish()\n\tif !skipQueryPlanCache {\n\t\tif plan := qe.getQuery(sql); plan != nil {\n\t\t\tlogStats.CachedPlan = true\n\t\t\treturn plan, nil\n\t\t}\n\t}\n\t// Obtain read lock to prevent schema from changing while\n\t// we build a plan. The read lock allows multiple identical\n\t// queries to build the same plan. One of them will win by\n\t// updating the query cache and prevent future races. Due to\n\t// this, query stats reporting may not be accurate, but it's\n\t// acceptable because those numbers are best effort.\n\tqe.mu.RLock()\n\tdefer qe.mu.RUnlock()\n\tstatement, err := sqlparser.Parse(sql)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsplan, err := planbuilder.Build(statement, qe.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplan := &TabletPlan{Plan: splan, Original: sql}\n\tplan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableNames()...)\n\tplan.buildAuthorized()\n\tif plan.PlanID == planbuilder.PlanDDL || plan.PlanID == planbuilder.PlanSet {\n\t\treturn plan, nil\n\t}\n\tif !skipQueryPlanCache && !sqlparser.SkipQueryPlanCacheDirective(statement) {\n\t\tqe.plans.Set(sql, plan)\n\t}\n\treturn plan, nil\n}",
"func TurnNominalSortIntoProj(p PhysicalPlan, onlyColumn bool, orderByItems []*util.ByItems) PhysicalPlan {\n\tif onlyColumn {\n\t\treturn p.Children()[0]\n\t}\n\n\tnumOrderByItems := len(orderByItems)\n\tchildPlan := p.Children()[0]\n\n\tbottomProjSchemaCols := make([]*expression.Column, 0, len(childPlan.Schema().Columns)+numOrderByItems)\n\tbottomProjExprs := make([]expression.Expression, 0, len(childPlan.Schema().Columns)+numOrderByItems)\n\tfor _, col := range childPlan.Schema().Columns {\n\t\tnewCol := col.Clone().(*expression.Column)\n\t\tnewCol.Index = childPlan.Schema().ColumnIndex(newCol)\n\t\tbottomProjSchemaCols = append(bottomProjSchemaCols, newCol)\n\t\tbottomProjExprs = append(bottomProjExprs, newCol)\n\t}\n\n\tfor _, item := range orderByItems {\n\t\titemExpr := item.Expr\n\t\tif _, isScalarFunc := itemExpr.(*expression.ScalarFunction); !isScalarFunc {\n\t\t\tcontinue\n\t\t}\n\t\tbottomProjExprs = append(bottomProjExprs, itemExpr)\n\t\tnewArg := &expression.Column{\n\t\t\tUniqueID: p.SCtx().GetSessionVars().AllocPlanColumnID(),\n\t\t\tRetType: itemExpr.GetType(),\n\t\t\tIndex: len(bottomProjSchemaCols),\n\t\t}\n\t\tbottomProjSchemaCols = append(bottomProjSchemaCols, newArg)\n\t}\n\n\tchildProp := p.GetChildReqProps(0).CloneEssentialFields()\n\tbottomProj := PhysicalProjection{\n\t\tExprs: bottomProjExprs,\n\t\tAvoidColumnEvaluator: false,\n\t}.Init(p.SCtx(), childPlan.StatsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), p.SelectBlockOffset(), childProp)\n\tbottomProj.SetSchema(expression.NewSchema(bottomProjSchemaCols...))\n\tbottomProj.SetChildren(childPlan)\n\n\ttopProjExprs := make([]expression.Expression, 0, childPlan.Schema().Len())\n\tfor i := range childPlan.Schema().Columns {\n\t\tcol := childPlan.Schema().Columns[i].Clone().(*expression.Column)\n\t\tcol.Index = i\n\t\ttopProjExprs = append(topProjExprs, col)\n\t}\n\ttopProj := PhysicalProjection{\n\t\tExprs: topProjExprs,\n\t\tAvoidColumnEvaluator: false,\n\t}.Init(p.SCtx(), 
childPlan.StatsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), p.SelectBlockOffset(), childProp)\n\ttopProj.SetSchema(childPlan.Schema().Clone())\n\ttopProj.SetChildren(bottomProj)\n\n\tif origChildProj, isChildProj := childPlan.(*PhysicalProjection); isChildProj {\n\t\trefine4NeighbourProj(bottomProj, origChildProj)\n\t}\n\n\treturn topProj\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
tryToConvert2DummyScan is an optimization which checks if its parent is a selection with a constant condition that evaluates to false. If it is, there is no need for a real physical scan, a dummy scan will do.
|
func (p *DataSource) tryToConvert2DummyScan(prop *requiredProperty) (*physicalPlanInfo, error) {
sel, isSel := p.parents[0].(*Selection)
if !isSel {
return nil, nil
}
for _, cond := range sel.Conditions {
if con, ok := cond.(*expression.Constant); ok {
result, err := expression.EvalBool([]expression.Expression{con}, nil, p.ctx)
if err != nil {
return nil, errors.Trace(err)
}
if !result {
dual := TableDual{}.init(p.allocator, p.ctx)
dual.SetSchema(p.schema)
info := &physicalPlanInfo{p: dual}
p.storePlanInfo(prop, info)
return info, nil
}
}
}
return nil, nil
}
|
[
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (s *SoundIo) ForceDeviceScan() {\n\tC.soundio_force_device_scan(s.ptr)\n}",
"func TestScanEmpty4B(t *testing.T) {\n}",
"func (ci *ConnInfo) PlanScan(oid uint32, formatCode int16, dst interface{}) ScanPlan {\n\tswitch formatCode {\n\tcase BinaryFormatCode:\n\t\tswitch dst.(type) {\n\t\tcase *string:\n\t\t\tswitch oid {\n\t\t\tcase TextOID, VarcharOID:\n\t\t\t\treturn scanPlanString{}\n\t\t\t}\n\t\tcase *int16:\n\t\t\tif oid == Int2OID {\n\t\t\t\treturn scanPlanBinaryInt16{}\n\t\t\t}\n\t\tcase *int32:\n\t\t\tif oid == Int4OID {\n\t\t\t\treturn scanPlanBinaryInt32{}\n\t\t\t}\n\t\tcase *int64:\n\t\t\tif oid == Int8OID {\n\t\t\t\treturn scanPlanBinaryInt64{}\n\t\t\t}\n\t\tcase *float32:\n\t\t\tif oid == Float4OID {\n\t\t\t\treturn scanPlanBinaryFloat32{}\n\t\t\t}\n\t\tcase *float64:\n\t\t\tif oid == Float8OID {\n\t\t\t\treturn scanPlanBinaryFloat64{}\n\t\t\t}\n\t\tcase *[]byte:\n\t\t\tswitch oid {\n\t\t\tcase ByteaOID, TextOID, VarcharOID, JSONOID:\n\t\t\t\treturn scanPlanBinaryBytes{}\n\t\t\t}\n\t\tcase BinaryDecoder:\n\t\t\treturn scanPlanDstBinaryDecoder{}\n\t\t}\n\tcase TextFormatCode:\n\t\tswitch dst.(type) {\n\t\tcase *string:\n\t\t\treturn scanPlanString{}\n\t\tcase *[]byte:\n\t\t\tif oid != ByteaOID {\n\t\t\t\treturn scanPlanBinaryBytes{}\n\t\t\t}\n\t\tcase TextDecoder:\n\t\t\treturn scanPlanDstTextDecoder{}\n\t\t}\n\t}\n\n\tvar dt *DataType\n\n\tif oid == 0 {\n\t\tif dataType, ok := ci.DataTypeForValue(dst); ok {\n\t\t\tdt = dataType\n\t\t}\n\t} else {\n\t\tif dataType, ok := ci.DataTypeForOID(oid); ok {\n\t\t\tdt = dataType\n\t\t}\n\t}\n\n\tif dt != nil {\n\t\tif isScanner(dst) {\n\t\t\treturn (*scanPlanDataTypeSQLScanner)(dt)\n\t\t}\n\t\treturn (*scanPlanDataTypeAssignTo)(dt)\n\t}\n\n\tif isScanner(dst) {\n\t\treturn scanPlanSQLScanner{}\n\t}\n\n\treturn scanPlanReflection{}\n}",
"func Scan(cfg *ScanConfig, checkDir string) (*ScanResult, error) {\n\tscanResult := new(ScanResult)\n\tscanResult.Total = 0\n\tscanResult.Passed = 0\n\tscanResult.Failed = 0\n\tscanResult.SkippedFiles = 0\n\tscanResult.SkippedDirs = 0\n\tscanResult.Errors = 0\n\tscanResult.FailedExtensionSet = make(map[string]bool)\n\tstartTime := time.Now()\n\n\terr := filepath.Walk(checkDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tscanResult.Total++\n\t\t\tscanResult.Errors++\n\t\t\tscanResult.ErrorPaths = append(scanResult.ErrorPaths, filepath.ToSlash(path))\n\t\t\treturn err\n\t\t}\n\t\tobjName := info.Name()\n\t\tfileExtension := filepath.Ext(objName)\n\t\tif fileExtension == \"\" {\n\t\t\tfileExtension = objName\n\t\t}\n\t\tnormalisedPath := filepath.ToSlash(path)\n\t\tif info.IsDir() {\n\t\t\tpathInSkipDir := contains(cfg.Skip.Dir, normalisedPath)\n\t\t\tnameInSkipDirAll := contains(cfg.Skip.DirAll, objName)\n\t\t\tif objName == \".git\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t} else if nameInSkipDirAll == true {\n\t\t\t\tscanResult.SkippedDirs++\n\t\t\t\treturn filepath.SkipDir\n\t\t\t} else if pathInSkipDir == true {\n\t\t\t\tscanResult.SkippedDirs++\n\t\t\t\treturn filepath.SkipDir\n\t\t\t} else {\n\t\t\t\t//Move on, can't process dir but nothing special to do\n\t\t\t}\n\t\t} else if info.Mode().IsRegular() {\n\t\t\tpathInSkipFile := contains(cfg.Skip.File, normalisedPath)\n\t\t\tnameInSkipFileAll := contains(cfg.Skip.FileAll, objName)\n\t\t\tfileExtInSkipExt := contains(cfg.Skip.Extension, fileExtension)\n\t\t\tif objName == \".\" {\n\t\t\t\t//Skip but don't record\n\t\t\t} else if nameInSkipFileAll == true {\n\t\t\t\tscanResult.SkippedFiles++\n\t\t\t} else if pathInSkipFile == true {\n\t\t\t\tscanResult.SkippedFiles++\n\t\t\t} else if fileExtInSkipExt == true {\n\t\t\t\tscanResult.SkippedFiles++\n\t\t\t} else if info.Size() == 0 {\n\t\t\t\tscanResult.Total++\n\t\t\t\tscanResult.Failed++\n\t\t\t\tscanResult.FailedPaths = 
append(scanResult.FailedPaths, normalisedPath)\n\t\t\t\tscanResult.FailedExtensionSet[fileExtension] = true\n\t\t\t} else {\n\t\t\t\tresult, err := checkLineEnding(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tscanResult.Total++\n\t\t\t\t\tscanResult.Errors++\n\t\t\t\t\tscanResult.ErrorPaths = append(scanResult.ErrorPaths, normalisedPath)\n\t\t\t\t} else if result == true {\n\t\t\t\t\tscanResult.Total++\n\t\t\t\t\tscanResult.Passed++\n\t\t\t\t} else {\n\t\t\t\t\tscanResult.Total++\n\t\t\t\t\tscanResult.Failed++\n\t\t\t\t\tscanResult.FailedPaths = append(scanResult.FailedPaths, normalisedPath)\n\t\t\t\t\tscanResult.FailedExtensionSet[fileExtension] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tscanResult.Time = time.Since(startTime)\n\tif err != nil {\n\t\treturn scanResult, err\n\t}\n\treturn scanResult, nil\n}",
"func TestScanLimitZero4B(t *testing.T) {\n}",
"func NewDummyScanner(log logrus.FieldLogger) *DummyScanner {\n\treturn &DummyScanner{\n\t\tlog: log.WithField(\"prefix\", \"scanner.dummy\"),\n\t\taddrsMap: make(map[string]struct{}),\n\t\tcoinTypes: make(map[string]struct{}),\n\t\tdeposits: make(chan DepositNote, 100),\n\t}\n}",
"func (s *slowSeedScanner) scan(cancel <-chan struct{}) error {\n\t// generate a bunch of keys and scan the blockchain looking for them. If\n\t// none of the 'upper' half of the generated keys are found, we are done;\n\t// otherwise, generate more keys and try again (bounded by a sane\n\t// default).\n\t//\n\t// NOTE: since scanning is very slow, we aim to only scan once, which\n\t// means generating many keys.\n\ts.gapScanner = newFastSeedScanner(s.seed, s.addressGapLimit, s.cs, s.log)\n\n\ts.generateKeys(numInitialKeys)\n\ts.cancel = make(chan struct{}) // this will disturbe thread stop to stop scan\n\terr := s.cs.HeaderConsensusSetSubscribe(s, modules.ConsensusChangeBeginning, s.cancel)\n\tif err != siasync.ErrStopped {\n\t\treturn err\n\t}\n\ts.cs.HeaderUnsubscribe(s)\n\n\t// log.Printf(\"end fist part slow scan s.maximumExternalIndex %d\\n\", s.maximumExternalIndex)\n\ts.gapScanner.minimumIndex = s.maximumExternalIndex\n\ts.gapScanner.maximumInternalIndex = s.maximumExternalIndex\n\ts.gapScanner.maximumExternalIndex = s.maximumExternalIndex\n\ts.gapScanner.siacoinOutputs = s.siacoinOutputs\n\ts.gapScanner.generateKeys(uint64(s.addressGapLimit))\n\n\tif err := s.gapScanner.cs.HeaderConsensusSetSubscribe(s.gapScanner, s.lastConsensusChange, cancel); err != nil {\n\t\treturn err\n\t}\n\ts.gapScanner.cs.HeaderUnsubscribe(s.gapScanner)\n\n\ts.maximumExternalIndex = s.gapScanner.maximumExternalIndex\n\t// log.Printf(\"slow scan s.maximumExternalIndex %d\\n\", s.maximumExternalIndex)\n\t// for id, sco := range s.gapScanner.siacoinOutputs {\n\t// \tlog.Printf(\"siacoinOutputs: %d %s\", sco.seedIndex, sco.value.String())\n\t// \ts.siacoinOutputs[id] = sco\n\t// }\n\n\treturn nil\n}",
"func dummyImpl(blk *ssa.BasicBlock) bool {\n\tvar ops [8]*ssa.Value\n\tfor _, instr := range blk.Instrs {\n\t\tfor _, val := range instr.Operands(ops[:0]) {\n\t\t\tswitch x := (*val).(type) {\n\t\t\tcase nil, *ssa.Const, *ssa.ChangeType, *ssa.Alloc,\n\t\t\t\t*ssa.MakeInterface, *ssa.Function,\n\t\t\t\t*ssa.Global, *ssa.IndexAddr, *ssa.Slice:\n\t\t\tcase *ssa.Call:\n\t\t\t\tif rxHarmlessCall.MatchString(x.Call.Value.String()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tswitch x := instr.(type) {\n\t\tcase *ssa.Alloc, *ssa.Store, *ssa.UnOp, *ssa.BinOp,\n\t\t\t*ssa.MakeInterface, *ssa.MakeMap, *ssa.Extract,\n\t\t\t*ssa.IndexAddr, *ssa.FieldAddr, *ssa.Slice,\n\t\t\t*ssa.Lookup, *ssa.ChangeType, *ssa.TypeAssert,\n\t\t\t*ssa.Convert, *ssa.ChangeInterface:\n\t\t\t// non-trivial expressions in panic/log/print\n\t\t\t// calls\n\t\tcase *ssa.Return, *ssa.Panic:\n\t\t\treturn true\n\t\tcase *ssa.Call:\n\t\t\tif rxHarmlessCall.MatchString(x.Call.Value.String()) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn x.Call.Value.Name() == \"throw\" // runtime's panic\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}",
"func (p *Selection) makeScanController() *physicalPlanInfo {\n\tvar (\n\t\tchild PhysicalPlan\n\t\tcorColConds []expression.Expression\n\t)\n\tds := p.children[0].(*DataSource)\n\tindices, _ := availableIndices(ds.indexHints, ds.tableInfo)\n\tfor _, expr := range p.Conditions {\n\t\tif !expr.IsCorrelated() {\n\t\t\tcontinue\n\t\t}\n\t\tcond := expression.PushDownNot(expr, false, nil)\n\t\tcorCols := extractCorColumns(cond)\n\t\tfor _, col := range corCols {\n\t\t\t*col.Data = expression.One.Value\n\t\t}\n\t\tnewCond, _ := expression.SubstituteCorCol2Constant(cond)\n\t\tcorColConds = append(corColConds, newCond)\n\t}\n\tif p.controllerStatus == controlTableScan {\n\t\tts := PhysicalTableScan{\n\t\t\tTable: ds.tableInfo,\n\t\t\tColumns: ds.Columns,\n\t\t\tTableAsName: ds.TableAsName,\n\t\t\tDBName: ds.DBName,\n\t\t\tphysicalTableSource: physicalTableSource{client: ds.ctx.GetClient()},\n\t\t}.init(p.allocator, p.ctx)\n\t\tts.SetSchema(ds.schema)\n\t\tif ds.ctx.Txn() != nil {\n\t\t\tts.readOnly = p.ctx.Txn().IsReadOnly()\n\t\t} else {\n\t\t\tts.readOnly = true\n\t\t}\n\t\tchild = ts\n\t} else if p.controllerStatus == controlIndexScan {\n\t\tvar (\n\t\t\tchosenPlan *PhysicalIndexScan\n\t\t\tbestEqualCount int\n\t\t)\n\t\tfor _, idx := range indices {\n\t\t\tcondsBackUp := make([]expression.Expression, 0, len(corColConds))\n\t\t\tfor _, cond := range corColConds {\n\t\t\t\tcondsBackUp = append(condsBackUp, cond.Clone())\n\t\t\t}\n\t\t\t_, _, accessEqualCount, _ := ranger.DetachIndexScanConditions(condsBackUp, idx)\n\t\t\tif chosenPlan == nil || bestEqualCount < accessEqualCount {\n\t\t\t\tis := PhysicalIndexScan{\n\t\t\t\t\tTable: ds.tableInfo,\n\t\t\t\t\tIndex: idx,\n\t\t\t\t\tColumns: ds.Columns,\n\t\t\t\t\tTableAsName: ds.TableAsName,\n\t\t\t\t\tOutOfOrder: true,\n\t\t\t\t\tDBName: ds.DBName,\n\t\t\t\t\tphysicalTableSource: physicalTableSource{client: ds.ctx.GetClient()},\n\t\t\t\t}.init(p.allocator, p.ctx)\n\t\t\t\tis.SetSchema(ds.schema)\n\t\t\t\tif is.ctx.Txn() != 
nil {\n\t\t\t\t\tis.readOnly = p.ctx.Txn().IsReadOnly()\n\t\t\t\t} else {\n\t\t\t\t\tis.readOnly = true\n\t\t\t\t}\n\t\t\t\tis.DoubleRead = !isCoveringIndex(is.Columns, is.Index.Columns, is.Table.PKIsHandle)\n\t\t\t\tchosenPlan, bestEqualCount = is, accessEqualCount\n\t\t\t}\n\t\t}\n\t\tchild = chosenPlan\n\t}\n\tnewSel := p.Copy().(*Selection)\n\tnewSel.ScanController = true\n\tnewSel.SetChildren(child)\n\tinfo := &physicalPlanInfo{\n\t\tp: newSel,\n\t\tcount: float64(ds.statisticTable.Count),\n\t}\n\tinfo.cost = info.count * selectionFactor\n\treturn info\n}",
"func (ie *IfdEnumerate) scan(iiGeneral *exifcommon.IfdIdentity, ifdOffset uint32, visitor TagVisitorFn, med *MiscellaneousExifData) (err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = log.Wrap(state.(error))\n\t\t}\n\t}()\n\n\t// TODO(dustin): Add test\n\n\tfor ifdIndex := 0; ; ifdIndex++ {\n\t\tiiSibling := iiGeneral.NewSibling(ifdIndex)\n\n\t\tifdEnumerateLogger.Debugf(nil, \"Parsing IFD [%s] at offset (0x%04x) (scan).\", iiSibling.String(), ifdOffset)\n\n\t\tbp, err := ie.getByteParser(ifdOffset)\n\t\tif err != nil {\n\t\t\tif err == ErrOffsetInvalid {\n\t\t\t\tifdEnumerateLogger.Errorf(nil, nil, \"IFD [%s] at offset (0x%04x) is unreachable. Terminating scan.\", iiSibling.String(), ifdOffset)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tnextIfdOffset, _, _, err := ie.parseIfd(iiSibling, bp, visitor, true, med)\n\t\tlog.PanicIf(err)\n\n\t\tcurrentOffset := bp.CurrentOffset()\n\t\tif currentOffset > ie.furthestOffset {\n\t\t\tie.furthestOffset = currentOffset\n\t\t}\n\n\t\tif nextIfdOffset == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tifdOffset = nextIfdOffset\n\t}\n\n\treturn nil\n}",
"func (up *udevProbe) scan() error {\n\n\t// By using a semaphore, we ensure thread safety.\n\tif !sem.TryAcquire(1) {\n\t\treturn errors.New(\"Scan is in progress\")\n\t}\n\tdefer sem.Release(1)\n\n\tif (up.udev == nil) || (up.udevEnumerate == nil) {\n\t\treturn errors.New(\"unable to scan udev and udev enumerate is nil\")\n\t}\n\tdiskInfo := make([]*blockdevice.BlockDevice, 0)\n\tdisksUid := make([]string, 0)\n\terr := up.udevEnumerate.AddSubsystemFilter(libudevwrapper.UDEV_SUBSYSTEM)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = up.udevEnumerate.ScanDevices()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// everytime while performing the scan, we are re-initializing the\n\t// disk map of the system\n\tup.controller.BDHierarchy = make(blockdevice.Hierarchy)\n\tfor l := up.udevEnumerate.ListEntry(); l != nil; l = l.GetNextEntry() {\n\t\ts := l.GetName()\n\t\tnewUdevice, err := up.udev.NewDeviceFromSysPath(s)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif newUdevice.IsDisk() || newUdevice.IsParitition() {\n\t\t\tdeviceDetails := &blockdevice.BlockDevice{}\n\t\t\tif features.FeatureGates.IsEnabled(features.GPTBasedUUID) {\n\t\t\t\t// WWN, Serial, PartitionTableUUID/GPTLabel, PartitionUUID, FileSystemUUID and DeviceType\n\t\t\t\t// are the fields we use to generate the UUID. These fields will be fetched\n\t\t\t\t// from the udev event itself. 
This is to guarantee that we do not need to rely\n\t\t\t\t// on any other probes to fill in those details which are critical for device identification.\n\t\t\t\tdeviceDetails.DeviceAttributes.WWN = newUdevice.GetPropertyValue(libudevwrapper.UDEV_WWN)\n\t\t\t\tdeviceDetails.DeviceAttributes.Serial = newUdevice.GetPropertyValue(libudevwrapper.UDEV_SERIAL)\n\t\t\t\tdeviceDetails.PartitionInfo.PartitionTableUUID = newUdevice.GetPropertyValue(libudevwrapper.UDEV_PARTITION_TABLE_UUID)\n\t\t\t\tdeviceDetails.PartitionInfo.PartitionEntryUUID = newUdevice.GetPropertyValue(libudevwrapper.UDEV_PARTITION_UUID)\n\t\t\t\tdeviceDetails.FSInfo.FileSystemUUID = newUdevice.GetPropertyValue(libudevwrapper.UDEV_FS_UUID)\n\t\t\t\tdeviceDetails.DMInfo.DMUUID = newUdevice.GetPropertyValue(libudevwrapper.UDEV_DM_UUID)\n\t\t\t} else {\n\t\t\t\tuuid := newUdevice.GetUid()\n\t\t\t\tdisksUid = append(disksUid, uuid)\n\t\t\t\tdeviceDetails.UUID = uuid\n\t\t\t}\n\t\t\tudevDeviceType := newUdevice.GetPropertyValue(libudevwrapper.UDEV_DEVTYPE)\n\t\t\tdeviceDetails.SysPath = newUdevice.GetSyspath()\n\t\t\tdeviceDetails.DevPath = newUdevice.GetPath()\n\n\t\t\t// log the details only if present, to avoid log flooding\n\t\t\tif deviceDetails.DeviceAttributes.WWN != \"\" {\n\t\t\t\tklog.V(4).Infof(\"device: %s, WWN: %s filled during udev scan\",\n\t\t\t\t\tdeviceDetails.DevPath, deviceDetails.DeviceAttributes.WWN)\n\t\t\t}\n\t\t\tif deviceDetails.DeviceAttributes.Serial != \"\" {\n\t\t\t\tklog.V(4).Infof(\"device: %s, Serial: %s filled during udev scan\",\n\t\t\t\t\tdeviceDetails.DevPath, deviceDetails.DeviceAttributes.Serial)\n\t\t\t}\n\t\t\tif deviceDetails.PartitionInfo.PartitionTableUUID != \"\" {\n\t\t\t\tklog.V(4).Infof(\"device: %s, PartitionTableUUID: %s filled during udev scan\",\n\t\t\t\t\tdeviceDetails.DevPath, deviceDetails.PartitionInfo.PartitionTableUUID)\n\t\t\t}\n\t\t\tif deviceDetails.PartitionInfo.PartitionEntryUUID != \"\" {\n\t\t\t\tklog.V(4).Infof(\"device: %s, PartitionEntryUUID: 
%s filled during udev scan\",\n\t\t\t\t\tdeviceDetails.DevPath, deviceDetails.PartitionInfo.PartitionEntryUUID)\n\t\t\t}\n\t\t\tif deviceDetails.FSInfo.FileSystemUUID != \"\" {\n\t\t\t\tklog.V(4).Infof(\"device: %s, FileSystemUUID: %s filled during udev scan\",\n\t\t\t\t\tdeviceDetails.DevPath, deviceDetails.FSInfo.FileSystemUUID)\n\t\t\t}\n\n\t\t\tsysfsDevice, err := sysfs.NewSysFsDeviceFromDevPath(deviceDetails.DevPath)\n\t\t\t// TODO if error occurs a rescan may be required\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"could not get sysfs device for %s, err: %v\", deviceDetails.DevPath, err)\n\t\t\t} else {\n\t\t\t\t// get the dependents of the block device\n\t\t\t\t// this is done by scanning sysfs\n\t\t\t\tdependents, err := sysfsDevice.GetDependents()\n\t\t\t\t// TODO if error occurs need to do a scan from the beginning\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Errorf(\"error getting dependent devices for %s, err: %v\", deviceDetails.DevPath, err)\n\t\t\t\t} else {\n\t\t\t\t\tdeviceDetails.DependentDevices = dependents\n\t\t\t\t\tklog.Infof(\"Dependents of %s : %+v\", deviceDetails.DevPath, dependents)\n\t\t\t\t}\n\t\t\t\t// the device type reported by udev will always be disk/partition. 
Using this info\n\t\t\t\t// and the entries from sysfs, the actual device type is found out.\n\t\t\t\tdeviceType, err := sysfsDevice.GetDeviceType(udevDeviceType)\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Errorf(\"could not get device type for %s, falling back to udev reported type: %s\", deviceDetails.DevPath, udevDeviceType)\n\t\t\t\t\tdeviceType = udevDeviceType\n\t\t\t\t}\n\t\t\t\tdeviceDetails.DeviceAttributes.DeviceType = deviceType\n\t\t\t\tklog.Infof(\"Device: %s is of type: %s\", deviceDetails.DevPath, deviceDetails.DeviceAttributes.DeviceType)\n\t\t\t}\n\n\t\t\tdiskInfo = append(diskInfo, deviceDetails)\n\t\t}\n\t\tnewUdevice.UdevDeviceUnref()\n\t}\n\n\t// when GPTBasedUUID is enabled, all the blockdevices will be made inactive initially.\n\t// after that each device that is detected by the probe will be marked as Active.\n\tup.controller.DeactivateStaleBlockDeviceResource(disksUid)\n\teventDetails := controller.EventMessage{\n\t\tAction: libudevwrapper.UDEV_ACTION_ADD,\n\t\tDevices: diskInfo,\n\t}\n\tcontroller.EventMessageChannel <- eventDetails\n\treturn nil\n}",
"func TestScanLimit4B(t *testing.T) {\n}",
"func TryAdapt[From, To any](inner SeriesParser[From], adapt func(From) (To, error)) SeriesParser[To] {\n\treturn newAdapter(inner, adapt)\n}",
"func (g *genericScheduler) findNodesThatPassFilters(\n\tctx context.Context,\n\tfwk framework.Framework,\n\tstate *framework.CycleState,\n\tpod *v1.Pod,\n\tdiagnosis framework.Diagnosis,\n\tnodes []*framework.NodeInfo) ([]*v1.Node, error) {\n\tnumNodesToFind := g.numFeasibleNodesToFind(int32(len(nodes)))\n\n\t// Create feasible list with enough space to avoid growing it\n\t// and allow assigning.\n\tfeasibleNodes := make([]*v1.Node, numNodesToFind)\n\n\tif !fwk.HasFilterPlugins() {\n\t\tlength := len(nodes)\n\t\tfor i := range feasibleNodes {\n\t\t\tfeasibleNodes[i] = nodes[(g.nextStartNodeIndex+i)%length].Node()\n\t\t}\n\t\tg.nextStartNodeIndex = (g.nextStartNodeIndex + len(feasibleNodes)) % length\n\t\treturn feasibleNodes, nil\n\t}\n\n\terrCh := parallelize.NewErrorChannel()\n\tvar statusesLock sync.Mutex\n\tvar feasibleNodesLen int32\n\tctx, cancel := context.WithCancel(ctx)\n\tcheckNode := func(i int) {\n\t\t// We check the nodes starting from where we left off in the previous scheduling cycle,\n\t\t// this is to make sure all nodes have the same chance of being examined across pods.\n\t\tnodeInfo := nodes[(g.nextStartNodeIndex+i)%len(nodes)]\n\t\tstatus := fwk.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo)\n\t\tif status.Code() == framework.Error {\n\t\t\terrCh.SendErrorWithCancel(status.AsError(), cancel)\n\t\t\treturn\n\t\t}\n\t\tif status.IsSuccess() {\n\t\t\tlength := atomic.AddInt32(&feasibleNodesLen, 1)\n\t\t\tif length > numNodesToFind {\n\t\t\t\tcancel()\n\t\t\t\tatomic.AddInt32(&feasibleNodesLen, -1)\n\t\t\t} else {\n\t\t\t\tfeasibleNodes[length-1] = nodeInfo.Node()\n\t\t\t}\n\t\t} else {\n\t\t\tstatusesLock.Lock()\n\t\t\tdiagnosis.NodeToStatusMap[nodeInfo.Node().Name] = status\n\t\t\tdiagnosis.UnschedulablePlugins.Insert(status.FailedPlugin())\n\t\t\tstatusesLock.Unlock()\n\t\t}\n\t}\n\n\tbeginCheckNode := time.Now()\n\tstatusCode := framework.Success\n\tdefer func() {\n\t\t// We record Filter extension point latency here instead of 
in framework.go because framework.RunFilterPlugins\n\t\t// function is called for each node, whereas we want to have an overall latency for all nodes per scheduling cycle.\n\t\t// Note that this latency also includes latency for `addNominatedPods`, which calls framework.RunPreFilterAddPod.\n\t\tmetrics.FrameworkExtensionPointDuration.WithLabelValues(runtime.Filter, statusCode.String(), fwk.ProfileName()).Observe(metrics.SinceInSeconds(beginCheckNode))\n\t}()\n\n\t// Stops searching for more nodes once the configured number of feasible nodes\n\t// are found.\n\tfwk.Parallelizer().Until(ctx, len(nodes), checkNode)\n\tprocessedNodes := int(feasibleNodesLen) + len(diagnosis.NodeToStatusMap)\n\tg.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % len(nodes)\n\n\tfeasibleNodes = feasibleNodes[:feasibleNodesLen]\n\tif err := errCh.ReceiveError(); err != nil {\n\t\tstatusCode = framework.Error\n\t\treturn nil, err\n\t}\n\treturn feasibleNodes, nil\n}",
"func PrepareConversion(output, input image.Image) (*ConverterConfig, error) {\n\tsrc, _, err := inspect(input, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdst, _, err := inspect(output, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = checkConversion(dst, src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ConverterConfig{\n\t\tInput: *src,\n\t\tOutput: *dst,\n\t}, nil\n}",
"func dummyImpl(blk *ssa.BasicBlock) bool {\n\tvar ops [8]*ssa.Value\n\tfor _, instr := range blk.Instrs {\n\t\tif insertedStore(instr) {\n\t\t\tcontinue // inserted by go/ssa, not from the code\n\t\t}\n\t\tfor _, val := range instr.Operands(ops[:0]) {\n\t\t\tswitch x := (*val).(type) {\n\t\t\tcase nil, *ssa.Const, *ssa.ChangeType, *ssa.Alloc,\n\t\t\t\t*ssa.MakeInterface, *ssa.Function,\n\t\t\t\t*ssa.Global, *ssa.IndexAddr, *ssa.Slice,\n\t\t\t\t*ssa.UnOp, *ssa.Parameter:\n\t\t\tcase *ssa.Call:\n\t\t\t\tif rxHarmlessCall.MatchString(x.Call.Value.String()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tswitch x := instr.(type) {\n\t\tcase *ssa.Alloc, *ssa.Store, *ssa.UnOp, *ssa.BinOp,\n\t\t\t*ssa.MakeInterface, *ssa.MakeMap, *ssa.Extract,\n\t\t\t*ssa.IndexAddr, *ssa.FieldAddr, *ssa.Slice,\n\t\t\t*ssa.Lookup, *ssa.ChangeType, *ssa.TypeAssert,\n\t\t\t*ssa.Convert, *ssa.ChangeInterface:\n\t\t\t// non-trivial expressions in panic/log/print\n\t\t\t// calls\n\t\tcase *ssa.Return, *ssa.Panic:\n\t\t\treturn true\n\t\tcase *ssa.Call:\n\t\t\tif rxHarmlessCall.MatchString(x.Call.Value.String()) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn x.Call.Value.Name() == \"throw\" // runtime's panic\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}",
"func OptimalAssignment2(input [][]int, track [][]int)(assignments int){\n\tfmt.Println(\"3. AFTER OPTIMAL ASSIGNMENT\")\n\tfmt.Println(\"------------------------------------\")\n\tfmt.Println(\"1: selected zero (covered) \\n-1: zero (covered) \\n2: uncovered\")\n\tfmt.Println(\"3: covered once\\n4: covered twice\")\n\tfmt.Println(\"------------------------------------\")\n\n\tassignments = 0\n\t//For rows\n\tfor i:=0;i<len(input);i++{\n\t\t//fmt.Println(checkRowzeroes(input,i,track))\n\t\tif CheckRowzeroes(input,i,track)==1 { //if only 1 zero in the column\n\t\t\tindex := GetRowZeroIndex(track,i)\n\t\t\tif (track[i][index]==-1){\n\t\t\t\t//skip\n\t\t\t} else {\n\t\t\t\tassignments ++\n\t\t\t\ttrack[i][index] = 1//make that 1 on the tracking matrix\n\t\t\t\tzeroesincolumn := GetAllColZeroIndex(track, index, i) //get all zeroes in the column associated with that position\n\t\t\t\tfor n := 0; n < len(zeroesincolumn); n++ {\n\t\t\t\t\ttrack[zeroesincolumn[n]][index] = -1\n\t\t\t\t}\n\t\t\t\t//ADDED\n\t\t\t\tfor n:=0;n<len(track[i]);n++{\n\t\t\t\t\tif(track[n][index]==2){\n\t\t\t\t\t\ttrack[n][index]=3 //if it hasn't been covered it is now covered denoted by a 3\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"Checking Row: \",i)\n\t\tPrintMatrix(track)\n\t}\n\t//For columns\n\tfor i:=0;i<len(input[0]);i++ {\n\t\t//fmt.Println(checkColzeroes(input,i,track))\n\t\tif (CheckColzeroes(input,i,track))==1{\n\t\t\tindex := GetColZeroIndex(track,i)\n\t\t\tif (track[index][i])==0{\n\t\t\t\tassignments ++\n\t\t\t\ttrack[index][i]= 1\n\t\t\t\tzeroesinrow := GetAllRowZeroIndex(track,index,i) //changed from input to track, getColZeroIndex(track,i) to index\n\t\t\t\tfor n := 0; n < len(zeroesinrow); n++ {\n\t\t\t\t\ttrack[index][zeroesinrow[n]] = -1\n\t\t\t\t}\n\t\t\t\t//ADDED\n\t\t\t\tfor n:=0;n<len(track[index]);n++{\n\t\t\t\t\tif(track[index][n]==2){\n\t\t\t\t\t\ttrack[index][n]=3 // if it hasn't been covered then its now covered once denoted by a 3\n\t\t\t\t\t}else if 
(track[index][n]==3){\n\t\t\t\t\t\ttrack[index][n]=4 //if its already been covered, its now covered twice denoted by a 4\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"Checking Column: \",i)\n\t\tPrintMatrix(track)\n\t}\n\n\ttotalremaining := len(input)-assignments //calculate how many assignments left to make\n\tremainder := 0 //determine how many zeroes remaining\n\tnozero := false\n\tfor i:=0;i<len(track);i++{\n\t\tfor n:=0;n<len(track[i]);n++{\n\t\t\tif (track[i][n])==0{\n\t\t\t\tnozero = true\n\t\t\t\tremainder+=1\n\t\t\t}\n\t\t}\n\t}\n\tif (nozero){ //if there were zeroes\n\t\tif (remainder%totalremaining==1){\n\t\t\tassignments += totalremaining\n\t\t}else if (remainder%totalremaining==0){\n\t\t\tassignments += remainder/totalremaining\n\t\t}}\n\tif totalremaining!=0{\n\t\tif (remainder==1){\n\t\t\tassignments += totalremaining\n\t\t}}\n\treturn assignments\n}",
"func (s *Sudoku) try(val value, idx index) (*Sudoku, error) {\n\t// create a copy as we are guessing\n\tsc := s.copy()\n\tif err := sc.assign(val, idx); err != nil {\n\t\treturn nil, err\n\t}\n\tif !sc.grid.solved() {\n\t\t// take another guess\n\t\tif err := sc.search(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn sc, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.