element_type (stringclasses, 4 values) | project_name (stringclasses, 1 value) | uuid (stringlengths, 36 to 36) | name (stringlengths, 0 to 112) | imports (stringlengths, 0 to 2.45k) | structs (stringclasses, 523 values) | interfaces (stringclasses, 5 values) | file_location (stringlengths, 63 to 170) | code (stringlengths, 15 to 138k) | global_vars (stringclasses, 135 values) | package (stringclasses, 525 values) | tags (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|
file | eco-gotests | 6a1f077a-72e0-4662-9060-f033abe2f5ec | config.go | Imports Used: import (
"log"
"os"
"path/filepath"
"runtime"
"github.com/golang/glog"
"github.com/kelseyhightower/envconfig"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-gotests/tests/internal/config"
"gopkg.in/yaml.v2"
) | File Location: github.com/eco-gotests/tests/accel/internal/accelconfig/config.go | package accelconfig
import (
"log"
"os"
"path/filepath"
"runtime"
"github.com/golang/glog"
"github.com/kelseyhightower/envconfig"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-gotests/tests/internal/config"
"gopkg.in/yaml.v2"
)
const (
// PathToDefaultAccelParamsFile is the path to the config file with the default accel test parameters.
PathToDefaultAccelParamsFile = "./default.yaml"
)
// AccelConfig contains environment information related to OCP upgrade tests.
type AccelConfig struct {
PullSecret string `envconfig:"ECO_ACCEL_PULL_SECRET"`
Registry string `envconfig:"ECO_ACCEL_REGISTRY"`
UpgradeTargetVersion string `envconfig:"ECO_ACCEL_UPGRADE_TARGET_IMAGE"`
SpokeKubeConfig string `envconfig:"ECO_ACCEL_SPOKE_KUBECONFIG"`
HubClusterName string `envconfig:"ECO_ACCEL_HUB_CLUSTER_NAME"`
HubMinorVersion string `envconfig:"ECO_ACCEL_HUB_MINOR_VERSION"`
IBUWorkloadImage string `yaml:"ibu_workload_image" envconfig:"ECO_ACCEL_WORKLOAD_IMAGE"`
SpokeAPIClient *clients.Settings
*config.GeneralConfig
}
// NewAccelConfig returns instance of AccelConfig type.
func NewAccelConfig() *AccelConfig {
log.Print("Creating new AccelConfig")
var accelConfig AccelConfig
accelConfig.GeneralConfig = config.NewConfig()
_, filename, _, _ := runtime.Caller(0)
baseDir := filepath.Dir(filename)
configFile := filepath.Join(baseDir, PathToDefaultAccelParamsFile)
err := readFile(&accelConfig, configFile)
if err != nil {
glog.V(90).Infof("Error reading config file %s", configFile)
return nil
}
err = envconfig.Process("eco_accel_", &accelConfig)
if err != nil {
log.Printf("failed to instantiate AccelConfig: %v", err)
return nil
}
if accelConfig.SpokeKubeConfig != "" {
glog.V(90).Infof("Creating spoke api client from %s", accelConfig.SpokeKubeConfig)
if accelConfig.SpokeAPIClient = clients.New(
accelConfig.SpokeKubeConfig); accelConfig.SpokeAPIClient == nil {
glog.V(90).Infof("failed to load provided spoke kubeconfig")
}
} else {
accelConfig.SpokeAPIClient = nil
}
return &accelConfig
}
func readFile(accelConfig *AccelConfig, configFile string) error {
openedConfigFile, err := os.Open(configFile)
if err != nil {
return err
}
defer func() {
_ = openedConfigFile.Close()
}()
decoder := yaml.NewDecoder(openedConfigFile)
err = decoder.Decode(&accelConfig)
return err
}
| Package Name: package accelconfig | ||||
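A minimal usage sketch of the config loader above: values from default.yaml (located next to config.go) are read first, then ECO_ACCEL_* environment variables override them, and a spoke API client is only created when ECO_ACCEL_SPOKE_KUBECONFIG is set. The package name `example`, the helper `buildConfig`, and the kubeconfig path are hypothetical; the code also assumes it compiles somewhere under tests/accel so the internal package is importable.

```go
package example

import (
	"fmt"
	"os"

	"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelconfig"
)

// buildConfig shows the load order: default.yaml is decoded first, then
// ECO_ACCEL_* environment variables override the YAML values via envconfig.
func buildConfig() *accelconfig.AccelConfig {
	// Hypothetical path, only to trigger spoke client creation.
	_ = os.Setenv("ECO_ACCEL_SPOKE_KUBECONFIG", "/path/to/spoke/kubeconfig")

	cfg := accelconfig.NewAccelConfig()
	if cfg == nil {
		return nil
	}

	fmt.Println("workload image:", cfg.IBUWorkloadImage)
	fmt.Println("spoke client created:", cfg.SpokeAPIClient != nil)

	return cfg
}
```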
function | eco-gotests | aa14300d-7357-4eba-b98b-683f974ac5e2 | NewAccelConfig | Imports Used: ['"log"', '"path/filepath"', '"runtime"', '"github.com/golang/glog"', '"github.com/kelseyhightower/envconfig"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-gotests/tests/internal/config"'] | Structs Used: ['AccelConfig'] | File Location: github.com/eco-gotests/tests/accel/internal/accelconfig/config.go | func NewAccelConfig() *AccelConfig {
log.Print("Creating new AccelConfig")
var accelConfig AccelConfig
accelConfig.GeneralConfig = config.NewConfig()
_, filename, _, _ := runtime.Caller(0)
baseDir := filepath.Dir(filename)
configFile := filepath.Join(baseDir, PathToDefaultAccelParamsFile)
err := readFile(&accelConfig, configFile)
if err != nil {
glog.V(90).Infof("Error reading config file %s", configFile)
return nil
}
err = envconfig.Process("eco_accel_", &accelConfig)
if err != nil {
log.Printf("failed to instantiate AccelConfig: %v", err)
return nil
}
if accelConfig.SpokeKubeConfig != "" {
glog.V(90).Infof("Creating spoke api client from %s", accelConfig.SpokeKubeConfig)
if accelConfig.SpokeAPIClient = clients.New(
accelConfig.SpokeKubeConfig); accelConfig.SpokeAPIClient == nil {
glog.V(90).Infof("failed to load provided spoke kubeconfig")
}
} else {
accelConfig.SpokeAPIClient = nil
}
return &accelConfig
} | Package Name: accelconfig | |||
function | eco-gotests | 9b9804e3-1ca3-4169-ab2c-278e9f655ce3 | readFile | Imports Used: ['"os"'] | Structs Used: ['AccelConfig'] | File Location: github.com/eco-gotests/tests/accel/internal/accelconfig/config.go | func readFile(accelConfig *AccelConfig, configFile string) error {
openedConfigFile, err := os.Open(configFile)
if err != nil {
return err
}
defer func() {
_ = openedConfigFile.Close()
}()
decoder := yaml.NewDecoder(openedConfigFile)
err = decoder.Decode(&accelConfig)
return err
} | Package Name: accelconfig | |||
file | eco-gotests | 7fa96bfe-4793-4b9c-bf9e-cbb3f70680f1 | accelinittools.go | Imports Used: import (
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelconfig"
"github.com/openshift-kni/eco-gotests/tests/internal/inittools"
) | File Location: github.com/eco-gotests/tests/accel/internal/accelinittools/accelinittools.go | package accelinittools
import (
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelconfig"
"github.com/openshift-kni/eco-gotests/tests/internal/inittools"
)
var (
// HubAPIClient provides API access to hub cluster.
HubAPIClient *clients.Settings
// SpokeAPIClient provides API access to spoke cluster.
SpokeAPIClient *clients.Settings
// AccelConfig provides access to configuration parameters.
AccelConfig *accelconfig.AccelConfig
)
func init() {
HubAPIClient = inittools.APIClient
AccelConfig = accelconfig.NewAccelConfig()
SpokeAPIClient = AccelConfig.SpokeAPIClient
}
| Package Name: package accelinittools | ||||
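A sketch of how other packages consume these globals: because the clients and the config are set up in init(), a dot-import of accelinittools makes HubAPIClient, SpokeAPIClient, and AccelConfig available as soon as the package is imported, which is exactly what upgrade_suite_test.go (later in this table) relies on. The package name `example` and the helper `newTestNamespace` are hypothetical.

```go
package example

import (
	"github.com/openshift-kni/eco-goinfra/pkg/namespace"

	. "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
)

// newTestNamespace is a hypothetical helper: accelinittools' init() has already
// run by the time this is called, so HubAPIClient and AccelConfig are ready.
func newTestNamespace(name string) *namespace.Builder {
	_ = AccelConfig.GetReportPath() // configuration is already populated

	return namespace.NewBuilder(HubAPIClient, name)
}
```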
function | eco-gotests | 28aba1d0-20c2-4353-8501-dde90e12f54c | init | Imports Used: ['"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelconfig"', '"github.com/openshift-kni/eco-gotests/tests/internal/inittools"'] | File Location: github.com/eco-gotests/tests/accel/internal/accelinittools/accelinittools.go | func init() {
HubAPIClient = inittools.APIClient
AccelConfig = accelconfig.NewAccelConfig()
SpokeAPIClient = AccelConfig.SpokeAPIClient
} | Package Name: accelinittools | ||||
file | eco-gotests | bcae3e3c-aa60-4a2f-9f36-74d08ce63d80 | const.go | File Location: github.com/eco-gotests/tests/accel/internal/accelparams/const.go | package accelparams
const (
// Label represents the accel label that can be used for test case selection.
Label = "accel"
)
| Package Name: package accelparams | |||||
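A sketch of how this label is actually used for selection: the shared accel label is combined with the suite-specific "upgrade" label into upgradeparams.Labels (see vars.go later in this table), and the suite passes that slice to Ginkgo's Label(...). The package name `example` and the helper `printLabels` are hypothetical.

```go
package example

import (
	"fmt"

	"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelparams"
	"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
)

// printLabels shows the composition that drives test case selection.
func printLabels() {
	fmt.Println(accelparams.Label)    // accel
	fmt.Println(upgradeparams.Labels) // [accel upgrade]
}
```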
file | eco-gotests | 894ddfa2-3f06-4dbd-abae-37dc7dfa28b2 | upgrade_suite_test.go | Imports Used: import (
"runtime"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
. "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
_ "github.com/openshift-kni/eco-gotests/tests/accel/upgrade/tests"
"github.com/openshift-kni/eco-gotests/tests/internal/reporter"
) | File Location: github.com/eco-gotests/tests/accel/upgrade/upgrade_suite_test.go | package upgrade
import (
"runtime"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
. "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
_ "github.com/openshift-kni/eco-gotests/tests/accel/upgrade/tests"
"github.com/openshift-kni/eco-gotests/tests/internal/reporter"
)
var (
_, currentFile, _, _ = runtime.Caller(0)
testNS = namespace.NewBuilder(HubAPIClient, upgradeparams.TestNamespaceName)
)
func TestUpgrade(t *testing.T) {
_, reporterConfig := GinkgoConfiguration()
reporterConfig.JUnitReport = AccelConfig.GetJunitReportPath(currentFile)
RegisterFailHandler(Fail)
RunSpecs(t, "Acceleration upgrade test", Label(upgradeparams.Labels...), reporterConfig)
}
var _ = AfterSuite(func() {
By("Deleting test namespace")
err := testNS.DeleteAndWait(5 * time.Minute)
Expect(err).ToNot(HaveOccurred(), "error to delete test namespace")
})
var _ = JustAfterEach(func() {
reporter.ReportIfFailed(
CurrentSpecReport(), currentFile, upgradeparams.ReporterNamespacesToDump, upgradeparams.ReporterCRDsToDump)
})
var _ = ReportAfterSuite("", func(report Report) {
reportxml.Create(report, AccelConfig.GetReportPath(), AccelConfig.TCPrefix)
})
| Package Name: package upgrade | ||||
function | eco-gotests | 12b6cef4-7561-42e6-b7ea-8a57bb042546 | TestUpgrade | Imports Used: ['"testing"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/upgrade_suite_test.go | func TestUpgrade(t *testing.T) {
_, reporterConfig := GinkgoConfiguration()
reporterConfig.JUnitReport = AccelConfig.GetJunitReportPath(currentFile)
RegisterFailHandler(Fail)
RunSpecs(t, "Acceleration upgrade test", Label(upgradeparams.Labels...), reporterConfig)
} | Global Variables: {'_': 'ReportAfterSuite("", func(report Report) {\n\treportxml.Create(report, AccelConfig.GetReportPath(), AccelConfig.TCPrefix)\n})'} | Package Name: upgrade | |||
file | eco-gotests | de0747f2-0921-433f-990e-d30e13613022 | create.go | Imports Used: import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/deployment"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/route"
"github.com/openshift-kni/eco-goinfra/pkg/service"
upgradeinittools "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
corev1 "k8s.io/api/core/v1"
) | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/createres/create.go | package createres
import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/deployment"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/route"
"github.com/openshift-kni/eco-goinfra/pkg/service"
upgradeinittools "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
corev1 "k8s.io/api/core/v1"
)
// Workload creates a workload with test image.
func Workload(apiClient *clients.Settings, workloadImage string) (*deployment.Builder, error) {
glog.V(90).Infof("Creating Deployment %q", upgradeparams.DeploymentName)
containerConfig, err := pod.NewContainerBuilder(upgradeparams.DeploymentName, upgradeinittools.
AccelConfig.IBUWorkloadImage, []string{"/hello-openshift"}).WithPorts(
[]corev1.ContainerPort{{Name: "http", ContainerPort: 8080}}).
WithSecurityContext(upgradeparams.DefaultSC).GetContainerCfg()
if err != nil {
return nil, fmt.Errorf("failed to get containerConfig with error: %w", err)
}
workloadDeployment, err := deployment.NewBuilder(
upgradeinittools.HubAPIClient, upgradeparams.DeploymentName, upgradeparams.TestNamespaceName, map[string]string{
"app": upgradeparams.DeploymentName,
}, *containerConfig).WithLabel("app", upgradeparams.DeploymentName).CreateAndWaitUntilReady(time.Second * 120)
if err != nil {
return nil, fmt.Errorf("failed to create workload with error: %w", err)
}
return workloadDeployment, nil
}
// Service creates a service for a workload.
// Returns the created service builder on success, otherwise returns an error.
func Service(apiClient *clients.Settings, port int32) (*service.Builder, error) {
glog.V(90).Infof("Creating Service %q", upgradeparams.DeploymentName)
glog.V(90).Infof("Defining ServicePort")
svcPort, err := service.DefineServicePort(
upgradeparams.ServicePort,
upgradeparams.ServicePort,
corev1.Protocol("TCP"))
if err != nil {
glog.V(90).Infof("Error defining service port: %v", err)
return nil, err
}
glog.V(90).Infof("Creating Service Builder")
svcDemo, err := service.NewBuilder(apiClient,
upgradeparams.DeploymentName,
upgradeparams.TestNamespaceName,
upgradeparams.ContainerLabelsMap,
*svcPort).Create()
if err != nil {
glog.V(90).Infof("Error creating service: %v", err)
return nil, err
}
glog.V(90).Infof("Created service: %q in %q namespace",
svcDemo.Definition.Name, svcDemo.Definition.Namespace)
return svcDemo, nil
}
// WorkloadRoute creates a route for the workload service.
func WorkloadRoute(apiClient *clients.Settings) (*route.Builder, error) {
workloadRoute, err := route.NewBuilder(
apiClient, upgradeparams.DeploymentName, upgradeparams.TestNamespaceName, upgradeparams.DeploymentName).Create()
if err != nil {
glog.V(90).Infof("Error creating route: %v", err)
return nil, err
}
return workloadRoute, err
}
| Package Name: package createres | ||||
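A hedged sketch of the typical call order for the helpers above: deployment first, then the service on upgradeparams.ServicePort, then the route exposing it. This mirrors startTestWorkloadAndGetRoute shown later in this table; the package name `example` and the wrapper `deployWorkload` are hypothetical.

```go
package example

import (
	"fmt"

	"github.com/openshift-kni/eco-goinfra/pkg/clients"
	"github.com/openshift-kni/eco-goinfra/pkg/route"

	"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/createres"
	"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
)

// deployWorkload chains the createres helpers: workload, then service, then route.
func deployWorkload(apiClient *clients.Settings, image string) (*route.Builder, error) {
	if _, err := createres.Workload(apiClient, image); err != nil {
		return nil, fmt.Errorf("workload: %w", err)
	}

	if _, err := createres.Service(apiClient, upgradeparams.ServicePort); err != nil {
		return nil, fmt.Errorf("service: %w", err)
	}

	// The route exposes the service created above under the deployment name.
	return createres.WorkloadRoute(apiClient)
}
```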
function | eco-gotests | 25ee7ad8-060f-4e24-a143-e331be50b948 | Workload | Imports Used: ['"fmt"', '"time"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/deployment"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/createres/create.go | func Workload(apiClient *clients.Settings, workloadImage string) (*deployment.Builder, error) {
glog.V(90).Infof("Creating Deployment %q", upgradeparams.DeploymentName)
containerConfig, err := pod.NewContainerBuilder(upgradeparams.DeploymentName, upgradeinittools.
AccelConfig.IBUWorkloadImage, []string{"/hello-openshift"}).WithPorts(
[]corev1.ContainerPort{{Name: "http", ContainerPort: 8080}}).
WithSecurityContext(upgradeparams.DefaultSC).GetContainerCfg()
if err != nil {
return nil, fmt.Errorf("failed to get containerConfig with error: %w", err)
}
workloadDeployment, err := deployment.NewBuilder(
upgradeinittools.HubAPIClient, upgradeparams.DeploymentName, upgradeparams.TestNamespaceName, map[string]string{
"app": upgradeparams.DeploymentName,
}, *containerConfig).WithLabel("app", upgradeparams.DeploymentName).CreateAndWaitUntilReady(time.Second * 120)
if err != nil {
return nil, fmt.Errorf("failed to create workload with error: %w", err)
}
return workloadDeployment, nil
} | Package Name: createres | ||||
function | eco-gotests | 0607b5df-e744-41f7-ad86-87c58fa114c2 | Service | Imports Used: ['"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/service"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/createres/create.go | func Service(apiClient *clients.Settings, port int32) (*service.Builder, error) {
glog.V(90).Infof("Creating Service %q", upgradeparams.DeploymentName)
glog.V(90).Infof("Defining ServicePort")
svcPort, err := service.DefineServicePort(
upgradeparams.ServicePort,
upgradeparams.ServicePort,
corev1.Protocol("TCP"))
if err != nil {
glog.V(90).Infof("Error defining service port: %v", err)
return nil, err
}
glog.V(90).Infof("Creating Service Builder")
svcDemo, err := service.NewBuilder(apiClient,
upgradeparams.DeploymentName,
upgradeparams.TestNamespaceName,
upgradeparams.ContainerLabelsMap,
*svcPort).Create()
if err != nil {
glog.V(90).Infof("Error creating service: %v", err)
return nil, err
}
glog.V(90).Infof("Created service: %q in %q namespace",
svcDemo.Definition.Name, svcDemo.Definition.Namespace)
return svcDemo, nil
} | Package Name: createres | ||||
function | eco-gotests | 8efd033b-4d2f-4b59-8a7f-563eeb3c3457 | WorkloadRoute | Imports Used: ['"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/route"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/createres/create.go | func WorkloadRoute(apiClient *clients.Settings) (*route.Builder, error) {
workloadRoute, err := route.NewBuilder(
apiClient, upgradeparams.DeploymentName, upgradeparams.TestNamespaceName, upgradeparams.DeploymentName).Create()
if err != nil {
glog.V(90).Infof("Error creating route: %v", err)
return nil, err
}
return workloadRoute, err
} | Package Name: createres | ||||
file | eco-gotests | 83f151b8-9302-4b87-9ac3-87277e78c96a | delete.go | Imports Used: import (
"context"
"time"
"github.com/golang/glog"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/service"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/deleteres/delete.go | package deleteres
import (
"context"
"time"
"github.com/golang/glog"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/service"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// retryDurationSecs is the total time to keep polling for workload pods.
retryDurationSecs = 360 * time.Second
// pollIntervalSecs is the interval between poll attempts.
pollIntervalSecs = 20 * time.Second
)
// Workload deletes a workload.
// Returns nil on success, otherwise returns an error.
func Workload(apiClient *clients.Settings) error {
var (
oldPods []*pod.Builder
err error
)
pollSuccess := false
err = wait.PollUntilContextTimeout(
context.TODO(), pollIntervalSecs, retryDurationSecs, true, func(ctx context.Context) (bool, error) {
oldPods, err = pod.List(apiClient, upgradeparams.TestNamespaceName,
metav1.ListOptions{LabelSelector: upgradeparams.ContainerLabelsStr})
if err != nil {
return false, nil
}
pollSuccess = true
glog.V(90).Infof("Found %d pods matching label %q ",
len(oldPods), upgradeparams.ContainerLabelsStr)
return true, nil
})
if !pollSuccess {
glog.V(90).Infof("Error listing pods in %q namespace",
upgradeparams.TestNamespaceName)
return err
}
if len(oldPods) == 0 {
glog.V(90).Infof("No pods matching label %q found in %q namespace",
upgradeparams.ContainerLabelsStr, upgradeparams.TestNamespaceName)
}
for _, _pod := range oldPods {
glog.V(90).Infof("Deleting pod %q in %q namspace",
_pod.Definition.Name, _pod.Definition.Namespace)
_pod, err = _pod.DeleteAndWait(300 * time.Second)
if err != nil {
glog.V(90).Infof("Failed to delete pod %q: %v",
_pod.Definition.Name, err)
return err
}
}
return nil
}
// Service deletes a service.
// Returns nil on success, otherwise returns an error.
func Service(apiClient *clients.Settings) error {
glog.V(90).Infof("Deleting Service %q in %q namespace",
upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
svcDemo, err := service.Pull(apiClient, upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
if err != nil && svcDemo == nil {
glog.V(90).Infof("Service %q not found in %q namespace",
upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
return err
}
err = svcDemo.Delete()
if err != nil {
glog.V(90).Infof("Error deleting service: %v", err)
return err
}
glog.V(90).Infof("Deleted service %q in %q namespace",
upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
return nil
}
// Namespace deletes the workload test namespace.
func Namespace(apiClient *clients.Settings) error {
glog.V(90).Infof("Deleting namespace %q", upgradeparams.TestNamespaceName)
nsDemo, err := namespace.Pull(apiClient, upgradeparams.TestNamespaceName)
if err != nil && nsDemo == nil {
glog.V(90).Infof("Namespace %q not found", upgradeparams.TestNamespaceName)
return err
}
err = nsDemo.DeleteAndWait(5 * time.Minute)
if err != nil {
glog.V(90).Infof("Error deleting namespace: %v", err)
return err
}
glog.V(90).Infof("Deleted namespace %q", upgradeparams.TestNamespaceName)
return nil
}
| Package Name: package deleteres | ||||
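A hedged sketch of a full teardown pass using the helpers above: pods first, then the service, then the namespace (which removes anything the first two steps missed). The upgrade suite itself only calls deleteres.Namespace via deleteWorkloadNamespace (later in this table); the package name `example` and the helper `cleanup` are hypothetical.

```go
package example

import (
	"github.com/golang/glog"
	"github.com/openshift-kni/eco-goinfra/pkg/clients"

	"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/deleteres"
)

// cleanup is a hypothetical teardown helper chaining the deleteres functions.
func cleanup(apiClient *clients.Settings) error {
	if err := deleteres.Workload(apiClient); err != nil {
		glog.V(90).Infof("workload cleanup failed: %v", err)

		return err
	}

	if err := deleteres.Service(apiClient); err != nil {
		glog.V(90).Infof("service cleanup failed: %v", err)

		return err
	}

	// Deleting the namespace removes any remaining test resources.
	return deleteres.Namespace(apiClient)
}
```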
function | eco-gotests | f247c097-ca59-4c14-8d0f-fc8f916f731c | Workload | Imports Used: ['"context"', '"time"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"k8s.io/apimachinery/pkg/util/wait"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/deleteres/delete.go | func Workload(apiClient *clients.Settings) error {
var (
oldPods []*pod.Builder
err error
)
pollSuccess := false
err = wait.PollUntilContextTimeout(
context.TODO(), pollIntervalSecs, retryDurationSecs, true, func(ctx context.Context) (bool, error) {
oldPods, err = pod.List(apiClient, upgradeparams.TestNamespaceName,
metav1.ListOptions{LabelSelector: upgradeparams.ContainerLabelsStr})
if err != nil {
return false, nil
}
pollSuccess = true
glog.V(90).Infof("Found %d pods matching label %q ",
len(oldPods), upgradeparams.ContainerLabelsStr)
return true, nil
})
if !pollSuccess {
glog.V(90).Infof("Error listing pods in %q namespace",
upgradeparams.TestNamespaceName)
return err
}
if len(oldPods) == 0 {
glog.V(90).Infof("No pods matching label %q found in %q namespace",
upgradeparams.ContainerLabelsStr, upgradeparams.TestNamespaceName)
}
for _, _pod := range oldPods {
glog.V(90).Infof("Deleting pod %q in %q namspace",
_pod.Definition.Name, _pod.Definition.Namespace)
_pod, err = _pod.DeleteAndWait(300 * time.Second)
if err != nil {
glog.V(90).Infof("Failed to delete pod %q: %v",
_pod.Definition.Name, err)
return err
}
}
return nil
} | Package Name: deleteres | ||||
function | eco-gotests | a394a461-6c85-4648-8fb8-2fbeacfb94de | Service | Imports Used: ['"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/service"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/deleteres/delete.go | func Service(apiClient *clients.Settings) error {
glog.V(90).Infof("Deleting Service %q in %q namespace",
upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
svcDemo, err := service.Pull(apiClient, upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
if err != nil && svcDemo == nil {
glog.V(90).Infof("Service %q not found in %q namespace",
upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
return err
}
err = svcDemo.Delete()
if err != nil {
glog.V(90).Infof("Error deleting service: %v", err)
return err
}
glog.V(90).Infof("Deleted service %q in %q namespace",
upgradeparams.DeploymentName, upgradeparams.TestNamespaceName)
return nil
} | Package Name: deleteres | ||||
function | eco-gotests | 44b8c6e3-a174-4daf-8805-8d38732e7ab0 | Namespace | Imports Used: ['"time"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/deleteres/delete.go | func Namespace(apiClient *clients.Settings) error {
glog.V(90).Infof("Deleting namespace %q", upgradeparams.TestNamespaceName)
nsDemo, err := namespace.Pull(apiClient, upgradeparams.TestNamespaceName)
if err != nil && nsDemo == nil {
glog.V(90).Infof("Namespace %q not found", upgradeparams.TestNamespaceName)
return err
}
err = nsDemo.DeleteAndWait(5 * time.Minute)
if err != nil {
glog.V(90).Infof("Error deleting namespace: %v", err)
return err
}
glog.V(90).Infof("Deleted namespace %q", upgradeparams.TestNamespaceName)
return nil
} | Package Name: deleteres | ||||
file | eco-gotests | 6d094d3d-d24d-4769-b5ee-52737e3c985c | const.go | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/upgradeparams/const.go | package upgradeparams
const (
// Label represents the upgrade label that can be used for test case selection.
Label = "upgrade"
// Y stream.
Y = "Y"
// Z stream.
Z = "Z"
// X stream.
X = "X"
)
| Package Name: package upgradeparams | |||||
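A sketch of how the stream constants above are consumed: when no explicit target image is configured, the upgrade test case (later in this table) passes upgradeparams.Z to the clusterversion builder to pick the next z-stream image. The package name `example` and the helper `nextZStreamImage` are hypothetical.

```go
package example

import (
	"github.com/openshift-kni/eco-goinfra/pkg/clients"
	"github.com/openshift-kni/eco-goinfra/pkg/clusterversion"

	"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
)

// nextZStreamImage returns the next z-stream update image for the cluster.
func nextZStreamImage(apiClient *clients.Settings) (string, error) {
	version, err := clusterversion.Pull(apiClient)
	if err != nil {
		return "", err
	}

	// upgradeparams.X and upgradeparams.Y select the other streams.
	return version.GetNextUpdateVersionImage(upgradeparams.Z, false)
}
```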
file | eco-gotests | 233cdeda-8a94-41e0-bd87-e9d8254bc52e | vars.go | Imports Used: import (
"fmt"
"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelparams"
"github.com/openshift-kni/k8sreporter"
v1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
) | File Location: github.com/eco-gotests/tests/accel/upgrade/internal/upgradeparams/vars.go | package upgradeparams
import (
"fmt"
"github.com/openshift-kni/eco-gotests/tests/accel/internal/accelparams"
"github.com/openshift-kni/k8sreporter"
v1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
)
var (
// Labels represents the range of labels that can be used for test case selection.
Labels = []string{accelparams.Label, Label}
// DeploymentName is the name of the test workload.
DeploymentName = "test-workload"
// TestNamespaceName is the namespace where the workload is deployed.
TestNamespaceName = "accel-upgrade-workload-ns"
// ContainerLabelsMap is the workload label set as a map, used when creating the workload container.
ContainerLabelsMap = map[string]string{"app": DeploymentName}
// ContainerLabelsStr is the workload label set as a label-selector string, e.g. for listing workload pods.
ContainerLabelsStr = fmt.Sprintf("%s=%s", "app", DeploymentName)
// ServicePort is the workload service port.
ServicePort int32 = 8080
// ReporterNamespacesToDump tells the reporter which namespaces to collect logs from.
ReporterNamespacesToDump = map[string]string{"test-workload": "test-workload",
"accel-upgrade-workload-ns": "accel-upgrade-workload-ns"}
// ReporterCRDsToDump tells the reporter which CRs to dump.
ReporterCRDsToDump = []k8sreporter.CRData{
{Cr: &corev1.PodList{}},
{Cr: &v1.ClusterOperatorList{}},
{Cr: &v1.ClusterVersionList{}},
}
trueFlag = true
falseFlag = false
// DefaultSC is the default security context for the containers.
DefaultSC = &corev1.SecurityContext{
AllowPrivilegeEscalation: &falseFlag,
RunAsNonRoot: &trueFlag,
SeccompProfile: &corev1.SeccompProfile{
Type: "RuntimeDefault",
},
}
)
| Package Name: package upgradeparams | ||||
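A small sketch tying the two label forms together: the map form feeds the deployment and service builders (create.go above), while the string form, which evaluates to "app=test-workload", is used as a label selector when listing pods (delete.go above). The package name `example` and the helper `listWorkloadPods` are hypothetical.

```go
package example

import (
	"github.com/openshift-kni/eco-goinfra/pkg/clients"
	"github.com/openshift-kni/eco-goinfra/pkg/pod"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
)

// listWorkloadPods lists the workload pods using the label-selector string form.
func listWorkloadPods(apiClient *clients.Settings) ([]*pod.Builder, error) {
	return pod.List(apiClient, upgradeparams.TestNamespaceName,
		metav1.ListOptions{LabelSelector: upgradeparams.ContainerLabelsStr})
}
```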
file | eco-gotests | fa56dec4-20cc-4777-a6ca-688f085b5f16 | upgrade.go | Imports Used: import (
"fmt"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/clusteroperator"
"github.com/openshift-kni/eco-goinfra/pkg/clusterversion"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-goinfra/pkg/route"
. "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/createres"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/deleteres"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
"github.com/openshift-kni/eco-gotests/tests/internal/url"
) | File Location: github.com/eco-gotests/tests/accel/upgrade/tests/upgrade.go | package upgrade
import (
"fmt"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/clusteroperator"
"github.com/openshift-kni/eco-goinfra/pkg/clusterversion"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-goinfra/pkg/route"
. "github.com/openshift-kni/eco-gotests/tests/accel/internal/accelinittools"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/createres"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/deleteres"
"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"
"github.com/openshift-kni/eco-gotests/tests/internal/url"
)
var (
waitToUpgradeStart = 5 * time.Minute
waitToUpgradeCompleted = 130 * time.Minute
desiredUpgradeChannel = "stable-4." + AccelConfig.HubMinorVersion
)
var _ = Describe("OCP_UPGRADE", Ordered, Label("minor"), func() {
Context("OCP", func() {
It("should upgrade successfully", reportxml.ID("72245"), func() {
By("Get the clusterversion struct")
version, err := clusterversion.Pull(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "error retrieving clusterversion")
glog.V(90).Infof("got the clusterversion struct %+v", version)
By("Deploy a workload in the cluster, expose a service and create a route")
workloadRoute := startTestWorkloadAndGetRoute()
By("Patch the clusterversion with the desired upgrade channel")
glog.V(90).Infof("this is the desired upgrade channel: %+v", desiredUpgradeChannel)
if desiredUpgradeChannel == "stable-4." {
desiredUpgradeChannel = version.Object.Spec.Channel
glog.V(90).Infof("clusterversion channel %s", desiredUpgradeChannel)
}
version, err = version.WithDesiredUpdateChannel(desiredUpgradeChannel).Update()
Expect(err).ToNot(HaveOccurred(), "error patching the desired upgrade channel")
glog.V(90).Infof("patched the clusterversion channel %s", desiredUpgradeChannel)
By("Get the desired update image")
desiredImage := AccelConfig.UpgradeTargetVersion
if desiredImage == "" {
desiredImage, err = version.GetNextUpdateVersionImage(upgradeparams.Z, false)
Expect(err).ToNot(HaveOccurred(), "error getting the next update image")
}
glog.V(90).Infof("got the desired update image in %s stream %s", upgradeparams.Z, desiredImage)
By("Patch the clusterversion with the desired upgrade image")
version, err = version.WithDesiredUpdateImage(desiredImage, true).Update()
Expect(err).ToNot(HaveOccurred(), "error patching the desired image")
Expect(version.Object.Spec.DesiredUpdate.Image).To(Equal(desiredImage))
glog.V(90).Infof("patched the clusterversion with desired image %s", desiredImage)
By("Wait until upgrade starts")
err = version.WaitUntilUpdateIsStarted(waitToUpgradeStart)
Expect(err).ToNot(HaveOccurred(), "the upgrade didn't start after %s", waitToUpgradeStart)
glog.V(90).Infof("upgrade has started")
By("Wait until upgrade completes")
err = version.WaitUntilUpdateIsCompleted(waitToUpgradeCompleted)
Expect(err).ToNot(HaveOccurred(), "the upgrade didn't complete after %s", waitToUpgradeCompleted)
glog.V(90).Infof("upgrade has completed")
By("Check that the clusterversion is updated to the desired version")
Expect(version.Object.Status.Desired.Image).To(Equal(desiredImage))
glog.V(90).Infof("upgrade to image %s has completed successfully", desiredImage)
By("Check that all the operators version is the desired version")
clusteroperatorList, err := clusteroperator.List(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "failed to get the clusteroperators list %v", err)
hasVersion, err := clusteroperator.VerifyClusterOperatorsVersion(version.Object.Status.Desired.Version,
clusteroperatorList)
Expect(err).NotTo(HaveOccurred(), "error while checking operators version")
Expect(hasVersion).To(BeTrue())
By("Check that no cluster operator is progressing")
cosStoppedProgressing, err := clusteroperator.
WaitForAllClusteroperatorsStopProgressing(HubAPIClient, time.Minute*5)
Expect(err).ToNot(HaveOccurred(), "error while waiting for cluster operators to stop progressing")
Expect(cosStoppedProgressing).To(BeTrue(), "error: some cluster operators are still progressing")
By("Check that all cluster operators are available")
cosAvailable, err := clusteroperator.
WaitForAllClusteroperatorsAvailable(HubAPIClient, time.Minute*5)
Expect(err).NotTo(HaveOccurred(), "error while waiting for cluster operators to become available")
Expect(cosAvailable).To(BeTrue(), "error: some cluster operators are not available")
By("Check that all pods are running in workload namespace")
workloadPods, err := pod.List(HubAPIClient, upgradeparams.TestNamespaceName)
Expect(err).NotTo(HaveOccurred(), "error listing pods in workload namespace %s", upgradeparams.TestNamespaceName)
Expect(len(workloadPods) > 0).To(BeTrue(),
"error: found no running pods in workload namespace %s", upgradeparams.TestNamespaceName)
for _, workloadPod := range workloadPods {
err := workloadPod.WaitUntilReady(time.Minute * 2)
Expect(err).To(BeNil(), "error waiting for workload pod to become ready")
}
verifyWorkloadReachable(workloadRoute)
})
AfterAll(func() {
By("Delete workload test namespace")
glog.V(90).Infof("Deleting test deployments")
deleteWorkloadNamespace()
})
})
})
func startTestWorkloadAndGetRoute() *route.Builder {
By("Check if workload app namespace exists")
if _, err := namespace.Pull(HubAPIClient, upgradeparams.TestNamespaceName); err == nil {
deleteWorkloadNamespace()
}
By("Create workload app namespace")
_, err := namespace.NewBuilder(HubAPIClient, upgradeparams.TestNamespaceName).Create()
Expect(err).NotTo(HaveOccurred(), "error creating namespace for workload app")
By("Create workload app deployment")
_, err = createres.Workload(HubAPIClient, AccelConfig.IBUWorkloadImage)
Expect(err).ToNot(HaveOccurred(), "error creating workload application")
By("Create workload app service")
_, err = createres.Service(HubAPIClient, upgradeparams.ServicePort)
Expect(err).ToNot(HaveOccurred(), "error creating workload service %v", err)
By("Create workload app route")
workloadRoute, err := createres.WorkloadRoute(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "error creating workload route %v", err)
verifyWorkloadReachable(workloadRoute)
return workloadRoute
}
func deleteWorkloadNamespace() {
By("Delete workload")
err := deleteres.Namespace(HubAPIClient)
Expect(err).NotTo(HaveOccurred(), "error deleting workload namespace %v", err)
}
func verifyWorkloadReachable(workloadRoute *route.Builder) {
By("Verify workload is reachable")
Eventually(func() bool {
_, rc, err := url.Fetch(fmt.Sprintf("http://%s", workloadRoute.Object.Spec.Host), "get", true)
glog.V(90).Infof("trying to reach the workload with error %v", err)
return rc == 200
}, time.Second*10, time.Second*2).Should(BeTrue(), "error reaching the workload")
}
| Package Name: package upgrade | ||||
function | eco-gotests | 9bb72a5d-81c5-42a3-870d-04d99916a4e6 | startTestWorkloadAndGetRoute | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/route"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/createres"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/tests/upgrade.go | func startTestWorkloadAndGetRoute() *route.Builder {
By("Check if workload app namespace exists")
if _, err := namespace.Pull(HubAPIClient, upgradeparams.TestNamespaceName); err == nil {
deleteWorkloadNamespace()
}
By("Create workload app namespace")
_, err := namespace.NewBuilder(HubAPIClient, upgradeparams.TestNamespaceName).Create()
Expect(err).NotTo(HaveOccurred(), "error creating namespace for workload app")
By("Create workload app deployment")
_, err = createres.Workload(HubAPIClient, AccelConfig.IBUWorkloadImage)
Expect(err).ToNot(HaveOccurred(), "error creating workload application")
By("Create workload app service")
_, err = createres.Service(HubAPIClient, upgradeparams.ServicePort)
Expect(err).ToNot(HaveOccurred(), "error creating workload service %v", err)
By("Create workload app route")
workloadRoute, err := createres.WorkloadRoute(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "error creating workload route %v", err)
verifyWorkloadReachable(workloadRoute)
return workloadRoute
} | Global Variables: {'_': 'Describe("OCP_UPGRADE", Ordered, Label("minor"), func() {\n\tContext("OCP", func() {\n\t\tIt("should upgrade successfully", reportxml.ID("72245"), func() {\n\t\t\tBy("Get the clusterversion struct")\n\t\t\tversion, err := clusterversion.Pull(HubAPIClient)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error retrieving clusterversion")\n\t\t\tglog.V(90).Infof("got the clusterversion struct %+v", version)\n\n\t\t\tBy("Deploy a workload in the cluster, expose a service and create a route")\n\t\t\tworkloadRoute := startTestWorkloadAndGetRoute()\n\n\t\t\tBy("Patch the clusterversion with the desired upgrade channel")\n\t\t\tglog.V(90).Infof("this is the desired upgrade channel: %+v", desiredUpgradeChannel)\n\t\t\tif desiredUpgradeChannel == "stable-4." {\n\t\t\t\tdesiredUpgradeChannel = version.Object.Spec.Channel\n\t\t\t\tglog.V(90).Infof("clusterversion channel %s", desiredUpgradeChannel)\n\t\t\t}\n\t\t\tversion, err = version.WithDesiredUpdateChannel(desiredUpgradeChannel).Update()\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error patching the desired upgrade channel")\n\t\t\tglog.V(90).Infof("patched the clusterversion channel %s", desiredUpgradeChannel)\n\n\t\t\tBy("Get the desired update image")\n\t\t\tdesiredImage := AccelConfig.UpgradeTargetVersion\n\t\t\tif desiredImage == "" {\n\t\t\t\tdesiredImage, err = version.GetNextUpdateVersionImage(upgradeparams.Z, false)\n\t\t\t\tExpect(err).ToNot(HaveOccurred(), "error getting the next update image")\n\t\t\t}\n\t\t\tglog.V(90).Infof("got the desired update image in %s stream %s", upgradeparams.Z, desiredImage)\n\n\t\t\tBy("Patch the clusterversion with the desired upgrade image")\n\t\t\tversion, err = version.WithDesiredUpdateImage(desiredImage, true).Update()\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error patching the desired image")\n\t\t\tExpect(version.Object.Spec.DesiredUpdate.Image).To(Equal(desiredImage))\n\t\t\tglog.V(90).Infof("patched the clusterversion with desired image %s", desiredImage)\n\n\t\t\tBy("Wait until upgrade starts")\n\t\t\terr = version.WaitUntilUpdateIsStarted(waitToUpgradeStart)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "the upgrade didn\'t start after %s", waitToUpgradeStart)\n\t\t\tglog.V(90).Infof("upgrade has started")\n\n\t\t\tBy("Wait until upgrade completes")\n\t\t\terr = version.WaitUntilUpdateIsCompleted(waitToUpgradeCompleted)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "the upgrade didn\'t complete after %s", waitToUpgradeCompleted)\n\t\t\tglog.V(90).Infof("upgrade has completed")\n\n\t\t\tBy("Check that the clusterversion is updated to the desired version")\n\t\t\tExpect(version.Object.Status.Desired.Image).To(Equal(desiredImage))\n\t\t\tglog.V(90).Infof("upgrade to image %s has completed successfully", desiredImage)\n\n\t\t\tBy("Check that all the operators version is the desired version")\n\t\t\tclusteroperatorList, err := clusteroperator.List(HubAPIClient)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "failed to get the clusteroperators list %v", err)\n\t\t\thasVersion, err := clusteroperator.VerifyClusterOperatorsVersion(version.Object.Status.Desired.Version,\n\t\t\t\tclusteroperatorList)\n\t\t\tExpect(err).NotTo(HaveOccurred(), "error while checking operators version")\n\t\t\tExpect(hasVersion).To(BeTrue())\n\n\t\t\tBy("Check that no cluster operator is progressing")\n\t\t\tcosStoppedProgressing, err := clusteroperator.\n\t\t\t\tWaitForAllClusteroperatorsStopProgressing(HubAPIClient, time.Minute*5)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error while waiting for cluster operators to 
stop progressing")\n\t\t\tExpect(cosStoppedProgressing).To(BeTrue(), "error: some cluster operators are still progressing")\n\n\t\t\tBy("Check that all cluster operators are available")\n\t\t\tcosAvailable, err := clusteroperator.\n\t\t\t\tWaitForAllClusteroperatorsAvailable(HubAPIClient, time.Minute*5)\n\t\t\tExpect(err).NotTo(HaveOccurred(), "error while waiting for cluster operators to become available")\n\t\t\tExpect(cosAvailable).To(BeTrue(), "error: some cluster operators are not available")\n\n\t\t\tBy("Check that all pods are running in workload namespace")\n\t\t\tworkloadPods, err := pod.List(HubAPIClient, upgradeparams.TestNamespaceName)\n\t\t\tExpect(err).NotTo(HaveOccurred(), "error listing pods in workload namespace %s", upgradeparams.TestNamespaceName)\n\t\t\tExpect(len(workloadPods) > 0).To(BeTrue(),\n\t\t\t\t"error: found no running pods in workload namespace %s", upgradeparams.TestNamespaceName)\n\n\t\t\tfor _, workloadPod := range workloadPods {\n\t\t\t\terr := workloadPod.WaitUntilReady(time.Minute * 2)\n\t\t\t\tExpect(err).To(BeNil(), "error waiting for workload pod to become ready")\n\t\t\t}\n\n\t\t\tverifyWorkloadReachable(workloadRoute)\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tBy("Delete workload test namespace")\n\t\t\tglog.V(90).Infof("Deleting test deployments")\n\t\t\tdeleteWorkloadNamespace()\n\t\t})\n\t})\n})'} | Package Name: upgrade | |||
function | eco-gotests | ce6063a8-b75e-4611-9236-0eb5e1d4ad51 | deleteWorkloadNamespace | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/deleteres"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/tests/upgrade.go | func deleteWorkloadNamespace() {
By("Delete workload")
err := deleteres.Namespace(HubAPIClient)
Expect(err).NotTo(HaveOccurred(), "error deleting workload namespace %v", err)
} | Package Name: upgrade | ||||
function | eco-gotests | 3391764d-710f-4ca0-a7a4-45c75799a708 | verifyWorkloadReachable | Imports Used: ['"fmt"', '"time"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/route"', '"github.com/openshift-kni/eco-gotests/tests/internal/url"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/tests/upgrade.go | func verifyWorkloadReachable(workloadRoute *route.Builder) {
By("Verify workload is reachable")
Eventually(func() bool {
_, rc, err := url.Fetch(fmt.Sprintf("http://%s", workloadRoute.Object.Spec.Host), "get", true)
glog.V(90).Infof("trying to reach the workload with error %v", err)
return rc == 200
}, time.Second*10, time.Second*2).Should(BeTrue(), "error reaching the workload")
} | Package Name: upgrade | ||||
test | eco-gotests | 42d267a4-0c2d-4a21-bbe4-dff1934480dd | OCP_UPGRADE | Imports Used: ['"time"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clusteroperator"', '"github.com/openshift-kni/eco-goinfra/pkg/clusterversion"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-goinfra/pkg/reportxml"', '"github.com/openshift-kni/eco-goinfra/pkg/route"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/tests/upgrade.go | Describe("OCP_UPGRADE", Ordered, Label("minor"), func() {
Context("OCP", func() {
It("should upgrade successfully", reportxml.ID("72245"), func() {
By("Get the clusterversion struct")
version, err := clusterversion.Pull(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "error retrieving clusterversion")
glog.V(90).Infof("got the clusterversion struct %+v", version)
By("Deploy a workload in the cluster, expose a service and create a route")
workloadRoute := startTestWorkloadAndGetRoute()
By("Patch the clusterversion with the desired upgrade channel")
glog.V(90).Infof("this is the desired upgrade channel: %+v", desiredUpgradeChannel)
if desiredUpgradeChannel == "stable-4." {
desiredUpgradeChannel = version.Object.Spec.Channel
glog.V(90).Infof("clusterversion channel %s", desiredUpgradeChannel)
}
version, err = version.WithDesiredUpdateChannel(desiredUpgradeChannel).Update()
Expect(err).ToNot(HaveOccurred(), "error patching the desired upgrade channel")
glog.V(90).Infof("patched the clusterversion channel %s", desiredUpgradeChannel)
By("Get the desired update image")
desiredImage := AccelConfig.UpgradeTargetVersion
if desiredImage == "" {
desiredImage, err = version.GetNextUpdateVersionImage(upgradeparams.Z, false)
Expect(err).ToNot(HaveOccurred(), "error getting the next update image")
}
glog.V(90).Infof("got the desired update image in %s stream %s", upgradeparams.Z, desiredImage)
By("Patch the clusterversion with the desired upgrade image")
version, err = version.WithDesiredUpdateImage(desiredImage, true).Update()
Expect(err).ToNot(HaveOccurred(), "error patching the desired image")
Expect(version.Object.Spec.DesiredUpdate.Image).To(Equal(desiredImage))
glog.V(90).Infof("patched the clusterversion with desired image %s", desiredImage)
By("Wait until upgrade starts")
err = version.WaitUntilUpdateIsStarted(waitToUpgradeStart)
Expect(err).ToNot(HaveOccurred(), "the upgrade didn't start after %s", waitToUpgradeStart)
glog.V(90).Infof("upgrade has started")
By("Wait until upgrade completes")
err = version.WaitUntilUpdateIsCompleted(waitToUpgradeCompleted)
Expect(err).ToNot(HaveOccurred(), "the upgrade didn't complete after %s", waitToUpgradeCompleted)
glog.V(90).Infof("upgrade has completed")
By("Check that the clusterversion is updated to the desired version")
Expect(version.Object.Status.Desired.Image).To(Equal(desiredImage))
glog.V(90).Infof("upgrade to image %s has completed successfully", desiredImage)
By("Check that all the operators version is the desired version")
clusteroperatorList, err := clusteroperator.List(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "failed to get the clusteroperators list %v", err)
hasVersion, err := clusteroperator.VerifyClusterOperatorsVersion(version.Object.Status.Desired.Version,
clusteroperatorList)
Expect(err).NotTo(HaveOccurred(), "error while checking operators version")
Expect(hasVersion).To(BeTrue())
By("Check that no cluster operator is progressing")
cosStoppedProgressing, err := clusteroperator.
WaitForAllClusteroperatorsStopProgressing(HubAPIClient, time.Minute*5)
Expect(err).ToNot(HaveOccurred(), "error while waiting for cluster operators to stop progressing")
Expect(cosStoppedProgressing).To(BeTrue(), "error: some cluster operators are still progressing")
By("Check that all cluster operators are available")
cosAvailable, err := clusteroperator.
WaitForAllClusteroperatorsAvailable(HubAPIClient, time.Minute*5)
Expect(err).NotTo(HaveOccurred(), "error while waiting for cluster operators to become available")
Expect(cosAvailable).To(BeTrue(), "error: some cluster operators are not available")
By("Check that all pods are running in workload namespace")
workloadPods, err := pod.List(HubAPIClient, upgradeparams.TestNamespaceName)
Expect(err).NotTo(HaveOccurred(), "error listing pods in workload namespace %s", upgradeparams.TestNamespaceName)
Expect(len(workloadPods) > 0).To(BeTrue(),
"error: found no running pods in workload namespace %s", upgradeparams.TestNamespaceName)
for _, workloadPod := range workloadPods {
err := workloadPod.WaitUntilReady(time.Minute * 2)
Expect(err).To(BeNil(), "error waiting for workload pod to become ready")
}
verifyWorkloadReachable(workloadRoute)
})
AfterAll(func() {
By("Delete workload test namespace")
glog.V(90).Infof("Deleting test deployments")
deleteWorkloadNamespace()
})
})
}) | Global Variables: {'_': 'Describe("OCP_UPGRADE", Ordered, Label("minor"), func() {\n\tContext("OCP", func() {\n\t\tIt("should upgrade successfully", reportxml.ID("72245"), func() {\n\t\t\tBy("Get the clusterversion struct")\n\t\t\tversion, err := clusterversion.Pull(HubAPIClient)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error retrieving clusterversion")\n\t\t\tglog.V(90).Infof("got the clusterversion struct %+v", version)\n\n\t\t\tBy("Deploy a workload in the cluster, expose a service and create a route")\n\t\t\tworkloadRoute := startTestWorkloadAndGetRoute()\n\n\t\t\tBy("Patch the clusterversion with the desired upgrade channel")\n\t\t\tglog.V(90).Infof("this is the desired upgrade channel: %+v", desiredUpgradeChannel)\n\t\t\tif desiredUpgradeChannel == "stable-4." {\n\t\t\t\tdesiredUpgradeChannel = version.Object.Spec.Channel\n\t\t\t\tglog.V(90).Infof("clusterversion channel %s", desiredUpgradeChannel)\n\t\t\t}\n\t\t\tversion, err = version.WithDesiredUpdateChannel(desiredUpgradeChannel).Update()\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error patching the desired upgrade channel")\n\t\t\tglog.V(90).Infof("patched the clusterversion channel %s", desiredUpgradeChannel)\n\n\t\t\tBy("Get the desired update image")\n\t\t\tdesiredImage := AccelConfig.UpgradeTargetVersion\n\t\t\tif desiredImage == "" {\n\t\t\t\tdesiredImage, err = version.GetNextUpdateVersionImage(upgradeparams.Z, false)\n\t\t\t\tExpect(err).ToNot(HaveOccurred(), "error getting the next update image")\n\t\t\t}\n\t\t\tglog.V(90).Infof("got the desired update image in %s stream %s", upgradeparams.Z, desiredImage)\n\n\t\t\tBy("Patch the clusterversion with the desired upgrade image")\n\t\t\tversion, err = version.WithDesiredUpdateImage(desiredImage, true).Update()\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error patching the desired image")\n\t\t\tExpect(version.Object.Spec.DesiredUpdate.Image).To(Equal(desiredImage))\n\t\t\tglog.V(90).Infof("patched the clusterversion with desired image %s", desiredImage)\n\n\t\t\tBy("Wait until upgrade starts")\n\t\t\terr = version.WaitUntilUpdateIsStarted(waitToUpgradeStart)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "the upgrade didn\'t start after %s", waitToUpgradeStart)\n\t\t\tglog.V(90).Infof("upgrade has started")\n\n\t\t\tBy("Wait until upgrade completes")\n\t\t\terr = version.WaitUntilUpdateIsCompleted(waitToUpgradeCompleted)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "the upgrade didn\'t complete after %s", waitToUpgradeCompleted)\n\t\t\tglog.V(90).Infof("upgrade has completed")\n\n\t\t\tBy("Check that the clusterversion is updated to the desired version")\n\t\t\tExpect(version.Object.Status.Desired.Image).To(Equal(desiredImage))\n\t\t\tglog.V(90).Infof("upgrade to image %s has completed successfully", desiredImage)\n\n\t\t\tBy("Check that all the operators version is the desired version")\n\t\t\tclusteroperatorList, err := clusteroperator.List(HubAPIClient)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "failed to get the clusteroperators list %v", err)\n\t\t\thasVersion, err := clusteroperator.VerifyClusterOperatorsVersion(version.Object.Status.Desired.Version,\n\t\t\t\tclusteroperatorList)\n\t\t\tExpect(err).NotTo(HaveOccurred(), "error while checking operators version")\n\t\t\tExpect(hasVersion).To(BeTrue())\n\n\t\t\tBy("Check that no cluster operator is progressing")\n\t\t\tcosStoppedProgressing, err := clusteroperator.\n\t\t\t\tWaitForAllClusteroperatorsStopProgressing(HubAPIClient, time.Minute*5)\n\t\t\tExpect(err).ToNot(HaveOccurred(), "error while waiting for cluster operators to 
stop progressing")\n\t\t\tExpect(cosStoppedProgressing).To(BeTrue(), "error: some cluster operators are still progressing")\n\n\t\t\tBy("Check that all cluster operators are available")\n\t\t\tcosAvailable, err := clusteroperator.\n\t\t\t\tWaitForAllClusteroperatorsAvailable(HubAPIClient, time.Minute*5)\n\t\t\tExpect(err).NotTo(HaveOccurred(), "error while waiting for cluster operators to become available")\n\t\t\tExpect(cosAvailable).To(BeTrue(), "error: some cluster operators are not available")\n\n\t\t\tBy("Check that all pods are running in workload namespace")\n\t\t\tworkloadPods, err := pod.List(HubAPIClient, upgradeparams.TestNamespaceName)\n\t\t\tExpect(err).NotTo(HaveOccurred(), "error listing pods in workload namespace %s", upgradeparams.TestNamespaceName)\n\t\t\tExpect(len(workloadPods) > 0).To(BeTrue(),\n\t\t\t\t"error: found no running pods in workload namespace %s", upgradeparams.TestNamespaceName)\n\n\t\t\tfor _, workloadPod := range workloadPods {\n\t\t\t\terr := workloadPod.WaitUntilReady(time.Minute * 2)\n\t\t\t\tExpect(err).To(BeNil(), "error waiting for workload pod to become ready")\n\t\t\t}\n\n\t\t\tverifyWorkloadReachable(workloadRoute)\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tBy("Delete workload test namespace")\n\t\t\tglog.V(90).Infof("Deleting test deployments")\n\t\t\tdeleteWorkloadNamespace()\n\t\t})\n\t})\n})'} | Package Name: upgrade | |||
test case | eco-gotests | b36fb8db-cdc1-4aac-9700-f3de8931f83f | should upgrade successfully | Imports Used: ['"time"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/clusteroperator"', '"github.com/openshift-kni/eco-goinfra/pkg/clusterversion"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-goinfra/pkg/reportxml"', '"github.com/openshift-kni/eco-goinfra/pkg/route"', '"github.com/openshift-kni/eco-gotests/tests/accel/upgrade/internal/upgradeparams"'] | File Location: github.com/eco-gotests/tests/accel/upgrade/tests/upgrade.go | It("should upgrade successfully", reportxml.ID("72245"), func() {
By("Get the clusterversion struct")
version, err := clusterversion.Pull(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "error retrieving clusterversion")
glog.V(90).Infof("got the clusterversion struct %+v", version)
By("Deploy a workload in the cluster, expose a service and create a route")
workloadRoute := startTestWorkloadAndGetRoute()
By("Patch the clusterversion with the desired upgrade channel")
glog.V(90).Infof("this is the desired upgrade channel: %+v", desiredUpgradeChannel)
if desiredUpgradeChannel == "stable-4." {
desiredUpgradeChannel = version.Object.Spec.Channel
glog.V(90).Infof("clusterversion channel %s", desiredUpgradeChannel)
}
version, err = version.WithDesiredUpdateChannel(desiredUpgradeChannel).Update()
Expect(err).ToNot(HaveOccurred(), "error patching the desired upgrade channel")
glog.V(90).Infof("patched the clusterversion channel %s", desiredUpgradeChannel)
By("Get the desired update image")
desiredImage := AccelConfig.UpgradeTargetVersion
if desiredImage == "" {
desiredImage, err = version.GetNextUpdateVersionImage(upgradeparams.Z, false)
Expect(err).ToNot(HaveOccurred(), "error getting the next update image")
}
glog.V(90).Infof("got the desired update image in %s stream %s", upgradeparams.Z, desiredImage)
By("Patch the clusterversion with the desired upgrade image")
version, err = version.WithDesiredUpdateImage(desiredImage, true).Update()
Expect(err).ToNot(HaveOccurred(), "error patching the desired image")
Expect(version.Object.Spec.DesiredUpdate.Image).To(Equal(desiredImage))
glog.V(90).Infof("patched the clusterversion with desired image %s", desiredImage)
By("Wait until upgrade starts")
err = version.WaitUntilUpdateIsStarted(waitToUpgradeStart)
Expect(err).ToNot(HaveOccurred(), "the upgrade didn't start after %s", waitToUpgradeStart)
glog.V(90).Infof("upgrade has started")
By("Wait until upgrade completes")
err = version.WaitUntilUpdateIsCompleted(waitToUpgradeCompleted)
Expect(err).ToNot(HaveOccurred(), "the upgrade didn't complete after %s", waitToUpgradeCompleted)
glog.V(90).Infof("upgrade has completed")
By("Check that the clusterversion is updated to the desired version")
Expect(version.Object.Status.Desired.Image).To(Equal(desiredImage))
glog.V(90).Infof("upgrade to image %s has completed successfully", desiredImage)
By("Check that all the operators version is the desired version")
clusteroperatorList, err := clusteroperator.List(HubAPIClient)
Expect(err).ToNot(HaveOccurred(), "failed to get the clusteroperators list %v", err)
hasVersion, err := clusteroperator.VerifyClusterOperatorsVersion(version.Object.Status.Desired.Version,
clusteroperatorList)
Expect(err).NotTo(HaveOccurred(), "error while checking operators version")
Expect(hasVersion).To(BeTrue())
By("Check that no cluster operator is progressing")
cosStoppedProgressing, err := clusteroperator.
WaitForAllClusteroperatorsStopProgressing(HubAPIClient, time.Minute*5)
Expect(err).ToNot(HaveOccurred(), "error while waiting for cluster operators to stop progressing")
Expect(cosStoppedProgressing).To(BeTrue(), "error: some cluster operators are still progressing")
By("Check that all cluster operators are available")
cosAvailable, err := clusteroperator.
WaitForAllClusteroperatorsAvailable(HubAPIClient, time.Minute*5)
Expect(err).NotTo(HaveOccurred(), "error while waiting for cluster operators to become available")
Expect(cosAvailable).To(BeTrue(), "error: some cluster operators are not available")
By("Check that all pods are running in workload namespace")
workloadPods, err := pod.List(HubAPIClient, upgradeparams.TestNamespaceName)
Expect(err).NotTo(HaveOccurred(), "error listing pods in workload namespace %s", upgradeparams.TestNamespaceName)
Expect(len(workloadPods) > 0).To(BeTrue(),
"error: found no running pods in workload namespace %s", upgradeparams.TestNamespaceName)
for _, workloadPod := range workloadPods {
err := workloadPod.WaitUntilReady(time.Minute * 2)
Expect(err).To(BeNil(), "error waiting for workload pod to become ready")
}
verifyWorkloadReachable(workloadRoute)
}) | |||||
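The spec above finishes by calling verifyWorkloadReachable(workloadRoute); that helper's implementation is not part of this record. Purely as an illustration of what such a check could look like, here is a minimal sketch that polls the route host over plain HTTP with Gomega's Eventually. The function name, the package name, and the assumption that the workload answers over HTTP on the route host are all hypothetical.

package upgrade_test // hypothetical package name, used only for this sketch

import (
	"fmt"
	"net/http"
	"time"

	. "github.com/onsi/gomega"
)

// checkRouteReachable polls the given route host over plain HTTP until it
// answers with a 2xx status or the timeout expires. Illustrative only: the
// suite's real verifyWorkloadReachable helper may use a different scheme,
// client, or timeout.
func checkRouteReachable(host string) {
	url := fmt.Sprintf("http://%s", host)

	Eventually(func() bool {
		resp, err := http.Get(url)
		if err != nil {
			return false
		}
		defer resp.Body.Close()

		return resp.StatusCode >= 200 && resp.StatusCode < 300
	}, 2*time.Minute, 5*time.Second).Should(BeTrue(), "workload route %s never became reachable", url)
}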
file | eco-gotests | f16b20ab-c62a-4a57-aa4e-eeaf325fab7e | config.go | Imports Used: import (
"log"
"github.com/openshift-kni/eco-gotests/tests/internal/config"
) | File Location: github.com/eco-gotests/tests/assisted/internal/assistedconfig/config.go | package assistedconfig
import (
"log"
"github.com/openshift-kni/eco-gotests/tests/internal/config"
)
// AssistedConfig type contains assisted installer configuration.
type AssistedConfig struct {
*config.GeneralConfig
}
// NewAssistedConfig returns instance of AssistedConfig type.
func NewAssistedConfig() *AssistedConfig {
log.Print("Creating new AssistedConfig struct")
var assistedConfig AssistedConfig
assistedConfig.GeneralConfig = config.NewConfig()
return &assistedConfig
}
| Package Name: package assistedconfig | ||||
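Because GeneralConfig is embedded, its fields are promoted onto AssistedConfig and can be read directly on the returned value. A small usage sketch follows; the example function, the _test package name, and the assumption that DryRun is one of the promoted GeneralConfig fields are illustrative only.

package assistedconfig_test // hypothetical package name for this sketch

import (
	"fmt"

	"github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedconfig"
)

// ExampleNewAssistedConfig reads a promoted GeneralConfig field straight off
// the AssistedConfig value. DryRun is assumed to be such a field here.
func ExampleNewAssistedConfig() {
	cfg := assistedconfig.NewAssistedConfig()

	fmt.Printf("dry-run mode: %v\n", cfg.DryRun)
}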
function | eco-gotests | efb9ac04-f0de-4f6c-bb42-c1917d83f92f | NewAssistedConfig | Imports Used: ['"log"', '"github.com/openshift-kni/eco-gotests/tests/internal/config"'] | Structs Used: ['AssistedConfig'] | File Location: github.com/eco-gotests/tests/assisted/internal/assistedconfig/config.go | func NewAssistedConfig() *AssistedConfig {
log.Print("Creating new AssistedConfig struct")
var assistedConfig AssistedConfig
assistedConfig.GeneralConfig = config.NewConfig()
return &assistedConfig
} | Package Name: assistedconfig | |||
file | eco-gotests | 59f2f092-4109-487a-867a-610496944e57 | const.go | File Location: github.com/eco-gotests/tests/assisted/internal/assistedparams/const.go | package assistedparams
const (
// Label represents assisted label that can be used for test cases selection.
Label = "assisted"
)
| Package Name: package assistedparams | |||||
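The Label constant is meant to be attached to Ginkgo containers so the whole assisted suite can be selected with a label filter (for example, ginkgo --label-filter=assisted). A sketch of that wiring, with a placeholder spec body:

package assisted_test // hypothetical package name for this sketch

import (
	. "github.com/onsi/ginkgo/v2"

	"github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedparams"
)

// Attaching assistedparams.Label to the container makes every spec inside it
// selectable through Ginkgo's label filtering.
var _ = Describe("AssistedSuite", Label(assistedparams.Label), func() {
	It("runs only when the 'assisted' label is selected", func() {
		// placeholder body; real specs live in the suite's test files
	})
})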
file | eco-gotests | ac67ac53-9eff-45c0-aa70-1000c9aec32d | find.go | Imports Used: import (
"fmt"
"strings"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-gotests/tests/internal/cluster"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | package find
import (
"fmt"
"strings"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-gotests/tests/internal/cluster"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClusterVersion returns the Major.Minor part of a cluster's OCP version.
func ClusterVersion(clusterObj cluster.APIClientGetter) (string, error) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterObj)
if err != nil {
return "", err
}
if !clusterVersion.Exists() {
return "", fmt.Errorf("cluster version not found")
}
splitVersion := strings.Split(clusterVersion.Object.Status.Desired.Version, ".")
return fmt.Sprintf("%s.%s", splitVersion[0], splitVersion[1]), nil
}
// SpokeClusterName returns the spoke cluster name based on hub and spoke cluster apiclients.
func SpokeClusterName(hubAPIClient, spokeAPIClient *clients.Settings) (string, error) {
spokeClusterVersion, err := cluster.GetOCPClusterVersion(spokeAPIClient)
if err != nil {
return "", err
}
if !spokeClusterVersion.Exists() {
return "", fmt.Errorf("spoke cluster version not found")
}
spokeClusterID := spokeClusterVersion.Object.Spec.ClusterID
clusterDeployments, err := hive.ListClusterDeploymentsInAllNamespaces(hubAPIClient)
if err != nil {
return "", err
}
for _, clusterDeploymentBuilder := range clusterDeployments {
if clusterDeploymentBuilder.Object.Spec.ClusterMetadata != nil &&
clusterDeploymentBuilder.Object.Spec.ClusterMetadata.ClusterID == string(spokeClusterID) {
return clusterDeploymentBuilder.Object.Spec.ClusterName, nil
}
}
return "", fmt.Errorf("could not find ClusterDeployment from provided API clients")
}
// AssistedServicePod returns pod running assisted-service.
func AssistedServicePod(apiClient *clients.Settings) (*pod.Builder, error) {
return getPodBuilder(apiClient, "app=assisted-service")
}
// AssistedImageServicePod returns pod running assisted-image-service.
func AssistedImageServicePod(apiClient *clients.Settings) (*pod.Builder, error) {
return getPodBuilder(apiClient, "app=assisted-image-service")
}
// InfrastructureOperatorPod returns pod running infrastructure-operator.
func InfrastructureOperatorPod(apiClient *clients.Settings) (*pod.Builder, error) {
return getPodBuilder(apiClient, "control-plane=infrastructure-operator")
}
// getPodBuilder returns a pod Builder for the pod matching the provided label.
func getPodBuilder(apiClient *clients.Settings, label string) (*pod.Builder, error) {
if apiClient == nil {
return nil, fmt.Errorf("apiClient is nil")
}
podList, err := pod.ListInAllNamespaces(apiClient, metav1.ListOptions{LabelSelector: label})
if err != nil {
return nil, fmt.Errorf("failed to list pods on cluster: %w", err)
}
if len(podList) == 0 {
return nil, fmt.Errorf("pod with label '%s' not currently running", label)
}
if len(podList) > 1 {
return nil, fmt.Errorf("got unexpected pods when checking for pods with label '%s'", label)
}
return podList[0], nil
}
| Package Name: package find | ||||
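A short sketch of how these helpers are typically combined by a consumer: resolve the hub's Major.Minor version, derive the spoke cluster name from the matching ClusterDeployment, and locate the assisted-service pod. The wrapper function and package name are hypothetical; both arguments are assumed to be already-initialized *clients.Settings.

package find_test // hypothetical package name for this sketch

import (
	"fmt"

	"github.com/openshift-kni/eco-goinfra/pkg/clients"
	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"
)

// resolveTopology strings the find helpers together and reports what they
// returned. Any error is wrapped with the step that failed.
func resolveTopology(hubClient, spokeClient *clients.Settings) error {
	hubVersion, err := find.ClusterVersion(hubClient)
	if err != nil {
		return fmt.Errorf("failed to resolve hub version: %w", err)
	}

	spokeName, err := find.SpokeClusterName(hubClient, spokeClient)
	if err != nil {
		return fmt.Errorf("failed to resolve spoke cluster name: %w", err)
	}

	assistedPod, err := find.AssistedServicePod(hubClient)
	if err != nil {
		return fmt.Errorf("failed to find assisted-service pod: %w", err)
	}

	fmt.Printf("hub %s, spoke %s, assisted-service pod %s\n",
		hubVersion, spokeName, assistedPod.Definition.Name)

	return nil
}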
function | eco-gotests | 4648fc34-6ae0-4723-8c5c-d3bd860a6278 | ClusterVersion | Imports Used: ['"fmt"', '"strings"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | func ClusterVersion(clusterObj cluster.APIClientGetter) (string, error) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterObj)
if err != nil {
return "", err
}
if !clusterVersion.Exists() {
return "", fmt.Errorf("cluster version not found")
}
splitVersion := strings.Split(clusterVersion.Object.Status.Desired.Version, ".")
return fmt.Sprintf("%s.%s", splitVersion[0], splitVersion[1]), nil
} | Package Name: find | ||||
function | eco-gotests | e5bae1eb-6f44-474b-9ae4-27377fe504af | SpokeClusterName | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | func SpokeClusterName(hubAPIClient, spokeAPIClient *clients.Settings) (string, error) {
spokeClusterVersion, err := cluster.GetOCPClusterVersion(spokeAPIClient)
if err != nil {
return "", err
}
if !spokeClusterVersion.Exists() {
return "", fmt.Errorf("spoke cluster version not found")
}
spokeClusterID := spokeClusterVersion.Object.Spec.ClusterID
clusterDeployments, err := hive.ListClusterDeploymentsInAllNamespaces(hubAPIClient)
if err != nil {
return "", err
}
for _, clusterDeploymentBuilder := range clusterDeployments {
if clusterDeploymentBuilder.Object.Spec.ClusterMetadata != nil &&
clusterDeploymentBuilder.Object.Spec.ClusterMetadata.ClusterID == string(spokeClusterID) {
return clusterDeploymentBuilder.Object.Spec.ClusterName, nil
}
}
return "", fmt.Errorf("could not find ClusterDeployment from provided API clients")
} | Package Name: find | ||||
function | eco-gotests | 9265a751-d7a9-45ca-b8e6-3496b4634c57 | AssistedServicePod | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | func AssistedServicePod(apiClient *clients.Settings) (*pod.Builder, error) {
return getPodBuilder(apiClient, "app=assisted-service")
} | Package Name: find | ||||
function | eco-gotests | bc54b1f0-21cc-458a-97e6-01a6e10c3f02 | AssistedImageServicePod | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | func AssistedImageServicePod(apiClient *clients.Settings) (*pod.Builder, error) {
return getPodBuilder(apiClient, "app=assisted-image-service")
} | Package Name: find | ||||
function | eco-gotests | ecc68b04-e53b-4d9c-b2b3-dcde58ef68f2 | InfrastructureOperatorPod | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | func InfrastructureOperatorPod(apiClient *clients.Settings) (*pod.Builder, error) {
return getPodBuilder(apiClient, "control-plane=infrastructure-operator")
} | Package Name: find | ||||
function | eco-gotests | 097f6bf7-893b-4ff8-beed-c6a18d784747 | getPodBuilder | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/find/find.go | func getPodBuilder(apiClient *clients.Settings, label string) (*pod.Builder, error) {
if apiClient == nil {
return nil, fmt.Errorf("apiClient is nil")
}
podList, err := pod.ListInAllNamespaces(apiClient, metav1.ListOptions{LabelSelector: label})
if err != nil {
return nil, fmt.Errorf("failed to list pods on cluster: %w", err)
}
if len(podList) == 0 {
return nil, fmt.Errorf("pod with label '%s' not currently running", label)
}
if len(podList) > 1 {
return nil, fmt.Errorf("got unexpected pods when checking for pods with label '%s'", label)
}
return podList[0], nil
} | Package Name: find | ||||
file | eco-gotests | 0d770397-0185-47f4-bfba-4ff092d4bfb0 | installconfig.go | Imports Used: import (
installerTypes "github.com/openshift/installer/pkg/types"
"gopkg.in/yaml.v2"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/installconfig/installconfig.go | package installconfig
import (
installerTypes "github.com/openshift/installer/pkg/types"
"gopkg.in/yaml.v2"
)
// NewInstallConfigFromString returns an unmarshalled install-config from provided string.
func NewInstallConfigFromString(config string) (installerTypes.InstallConfig, error) {
var installConfigData installerTypes.InstallConfig
err := yaml.Unmarshal([]byte(config), &installConfigData)
if err != nil {
return installerTypes.InstallConfig{}, err
}
return installConfigData, nil
}
| Package Name: package installconfig | ||||
function | eco-gotests | 084735d1-8062-4723-8df6-9902a7b99c46 | NewInstallConfigFromString | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/installconfig/installconfig.go | func NewInstallConfigFromString(config string) (installerTypes.InstallConfig, error) {
var installConfigData installerTypes.InstallConfig
err := yaml.Unmarshal([]byte(config), &installConfigData)
if err != nil {
return installerTypes.InstallConfig{}, err
}
return installConfigData, nil
} | Package Name: installconfig | |||||
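Since the function simply hands the string to yaml.v2, a small document is enough to exercise it; anything missing from the YAML stays at its zero value in the returned InstallConfig. The sketch below parses a minimal compute-pool snippet; the example function and package name are illustrative only.

package installconfig_test // hypothetical package name for this sketch

import (
	"fmt"

	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/installconfig"
)

// ExampleNewInstallConfigFromString unmarshals a minimal install-config
// fragment and reads back the compute pool it declared.
func ExampleNewInstallConfigFromString() {
	raw := `
compute:
- name: worker
  replicas: 2
`

	installConfig, err := installconfig.NewInstallConfigFromString(raw)
	if err != nil {
		fmt.Printf("failed to parse install-config: %v\n", err)

		return
	}

	if len(installConfig.Compute) == 0 {
		fmt.Println("no compute pools found in install-config")

		return
	}

	fmt.Printf("first compute pool: %s\n", installConfig.Compute[0].Name)
}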
file | eco-gotests | 128db7f7-b7e1-4405-b866-9b7c7a90c87b | meets.go | Imports Used: import (
"fmt"
"net"
"time"
"github.com/hashicorp/go-version"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
"github.com/openshift-kni/eco-gotests/tests/internal/cluster"
configv1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | package meets
import (
"fmt"
"net"
"time"
"github.com/hashicorp/go-version"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
"github.com/openshift-kni/eco-gotests/tests/internal/cluster"
configv1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
)
// AllRequirements accepts multiple requirement functions to ensure the environment meets all requirements.
func AllRequirements(f ...func() (bool, string)) (bool, string) {
for _, req := range f {
met, msg := req()
if !met {
return met, msg
}
}
return true, ""
}
// HubInfrastructureOperandRunningRequirement ensures that both
// the assisted-service and assisted-image-service pods are running on the hub cluster.
func HubInfrastructureOperandRunningRequirement() (bool, string) {
servicePodBuilder := ZTPConfig.HubAssistedServicePod()
running, msg := checkPodRunning(servicePodBuilder)
if !running {
return running, msg
}
imageBuilder := ZTPConfig.HubAssistedImageServicePod()
return checkPodRunning(imageBuilder)
}
// SpokeAPIClientReadyRequirement checks that the spoke APIClient has been properly initialized.
func SpokeAPIClientReadyRequirement() (bool, string) {
if SpokeAPIClient == nil {
return false, "spoke APIClient has not been initialized"
}
return true, ""
}
// SpokeClusterImageSetVersionRequirement checks that the provided clusterimageset meets the version provided.
func SpokeClusterImageSetVersionRequirement(requiredVersion string) (bool, string) {
if ZTPConfig.SpokeClusterImageSet == "" {
return false, "Spoke clusterimageset version was not provided through environment"
}
_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.SpokeClusterImageSet)
if err != nil {
return false, fmt.Sprintf("ClusterImageSet could not be found: %v", err)
}
imgSetVersion, _ := version.NewVersion(ZTPConfig.SpokeClusterImageSet)
currentVersion, _ := version.NewVersion(requiredVersion)
if imgSetVersion.LessThan(currentVersion) {
return false, fmt.Sprintf("Discovered clusterimageset version does not meet requirement: %v",
imgSetVersion.String())
}
return true, ""
}
// HubOCPVersionRequirement checks that hub ocp version meets the version provided.
func HubOCPVersionRequirement(requiredVersion string) (bool, string) {
return ocpVersionRequirement(HubAPIClient, requiredVersion)
}
// SpokeOCPVersionRequirement checks that spoke ocp version meets the version provided.
func SpokeOCPVersionRequirement(requiredVersion string) (bool, string) {
return ocpVersionRequirement(SpokeAPIClient, requiredVersion)
}
// HubProxyConfiguredRequirement checks that the cluster proxy is configured on the hub.
func HubProxyConfiguredRequirement() (bool, string) {
return proxyConfiguredRequirement(HubAPIClient)
}
// SpokeProxyConfiguredRequirement checks that the cluster proxy is configured on the spoke.
func SpokeProxyConfiguredRequirement() (bool, string) {
return proxyConfiguredRequirement(SpokeAPIClient)
}
// HubDisconnectedRequirement checks that the hub is disconnected.
func HubDisconnectedRequirement() (bool, string) {
return disconnectedRequirement(HubAPIClient)
}
// SpokeDisconnectedRequirement checks that the spoke is disconnected.
func SpokeDisconnectedRequirement() (bool, string) {
return disconnectedRequirement(SpokeAPIClient)
}
// HubConnectedRequirement checks that the hub is connected.
func HubConnectedRequirement() (bool, string) {
return connectedRequirement(HubAPIClient)
}
// SpokeConnectedRequirement checks that the spoke is connected.
func SpokeConnectedRequirement() (bool, string) {
return connectedRequirement(SpokeAPIClient)
}
// HubSingleStackIPv4Requirement checks that the hub has IPv4 single-stack networking.
func HubSingleStackIPv4Requirement() (bool, string) {
return singleStackIPv4Requirement(HubAPIClient)
}
// SpokeSingleStackIPv4Requirement checks that the spoke has IPv4 single-stack networking.
func SpokeSingleStackIPv4Requirement() (bool, string) {
return singleStackIPv4Requirement(SpokeAPIClient)
}
// HubSingleStackIPv6Requirement checks that the hub has IPv6 single-stack networking.
func HubSingleStackIPv6Requirement() (bool, string) {
return singleStackIPv6Requirement(HubAPIClient)
}
// SpokeSingleStackIPv6Requirement checks that the spoke has IPv6 single-stack networking.
func SpokeSingleStackIPv6Requirement() (bool, string) {
return singleStackIPv6Requirement(SpokeAPIClient)
}
// HubDualStackRequirement checks that the hub has dual-stack networking.
func HubDualStackRequirement() (bool, string) {
return dualStackRequirement(HubAPIClient)
}
// SpokeDualStackRequirement checks that the spoke has dual-stack networking.
func SpokeDualStackRequirement() (bool, string) {
return dualStackRequirement(SpokeAPIClient)
}
// checkPodRunning waits for the specified pod to be running.
func checkPodRunning(podBuilder *pod.Builder) (bool, string) {
err := podBuilder.WaitUntilInStatus(corev1.PodRunning, time.Second*10)
if err != nil {
return false, fmt.Sprintf("%s pod found but was not running", podBuilder.Definition.Name)
}
return true, ""
}
// ocpVersionRequirement checks that the OCP version of the provided client meets requiredVersion.
func ocpVersionRequirement(clusterobj cluster.APIClientGetter, requiredVersion string) (bool, string) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get clusterversion from %s cluster: %v", getClusterType(clusterobj), err)
}
ocpVersion, _ := version.NewVersion(clusterVersion.Definition.Status.Desired.Version)
currentVersion, _ := version.NewVersion(requiredVersion)
if ocpVersion.LessThan(currentVersion) {
return false, fmt.Sprintf("Discovered openshift version does not meet requirement: %v",
ocpVersion.String())
}
return true, ""
}
// proxyConfiguredRequirement checks that the OCP proxy of the provided client is configured.
func proxyConfiguredRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
ocpProxy, err := cluster.GetOCPProxy(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster proxy from %s cluster: %v", getClusterType(clusterobj), err)
}
if ocpProxy.Object.Status.HTTPProxy == "" &&
ocpProxy.Object.Status.HTTPSProxy == "" &&
ocpProxy.Object.Status.NoProxy == "" {
return false, fmt.Sprintf("Discovered proxy not configured: %v", ocpProxy.Object.Status)
}
return true, ""
}
// disconnectedRequirement checks that the OCP cluster of the provided client is disconnected.
func disconnectedRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get clusterversion from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, condition := range clusterVersion.Object.Status.Conditions {
if condition.Type == configv1.RetrievedUpdates {
if condition.Reason == "RemoteFailed" {
return true, ""
}
return false, "Provided cluster is connected"
}
}
return false, fmt.Sprintf("Failed to determine if cluster is disconnected, "+
"could not find '%s' condition", configv1.RetrievedUpdates)
}
// connectedRequirement checks that the OCP cluster of the provided client is connected.
func connectedRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get clusterversion from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, condition := range clusterVersion.Object.Status.Conditions {
if condition.Type == configv1.RetrievedUpdates {
if condition.Reason == "RemoteFailed" {
return false, "Provided cluster is disconnected"
}
return true, ""
}
}
return false, fmt.Sprintf("Failed to determine if cluster is connected, "+
"could not find '%s' condition", configv1.RetrievedUpdates)
}
// singleStackIPv4Requirement checks that the OCP network of the provided client is single-stack ipv4.
func singleStackIPv4Requirement(clusterobj cluster.APIClientGetter) (bool, string) {
ocpNetwork, err := cluster.GetOCPNetworkConfig(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster network from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, clusterNet := range ocpNetwork.Object.Status.ClusterNetwork {
ip, _, _ := net.ParseCIDR(clusterNet.CIDR)
v4Check := ip.To4()
if v4Check == nil {
return false, "ClusterNetwork was not IPv4"
}
}
return true, ""
}
// singleStackIPv6Requirement checks that the OCP network of the provided client is single-stack ipv6.
func singleStackIPv6Requirement(clusterobj cluster.APIClientGetter) (bool, string) {
ocpNetwork, err := cluster.GetOCPNetworkConfig(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster network from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, clusterNet := range ocpNetwork.Object.Status.ClusterNetwork {
ip, _, _ := net.ParseCIDR(clusterNet.CIDR)
v4Check := ip.To4()
if v4Check != nil {
return false, "ClusterNetwork was not IPv6"
}
}
return true, ""
}
// dualStackRequirement checks that the OCP network of the provided client is dual-stack.
func dualStackRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
ipv4 := false
ipv6 := false
hubNetwork, err := cluster.GetOCPNetworkConfig(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster network from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, clusterNet := range hubNetwork.Object.Status.ClusterNetwork {
ip, _, _ := net.ParseCIDR(clusterNet.CIDR)
v4Check := ip.To4()
if v4Check != nil {
ipv4 = true
} else {
ipv6 = true
}
}
if !ipv4 || !ipv6 {
return false, "Only found cluster networks in one address family"
}
return true, ""
}
// getClusterType returns cluster type based on provided apiClient.
func getClusterType(clusterobj cluster.APIClientGetter) string {
if clusterobj == HubAPIClient {
return "hub"
}
return "spoke"
}
| Package Name: package meets | ||||
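The requirement helpers above are designed to be composed: AllRequirements evaluates them in order and returns the first failing message, which a suite can hand straight to Skip. A sketch of that pattern in a Ginkgo container (the container text and spec body are placeholders):

package spoke_test // hypothetical package name for this sketch

import (
	. "github.com/onsi/ginkgo/v2"

	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/meets"
)

// The first requirement that fails short-circuits the chain; its message is
// reused as the Skip reason so the report explains why the specs were skipped.
var _ = Describe("dual-stack spoke checks", Ordered, func() {
	BeforeAll(func() {
		met, msg := meets.AllRequirements(
			meets.SpokeAPIClientReadyRequirement,
			meets.HubInfrastructureOperandRunningRequirement,
			meets.SpokeDualStackRequirement,
		)
		if !met {
			Skip(msg)
		}
	})

	It("runs only when every requirement above is satisfied", func() {
		// test body elided in this sketch
	})
})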
function | eco-gotests | 5a63695d-fb5f-4f21-a082-9dc40a1a10f4 | AllRequirements | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func AllRequirements(f ...func() (bool, string)) (bool, string) {
for _, req := range f {
met, msg := req()
if !met {
return met, msg
}
}
return true, ""
} | Package Name: meets | |||||
function | eco-gotests | d860c517-a1fa-4010-9495-8b9c164d49f9 | HubInfrastructureOperandRunningRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubInfrastructureOperandRunningRequirement() (bool, string) {
servicePodBuilder := ZTPConfig.HubAssistedServicePod()
running, msg := checkPodRunning(servicePodBuilder)
if !running {
return running, msg
}
imageBuilder := ZTPConfig.HubAssistedImageServicePod()
return checkPodRunning(imageBuilder)
} | Package Name: meets | |||||
function | eco-gotests | 2963775e-ba68-426b-8fbf-c0979e298b76 | SpokeAPIClientReadyRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeAPIClientReadyRequirement() (bool, string) {
if SpokeAPIClient == nil {
return false, "spoke APIClient has not been initialized"
}
return true, ""
} | Package Name: meets | |||||
function | eco-gotests | 78b780f2-a63f-4820-b090-586fa9ef7547 | SpokeClusterImageSetVersionRequirement | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeClusterImageSetVersionRequirement(requiredVersion string) (bool, string) {
if ZTPConfig.SpokeClusterImageSet == "" {
return false, "Spoke clusterimageset version was not provided through environment"
}
_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.SpokeClusterImageSet)
if err != nil {
return false, fmt.Sprintf("ClusterImageSet could not be found: %v", err)
}
imgSetVersion, _ := version.NewVersion(ZTPConfig.SpokeClusterImageSet)
currentVersion, _ := version.NewVersion(requiredVersion)
if imgSetVersion.LessThan(currentVersion) {
return false, fmt.Sprintf("Discovered clusterimageset version does not meet requirement: %v",
imgSetVersion.String())
}
return true, ""
} | Package Name: meets | ||||
function | eco-gotests | d256d77d-9c6f-4323-abfe-06ec6ac872b3 | HubOCPVersionRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubOCPVersionRequirement(requiredVersion string) (bool, string) {
return ocpVersionRequirement(HubAPIClient, requiredVersion)
} | Package Name: meets | |||||
function | eco-gotests | c14ac354-22cf-4e4b-9f37-b04e8b48e16a | SpokeOCPVersionRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeOCPVersionRequirement(requiredVersion string) (bool, string) {
return ocpVersionRequirement(SpokeAPIClient, requiredVersion)
} | Package Name: meets | |||||
function | eco-gotests | e8eb3c94-7057-4f89-a5ea-94f05e69e277 | HubProxyConfiguredRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubProxyConfiguredRequirement() (bool, string) {
return proxyConfiguredRequirement(HubAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | ac026fef-1689-4fcd-95c2-cad5f90f5115 | SpokeProxyConfiguredRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeProxyConfiguredRequirement() (bool, string) {
return proxyConfiguredRequirement(SpokeAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | aa1ac7cd-ff7e-4a5e-b6a1-342544523c54 | HubDisconnectedRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubDisconnectedRequirement() (bool, string) {
return disconnectedRequirement(HubAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | be06ef63-cbab-4f81-95f0-e6a53e56cf57 | SpokeDisconnectedRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeDisconnectedRequirement() (bool, string) {
return disconnectedRequirement(SpokeAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | 833952d9-9184-455a-a89d-3f3b5795281b | HubConnectedRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubConnectedRequirement() (bool, string) {
return connectedRequirement(HubAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | d954b347-339e-4e53-9db3-ba3d63c21c23 | SpokeConnectedRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeConnectedRequirement() (bool, string) {
return connectedRequirement(SpokeAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | c10ff803-307f-4754-bac2-1088743d730e | HubSingleStackIPv4Requirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubSingleStackIPv4Requirement() (bool, string) {
return singleStackIPv4Requirement(HubAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | 0f785bf6-d24c-4d5f-957c-401d09c95376 | SpokeSingleStackIPv4Requirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeSingleStackIPv4Requirement() (bool, string) {
return singleStackIPv4Requirement(SpokeAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | 518da529-e476-48c4-b057-2e5293a8d388 | HubSingleStackIPv6Requirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubSingleStackIPv6Requirement() (bool, string) {
return singleStackIPv6Requirement(HubAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | c4f293a9-8e84-453f-9ddb-d07180da7599 | SpokeSingleStackIPv6Requirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeSingleStackIPv6Requirement() (bool, string) {
return singleStackIPv6Requirement(SpokeAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | 47d2cf44-5314-4a41-a5ee-41889ba968b6 | HubDualStackRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func HubDualStackRequirement() (bool, string) {
return dualStackRequirement(HubAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | 268a62d7-68c5-4144-bdd9-06cbfcfcae11 | SpokeDualStackRequirement | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func SpokeDualStackRequirement() (bool, string) {
return dualStackRequirement(SpokeAPIClient)
} | Package Name: meets | |||||
function | eco-gotests | bd2ee70d-ab0c-4a91-bc0d-9d3b27915189 | checkPodRunning | Imports Used: ['"fmt"', '"time"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func checkPodRunning(podBuilder *pod.Builder) (bool, string) {
err := podBuilder.WaitUntilInStatus(corev1.PodRunning, time.Second*10)
if err != nil {
return false, fmt.Sprintf("%s pod found but was not running", podBuilder.Definition.Name)
}
return true, ""
} | Package Name: meets | ||||
function | eco-gotests | 9e7ecc6f-31d9-4bfc-94eb-78902511c584 | ocpVersionRequirement | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func ocpVersionRequirement(clusterobj cluster.APIClientGetter, requiredVersion string) (bool, string) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get clusterversion from %s cluster: %v", getClusterType(clusterobj), err)
}
ocpVersion, _ := version.NewVersion(clusterVersion.Definition.Status.Desired.Version)
currentVersion, _ := version.NewVersion(requiredVersion)
if ocpVersion.LessThan(currentVersion) {
return false, fmt.Sprintf("Discovered openshift version does not meet requirement: %v",
ocpVersion.String())
}
return true, ""
} | Package Name: meets | ||||
function | eco-gotests | 37995d60-f1f3-4392-adea-17c7199bea6f | proxyConfiguredRequirement | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func proxyConfiguredRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
ocpProxy, err := cluster.GetOCPProxy(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster proxy from %s cluster: %v", getClusterType(clusterobj), err)
}
if ocpProxy.Object.Status.HTTPProxy == "" &&
ocpProxy.Object.Status.HTTPSProxy == "" &&
ocpProxy.Object.Status.NoProxy == "" {
return false, fmt.Sprintf("Discovered proxy not configured: %v", ocpProxy.Object.Status)
}
return true, ""
} | Package Name: meets | ||||
function | eco-gotests | 496ad415-6887-4d7a-805c-57a6d359c790 | disconnectedRequirement | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func disconnectedRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get clusterversion from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, condition := range clusterVersion.Object.Status.Conditions {
if condition.Type == configv1.RetrievedUpdates {
if condition.Reason == "RemoteFailed" {
return true, ""
}
return false, "Provided cluster is connected"
}
}
return false, fmt.Sprintf("Failed to determine if cluster is disconnected, "+
"could not find '%s' condition", configv1.RetrievedUpdates)
} | Package Name: meets | ||||
function | eco-gotests | 10c71ef2-e92f-440c-a89a-d07eddd694d1 | connectedRequirement | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func connectedRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
clusterVersion, err := cluster.GetOCPClusterVersion(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get clusterversion from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, condition := range clusterVersion.Object.Status.Conditions {
if condition.Type == configv1.RetrievedUpdates {
if condition.Reason == "RemoteFailed" {
return false, "Provided cluster is disconnected"
}
return true, ""
}
}
return false, fmt.Sprintf("Failed to determine if cluster is connected, "+
"could not find '%s' condition", configv1.RetrievedUpdates)
} | Package Name: meets | ||||
function | eco-gotests | 600df0cc-45fb-472f-b6a0-f7cab5763998 | singleStackIPv4Requirement | Imports Used: ['"fmt"', '"net"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func singleStackIPv4Requirement(clusterobj cluster.APIClientGetter) (bool, string) {
ocpNetwork, err := cluster.GetOCPNetworkConfig(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster network from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, clusterNet := range ocpNetwork.Object.Status.ClusterNetwork {
ip, _, _ := net.ParseCIDR(clusterNet.CIDR)
v4Check := ip.To4()
if v4Check == nil {
return false, "ClusterNetwork was not IPv4"
}
}
return true, ""
} | Package Name: meets | ||||
function | eco-gotests | ffee1340-f6a8-408d-8cae-3f562f5f45aa | singleStackIPv6Requirement | Imports Used: ['"fmt"', '"net"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func singleStackIPv6Requirement(clusterobj cluster.APIClientGetter) (bool, string) {
ocpNetwork, err := cluster.GetOCPNetworkConfig(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster network from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, clusterNet := range ocpNetwork.Object.Status.ClusterNetwork {
ip, _, _ := net.ParseCIDR(clusterNet.CIDR)
v4Check := ip.To4()
if v4Check != nil {
return false, "ClusterNetwork was not IPv6"
}
}
return true, ""
} | Package Name: meets | ||||
function | eco-gotests | 81708412-b1b6-44e5-a923-2ca63ec4c6ba | dualStackRequirement | Imports Used: ['"fmt"', '"net"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func dualStackRequirement(clusterobj cluster.APIClientGetter) (bool, string) {
ipv4 := false
ipv6 := false
hubNetwork, err := cluster.GetOCPNetworkConfig(clusterobj)
if err != nil {
return false, fmt.Sprintf("Failed to get cluster network from %s cluster: %v", getClusterType(clusterobj), err)
}
for _, clusterNet := range hubNetwork.Object.Status.ClusterNetwork {
ip, _, _ := net.ParseCIDR(clusterNet.CIDR)
v4Check := ip.To4()
if v4Check != nil {
ipv4 = true
} else {
ipv6 = true
}
}
if !ipv4 || !ipv6 {
return false, "Only found cluster networks in one address family"
}
return true, ""
} | Package Name: meets | ||||
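Every single-stack and dual-stack check above reduces to the same address-family test: net.ParseCIDR extracts the IP, and To4() is non-nil only for IPv4 addresses. A self-contained illustration of just that classification:

package main

import (
	"fmt"
	"net"
)

// Classify each CIDR by address family the same way the requirement
// functions do: To4() returns nil for IPv6 addresses and non-nil for IPv4.
func main() {
	for _, cidr := range []string{"10.128.0.0/14", "fd01::/48"} {
		ip, _, err := net.ParseCIDR(cidr)
		if err != nil {
			fmt.Printf("%s: invalid CIDR: %v\n", cidr, err)

			continue
		}

		if ip.To4() != nil {
			fmt.Printf("%s is IPv4\n", cidr)
		} else {
			fmt.Printf("%s is IPv6\n", cidr)
		}
	}
}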
function | eco-gotests | d58cb1c1-3c65-41b8-871e-11e54e17e675 | getClusterType | Imports Used: ['"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/meets/meets.go | func getClusterType(clusterobj cluster.APIClientGetter) string {
if clusterobj == HubAPIClient {
return "hub"
}
return "spoke"
} | Package Name: meets | ||||
file | eco-gotests | 8c9417da-35bc-46e6-aeb3-81106e71c3bd | setup.go | Imports Used: import (
"fmt"
"math/rand"
"time"
"github.com/openshift-kni/eco-goinfra/pkg/assisted"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"
"github.com/openshift-kni/eco-goinfra/pkg/secret"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/api/core/v1"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | package setup
import (
"fmt"
"math/rand"
"time"
"github.com/openshift-kni/eco-goinfra/pkg/assisted"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"
"github.com/openshift-kni/eco-goinfra/pkg/secret"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/api/core/v1"
)
// SpokeClusterResources contains necessary resources for creating a spoke cluster.
type SpokeClusterResources struct {
Name string
apiClient *clients.Settings
err error
Namespace *namespace.Builder
PullSecret *secret.Builder
ClusterDeployment *hive.ClusterDeploymentBuilder
AgentClusterInstall *assisted.AgentClusterInstallBuilder
InfraEnv *assisted.InfraEnvBuilder
}
// NewSpokeCluster creates a new instance of SpokeClusterResources.
func NewSpokeCluster(apiClient *clients.Settings) *SpokeClusterResources {
return &SpokeClusterResources{apiClient: apiClient}
}
// WithName sets an explicit name for the spoke cluster.
func (spoke *SpokeClusterResources) WithName(name string) *SpokeClusterResources {
if name == "" {
spoke.err = fmt.Errorf("spoke name cannot be empty")
}
spoke.Name = name
return spoke
}
// WithAutoGeneratedName generates a random name for the spoke cluster.
func (spoke *SpokeClusterResources) WithAutoGeneratedName() *SpokeClusterResources {
spoke.Name = generateName(12)
return spoke
}
// WithDefaultNamespace creates a default namespace for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultNamespace() *SpokeClusterResources {
spoke.Namespace = namespace.NewBuilder(spoke.apiClient, spoke.Name)
return spoke
}
// WithDefaultPullSecret creates a default pull-secret for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultPullSecret() *SpokeClusterResources {
spoke.PullSecret = secret.NewBuilder(
spoke.apiClient,
fmt.Sprintf("%s-pull-secret", spoke.Name),
spoke.Name,
corev1.SecretTypeDockerConfigJson).WithData(ZTPConfig.HubPullSecret.Object.Data)
return spoke
}
// WithDefaultClusterDeployment creates a default clusterdeployment for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultClusterDeployment() *SpokeClusterResources {
spoke.ClusterDeployment = hive.NewABMClusterDeploymentBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
"assisted.test.com",
spoke.Name,
metav1.LabelSelector{
MatchLabels: map[string]string{
"dummy": "label",
},
}).WithPullSecret(fmt.Sprintf("%s-pull-secret", spoke.Name))
return spoke
}
// WithDefaultIPv4AgentClusterInstall creates a default agentclusterinstall with IPv4 networking for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultIPv4AgentClusterInstall() *SpokeClusterResources {
spoke.AgentClusterInstall = assisted.NewAgentClusterInstallBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{{
CIDR: "10.128.0.0/14",
HostPrefix: 23,
}},
ServiceNetwork: []string{"172.30.0.0/16"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("192.168.254.5").WithIngressVip("192.168.254.10")
return spoke
}
// WithDefaultIPv6AgentClusterInstall creates a default agentclusterinstall with IPv6 networking for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultIPv6AgentClusterInstall() *SpokeClusterResources {
spoke.AgentClusterInstall = assisted.NewAgentClusterInstallBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{{
CIDR: "fd01::/48",
HostPrefix: 64,
}},
ServiceNetwork: []string{"fd02::/112"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("fd2e:6f44:5dd8:1::5").WithIngressVip("fd2e:6f44:5dd8:1::10")
return spoke
}
// WithDefaultDualStackAgentClusterInstall creates a default agentclusterinstall
// with dual-stack networking for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultDualStackAgentClusterInstall() *SpokeClusterResources {
spoke.AgentClusterInstall = assisted.NewAgentClusterInstallBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{
{
CIDR: "10.128.0.0/14",
HostPrefix: 23,
},
{
CIDR: "fd01::/48",
HostPrefix: 64,
},
},
ServiceNetwork: []string{"172.30.0.0/16", "fd02::/112"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("192.168.254.5").WithIngressVip("192.168.254.10")
return spoke
}
// WithDefaultInfraEnv creates a default infraenv for the spoke cluster.
func (spoke *SpokeClusterResources) WithDefaultInfraEnv() *SpokeClusterResources {
spoke.InfraEnv = assisted.NewInfraEnvBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
fmt.Sprintf("%s-pull-secret", spoke.Name))
return spoke
}
// Create creates the instantiated spoke cluster resources.
func (spoke *SpokeClusterResources) Create() (*SpokeClusterResources, error) {
if spoke.Namespace != nil && spoke.err == nil {
spoke.Namespace, spoke.err = spoke.Namespace.Create()
}
if spoke.PullSecret != nil && spoke.err == nil {
spoke.PullSecret, spoke.err = spoke.PullSecret.Create()
}
if spoke.ClusterDeployment != nil && spoke.err == nil {
spoke.ClusterDeployment, spoke.err = spoke.ClusterDeployment.Create()
}
if spoke.AgentClusterInstall != nil && spoke.err == nil {
spoke.AgentClusterInstall, spoke.err = spoke.AgentClusterInstall.Create()
}
if spoke.InfraEnv != nil && spoke.err == nil {
spoke.InfraEnv, spoke.err = spoke.InfraEnv.Create()
}
return spoke, spoke.err
}
// Delete removes all instantiated spoke cluster resources.
func (spoke *SpokeClusterResources) Delete() error {
if spoke.InfraEnv != nil {
spoke.err = spoke.InfraEnv.Delete()
}
if spoke.AgentClusterInstall != nil {
spoke.err = spoke.AgentClusterInstall.Delete()
}
if spoke.ClusterDeployment != nil {
spoke.err = spoke.ClusterDeployment.Delete()
}
if spoke.PullSecret != nil {
spoke.err = spoke.PullSecret.Delete()
}
if spoke.Namespace != nil {
spoke.err = spoke.Namespace.DeleteAndWait(time.Second * 120)
}
return spoke.err
}
// generateName generates a random string matching the length supplied.
func generateName(n int) string {
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
| Package Name: package setup | ||||
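The builder methods above are meant to be chained: errors are stored on the receiver and only surface from Create, and Delete tears the resources down in the reverse of their creation order. A usage sketch follows; the wrapper function and package name are hypothetical, and WithDefaultPullSecret assumes the package-level ZTPConfig (from ztpinittools) has already been initialized, since it copies the hub pull-secret data.

package setup_test // hypothetical package name for this sketch

import (
	"fmt"

	"github.com/openshift-kni/eco-goinfra/pkg/clients"
	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/setup"
)

// createDisposableSpoke builds a full set of spoke resources with a generated
// name, then removes them again before returning.
func createDisposableSpoke(hubClient *clients.Settings) error {
	spoke, err := setup.NewSpokeCluster(hubClient).
		WithAutoGeneratedName().
		WithDefaultNamespace().
		WithDefaultPullSecret().
		WithDefaultClusterDeployment().
		WithDefaultIPv4AgentClusterInstall().
		WithDefaultInfraEnv().
		Create()
	if err != nil {
		return fmt.Errorf("failed to create spoke resources: %w", err)
	}

	fmt.Printf("created spoke cluster resources for %s\n", spoke.Name)

	if err := spoke.Delete(); err != nil {
		return fmt.Errorf("failed to delete spoke %s: %w", spoke.Name, err)
	}

	return nil
}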
function | eco-gotests | 486f4dbb-a112-402b-81fa-3951bba1f1fc | NewSpokeCluster | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/clients"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func NewSpokeCluster(apiClient *clients.Settings) *SpokeClusterResources {
return &SpokeClusterResources{apiClient: apiClient}
} | Package Name: setup | |||
function | eco-gotests | 969edfe0-c84f-411d-8e9a-73f4e6bf5b61 | WithName | Imports Used: ['"fmt"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithName(name string) *SpokeClusterResources {
if name == "" {
spoke.err = fmt.Errorf("spoke name cannot be empty")
}
spoke.Name = name
return spoke
} | Package Name: setup | |||
function | eco-gotests | f317292e-bb93-4dc0-8ab1-a2dcb4fdea64 | WithAutoGeneratedName | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithAutoGeneratedName() *SpokeClusterResources {
spoke.Name = generateName(12)
return spoke
} | Package Name: setup | ||||
function | eco-gotests | 6475ccb9-778a-4334-8f64-1ca8fa4033b0 | WithDefaultNamespace | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/namespace"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultNamespace() *SpokeClusterResources {
spoke.Namespace = namespace.NewBuilder(spoke.apiClient, spoke.Name)
return spoke
} | Package Name: setup | |||
function | eco-gotests | d18a4833-0b86-48bd-9a78-e34b012a533c | WithDefaultPullSecret | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-goinfra/pkg/secret"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultPullSecret() *SpokeClusterResources {
spoke.PullSecret = secret.NewBuilder(
spoke.apiClient,
fmt.Sprintf("%s-pull-secret", spoke.Name),
spoke.Name,
corev1.SecretTypeDockerConfigJson).WithData(ZTPConfig.HubPullSecret.Object.Data)
return spoke
} | Package Name: setup | |||
function | eco-gotests | 9310f70d-81c0-478c-9d97-cb83a8808f66 | WithDefaultClusterDeployment | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"', '"github.com/openshift-kni/eco-goinfra/pkg/secret"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultClusterDeployment() *SpokeClusterResources {
spoke.ClusterDeployment = hive.NewABMClusterDeploymentBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
"assisted.test.com",
spoke.Name,
metav1.LabelSelector{
MatchLabels: map[string]string{
"dummy": "label",
},
}).WithPullSecret(fmt.Sprintf("%s-pull-secret", spoke.Name))
return spoke
} | Package Name: setup | |||
function | eco-gotests | 3063ea8b-46dc-40d4-8b48-1ede4ce9e16c | WithDefaultIPv4AgentClusterInstall | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultIPv4AgentClusterInstall() *SpokeClusterResources {
spoke.AgentClusterInstall = assisted.NewAgentClusterInstallBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{{
CIDR: "10.128.0.0/14",
HostPrefix: 23,
}},
ServiceNetwork: []string{"172.30.0.0/16"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("192.168.254.5").WithIngressVip("192.168.254.10")
return spoke
} | Package Name: setup | |||
function | eco-gotests | 47cbcf4c-c57d-4333-a93a-c7e903772adc | WithDefaultIPv6AgentClusterInstall | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultIPv6AgentClusterInstall() *SpokeClusterResources {
spoke.AgentClusterInstall = assisted.NewAgentClusterInstallBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{{
CIDR: "fd01::/48",
HostPrefix: 64,
}},
ServiceNetwork: []string{"fd02::/112"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("fd2e:6f44:5dd8:1::5").WithIngressVip("fd2e:6f44:5dd8:1::10")
return spoke
} | Package Name: setup | |||
function | eco-gotests | c951ea15-4372-499e-884a-81cc0f6fba2a | WithDefaultDualStackAgentClusterInstall | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultDualStackAgentClusterInstall() *SpokeClusterResources {
spoke.AgentClusterInstall = assisted.NewAgentClusterInstallBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
spoke.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{
{
CIDR: "10.128.0.0/14",
HostPrefix: 23,
},
{
CIDR: "fd01::/48",
HostPrefix: 64,
},
},
ServiceNetwork: []string{"172.30.0.0/16", "fd02::/112"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("192.168.254.5").WithIngressVip("192.168.254.10")
return spoke
} | Package Name: setup | |||
function | eco-gotests | dcc0f193-0b42-4db5-b9ab-9f65f31fa977 | WithDefaultInfraEnv | Imports Used: ['"fmt"', '"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/secret"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) WithDefaultInfraEnv() *SpokeClusterResources {
spoke.InfraEnv = assisted.NewInfraEnvBuilder(
spoke.apiClient,
spoke.Name,
spoke.Name,
fmt.Sprintf("%s-pull-secret", spoke.Name))
return spoke
} | Package Name: setup | |||
function | eco-gotests | 268b7711-13f9-4ffe-92d6-2ec102971fe6 | Create | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) Create() (*SpokeClusterResources, error) {
if spoke.Namespace != nil && spoke.err == nil {
spoke.Namespace, spoke.err = spoke.Namespace.Create()
}
if spoke.PullSecret != nil && spoke.err == nil {
spoke.PullSecret, spoke.err = spoke.PullSecret.Create()
}
if spoke.ClusterDeployment != nil && spoke.err == nil {
spoke.ClusterDeployment, spoke.err = spoke.ClusterDeployment.Create()
}
if spoke.AgentClusterInstall != nil && spoke.err == nil {
spoke.AgentClusterInstall, spoke.err = spoke.AgentClusterInstall.Create()
}
if spoke.InfraEnv != nil && spoke.err == nil {
spoke.InfraEnv, spoke.err = spoke.InfraEnv.Create()
}
return spoke, spoke.err
} | Package Name: setup | ||||
function | eco-gotests | 62dd715d-9a7d-4625-98ed-f772d798f2ff | Delete | Imports Used: ['"time"'] | Structs Used: ['SpokeClusterResources'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func (spoke *SpokeClusterResources) Delete() error {
if spoke.InfraEnv != nil {
spoke.err = spoke.InfraEnv.Delete()
}
if spoke.AgentClusterInstall != nil {
spoke.err = spoke.AgentClusterInstall.Delete()
}
if spoke.ClusterDeployment != nil {
spoke.err = spoke.ClusterDeployment.Delete()
}
if spoke.PullSecret != nil {
spoke.err = spoke.PullSecret.Delete()
}
if spoke.Namespace != nil {
spoke.err = spoke.Namespace.DeleteAndWait(time.Second * 120)
}
return spoke.err
} | Package Name: setup | |||
function | eco-gotests | b58ee7f0-5ab3-41ac-8f3d-303fbed2ff62 | generateName | Imports Used: ['"math/rand"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/setup/setup.go | func generateName(n int) string {
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
} | Package Name: setup | ||||
file | eco-gotests | 924af7fb-4bfa-4296-8316-0c75b379f5f2 | config.go | Imports Used: import (
"fmt"
"os"
"strings"
"github.com/golang/glog"
"github.com/kelseyhightower/envconfig"
"github.com/openshift-kni/eco-goinfra/pkg/assisted"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/configmap"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/secret"
"github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedconfig"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"
"github.com/openshift-kni/eco-gotests/tests/internal/cluster"
. "github.com/openshift-kni/eco-gotests/tests/internal/inittools"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpconfig/config.go | package ztpconfig
import (
"fmt"
"os"
"strings"
"github.com/golang/glog"
"github.com/kelseyhightower/envconfig"
"github.com/openshift-kni/eco-goinfra/pkg/assisted"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/configmap"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/pod"
"github.com/openshift-kni/eco-goinfra/pkg/secret"
"github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedconfig"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"
"github.com/openshift-kni/eco-gotests/tests/internal/cluster"
. "github.com/openshift-kni/eco-gotests/tests/internal/inittools"
)
// ZTPConfig type contains ztp configuration.
type ZTPConfig struct {
*assistedconfig.AssistedConfig
*HubConfig
*SpokeConfig
}
// HubConfig contains environment information related to the hub cluster.
type HubConfig struct {
HubOCPVersion string
HubOCPXYVersion string
HubAgentServiceConfig *assisted.AgentServiceConfigBuilder
hubAssistedServicePod *pod.Builder
hubAssistedImageServicePod *pod.Builder
HubPullSecret *secret.Builder
HubInstallConfig *configmap.Builder
HubPullSecretOverride map[string][]byte
HubPullSecretOverridePath string `envconfig:"ECO_ASSISTED_ZTP_HUB_PULL_SECRET_OVERRIDE_PATH"`
}
// SpokeConfig contains environment information related to the spoke cluster.
type SpokeConfig struct {
SpokeAPIClient *clients.Settings
SpokeOCPVersion string
SpokeOCPXYVersion string
SpokeClusterName string
SpokeKubeConfig string `envconfig:"ECO_ASSISTED_ZTP_SPOKE_KUBECONFIG"`
SpokeClusterImageSet string `envconfig:"ECO_ASSISTED_ZTP_SPOKE_CLUSTERIMAGESET"`
SpokeClusterDeployment *hive.ClusterDeploymentBuilder
SpokeAgentClusterInstall *assisted.AgentClusterInstallBuilder
SpokeInfraEnv *assisted.InfraEnvBuilder
SpokeInstallConfig *configmap.Builder
}
// NewZTPConfig returns instance of ZTPConfig type.
func NewZTPConfig() *ZTPConfig {
glog.V(ztpparams.ZTPLogLevel).Info("Creating new ZTPConfig struct")
var ztpconfig ZTPConfig
ztpconfig.AssistedConfig = assistedconfig.NewAssistedConfig()
ztpconfig.HubConfig = new(HubConfig)
ztpconfig.SpokeConfig = new(SpokeConfig)
if err := ztpconfig.newHubConfig(); err != nil {
APIClient = nil
return &ztpconfig
}
if err := ztpconfig.newSpokeConfig(); err != nil {
ztpconfig.SpokeConfig.SpokeAPIClient = nil
return &ztpconfig
}
return &ztpconfig
}
// newHubConfig creates a new HubConfig member for a ZTPConfig.
func (ztpconfig *ZTPConfig) newHubConfig() error {
glog.V(ztpparams.ZTPLogLevel).Info("Creating new HubConfig struct")
ztpconfig.HubConfig = new(HubConfig)
err := envconfig.Process("eco_assisted_ztp_hub_", ztpconfig.HubConfig)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to instantiate HubConfig: %v", err)
}
if ztpconfig.HubConfig.HubPullSecretOverridePath != "" {
content, err := os.ReadFile(ztpconfig.HubConfig.HubPullSecretOverridePath)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to read hub pull-secret override path: %v", err)
}
ztpconfig.HubConfig.HubPullSecretOverride = map[string][]byte{
".dockerconfigjson": content,
}
}
ztpconfig.HubConfig.HubOCPVersion, err = find.ClusterVersion(APIClient)
if err != nil {
return err
}
splitVersion := strings.Split(ztpconfig.HubConfig.HubOCPVersion, ".")
if len(splitVersion) >= 2 {
ztpconfig.HubConfig.HubOCPXYVersion = fmt.Sprintf("%s.%s", splitVersion[0], splitVersion[1])
}
ztpconfig.HubConfig.HubAgentServiceConfig, err = assisted.PullAgentServiceConfig(APIClient)
if err != nil {
return err
}
if ztpconfig.HubConfig.HubAgentServiceConfig != nil {
assistedPod := ztpconfig.HubAssistedServicePod()
if assistedPod == nil {
return fmt.Errorf("failed to find hub assisted service pod")
}
assistedImagePod := ztpconfig.HubAssistedImageServicePod()
if assistedImagePod == nil {
return fmt.Errorf("failed to find hub assisted image service pod")
}
}
ztpconfig.HubConfig.HubPullSecret, err = cluster.GetOCPPullSecret(APIClient)
if err != nil {
return err
}
if ztpconfig.DryRun {
return nil
}
ztpconfig.HubConfig.HubInstallConfig, err = configmap.Pull(APIClient, "cluster-config-v1", "kube-system")
if err != nil {
return err
}
return nil
}
// newSpokeConfig creates a new SpokeConfig member for a ZTPConfig.
func (ztpconfig *ZTPConfig) newSpokeConfig() error {
glog.V(ztpparams.ZTPLogLevel).Info("Creating new SpokeConfig struct")
err := envconfig.Process("eco_assisted_ztp_spoke_", ztpconfig.SpokeConfig)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to instantiate SpokeConfig: %v", err)
return err
}
if ztpconfig.SpokeConfig.SpokeKubeConfig != "" {
glog.V(ztpparams.ZTPLogLevel).Infof("Creating spoke api client from %s", ztpconfig.SpokeConfig.SpokeKubeConfig)
if ztpconfig.SpokeConfig.SpokeAPIClient = clients.New(
ztpconfig.SpokeConfig.SpokeKubeConfig); ztpconfig.SpokeConfig.SpokeAPIClient == nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to load provided spoke kubeconfig: %v",
ztpconfig.SpokeConfig.SpokeKubeConfig)
return fmt.Errorf("failed to load provided spoke kubeconfig: %v", ztpconfig.SpokeConfig.SpokeKubeConfig)
}
ztpconfig.SpokeConfig.SpokeClusterName, err =
find.SpokeClusterName(APIClient, ztpconfig.SpokeConfig.SpokeAPIClient)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke cluster name: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeOCPVersion, err = find.ClusterVersion(ztpconfig.SpokeConfig.SpokeAPIClient)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke cluster version: %v", err)
return err
}
splitVersion := strings.Split(ztpconfig.SpokeConfig.SpokeOCPVersion, ".")
if len(splitVersion) >= 2 {
ztpconfig.SpokeConfig.SpokeOCPXYVersion = fmt.Sprintf("%s.%s", splitVersion[0], splitVersion[1])
}
ztpconfig.SpokeConfig.SpokeClusterDeployment, err = hive.PullClusterDeployment(APIClient,
ztpconfig.SpokeConfig.SpokeClusterName, ztpconfig.SpokeConfig.SpokeClusterName)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke cluster deployment: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeAgentClusterInstall, err = assisted.PullAgentClusterInstall(APIClient,
ztpconfig.SpokeConfig.SpokeClusterName, ztpconfig.SpokeConfig.SpokeClusterName)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke agent cluster install: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeInfraEnv, err = assisted.PullInfraEnvInstall(APIClient,
ztpconfig.SpokeConfig.SpokeClusterName, ztpconfig.SpokeConfig.SpokeClusterName)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke infra env: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeInstallConfig, err = configmap.Pull(ztpconfig.SpokeConfig.SpokeAPIClient,
"cluster-config-v1", "kube-system")
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke install config: %v", err)
return err
}
} else {
ztpconfig.SpokeConfig.SpokeAPIClient = nil
}
if ztpconfig.SpokeConfig.SpokeClusterImageSet == "" {
ztpconfig.SpokeConfig.SpokeClusterImageSet = ztpconfig.HubOCPXYVersion
}
return nil
}
// HubAssistedServicePod retrieves the assisted service pod from the hub
// and populates hubAssistedServicePod.
func (ztpconfig *ZTPConfig) HubAssistedServicePod() *pod.Builder {
if ztpconfig.hubAssistedServicePod == nil || !ztpconfig.hubAssistedServicePod.Exists() {
ztpconfig.hubAssistedServicePod, _ = find.AssistedServicePod(APIClient)
}
return ztpconfig.hubAssistedServicePod
}
// HubAssistedImageServicePod retrieves the assisted image service pod from the hub
// and populates hubAssistedImageServicePod.
func (ztpconfig *ZTPConfig) HubAssistedImageServicePod() *pod.Builder {
if ztpconfig.hubAssistedImageServicePod == nil || !ztpconfig.hubAssistedImageServicePod.Exists() {
ztpconfig.hubAssistedImageServicePod, _ = find.AssistedImageServicePod(APIClient)
}
return ztpconfig.hubAssistedImageServicePod
}
| Package Name: package ztpconfig | ||||
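The config.go record above wires hub and spoke settings from `eco_assisted_ztp_*` environment variables and leaves the spoke API client nil when no spoke kubeconfig is supplied. As a rough illustration only, the helper below is hypothetical (the function name, package name, and printed message are invented); it relies solely on `NewZTPConfig` and the fields shown in the record:

```go
package example

import (
	"fmt"

	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpconfig"
)

// describeSpoke is a hypothetical helper: it builds the ZTP configuration and
// reports the spoke cluster discovered through the optional spoke kubeconfig.
func describeSpoke() error {
	cfg := ztpconfig.NewZTPConfig()

	// SpokeAPIClient stays nil when ECO_ASSISTED_ZTP_SPOKE_KUBECONFIG is unset
	// or the provided kubeconfig could not be loaded.
	if cfg.SpokeConfig == nil || cfg.SpokeConfig.SpokeAPIClient == nil {
		return fmt.Errorf("spoke API client is not available")
	}

	fmt.Printf("spoke %s runs OCP %s (hub is %s)\n",
		cfg.SpokeConfig.SpokeClusterName,
		cfg.SpokeConfig.SpokeOCPVersion,
		cfg.HubOCPXYVersion)

	return nil
}
```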
function | eco-gotests | 46da86e5-f090-4f81-907f-e7c4867643c8 | NewZTPConfig | Imports Used: ['"github.com/golang/glog"', '"github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedconfig"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"'] | Structs Used: ['ZTPConfig', 'HubConfig', 'SpokeConfig'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpconfig/config.go | func NewZTPConfig() *ZTPConfig {
glog.V(ztpparams.ZTPLogLevel).Info("Creating new ZTPConfig struct")
var ztpconfig ZTPConfig
ztpconfig.AssistedConfig = assistedconfig.NewAssistedConfig()
ztpconfig.HubConfig = new(HubConfig)
ztpconfig.SpokeConfig = new(SpokeConfig)
if err := ztpconfig.newHubConfig(); err != nil {
APIClient = nil
return &ztpconfig
}
if err := ztpconfig.newSpokeConfig(); err != nil {
ztpconfig.SpokeConfig.SpokeAPIClient = nil
return &ztpconfig
}
return &ztpconfig
} | Package Name: ztpconfig | |||
function | eco-gotests | bf197037-8796-4021-aca5-a9993cbbefcb | newHubConfig | Imports Used: ['"fmt"', '"os"', '"strings"', '"github.com/golang/glog"', '"github.com/kelseyhightower/envconfig"', '"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/configmap"', '"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-goinfra/pkg/secret"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | Structs Used: ['ZTPConfig', 'HubConfig'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpconfig/config.go | func (ztpconfig *ZTPConfig) newHubConfig() error {
glog.V(ztpparams.ZTPLogLevel).Info("Creating new HubConfig struct")
ztpconfig.HubConfig = new(HubConfig)
err := envconfig.Process("eco_assisted_ztp_hub_", ztpconfig.HubConfig)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to instantiate HubConfig: %v", err)
}
if ztpconfig.HubConfig.HubPullSecretOverridePath != "" {
content, err := os.ReadFile(ztpconfig.HubConfig.HubPullSecretOverridePath)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to read hub pull-secret override path: %v", err)
}
ztpconfig.HubConfig.HubPullSecretOverride = map[string][]byte{
".dockerconfigjson": content,
}
}
ztpconfig.HubConfig.HubOCPVersion, err = find.ClusterVersion(APIClient)
if err != nil {
return err
}
splitVersion := strings.Split(ztpconfig.HubConfig.HubOCPVersion, ".")
if len(splitVersion) >= 2 {
ztpconfig.HubConfig.HubOCPXYVersion = fmt.Sprintf("%s.%s", splitVersion[0], splitVersion[1])
}
ztpconfig.HubConfig.HubAgentServiceConfig, err = assisted.PullAgentServiceConfig(APIClient)
if err != nil {
return err
}
if ztpconfig.HubConfig.HubAgentServiceConfig != nil {
assistedPod := ztpconfig.HubAssistedServicePod()
if assistedPod == nil {
return fmt.Errorf("failed to find hub assisted service pod")
}
assistedImagePod := ztpconfig.HubAssistedImageServicePod()
if assistedImagePod == nil {
return fmt.Errorf("failed to find hub assisted image service pod")
}
}
ztpconfig.HubConfig.HubPullSecret, err = cluster.GetOCPPullSecret(APIClient)
if err != nil {
return err
}
if ztpconfig.DryRun {
return nil
}
ztpconfig.HubConfig.HubInstallConfig, err = configmap.Pull(APIClient, "cluster-config-v1", "kube-system")
if err != nil {
return err
}
return nil
} | Package Name: ztpconfig | |||
function | eco-gotests | e3903cf9-ab57-4297-b9f5-2fa39956ca9f | newSpokeConfig | Imports Used: ['"fmt"', '"strings"', '"github.com/golang/glog"', '"github.com/kelseyhightower/envconfig"', '"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/clients"', '"github.com/openshift-kni/eco-goinfra/pkg/configmap"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"', '"github.com/openshift-kni/eco-gotests/tests/internal/cluster"'] | Structs Used: ['ZTPConfig', 'SpokeConfig'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpconfig/config.go | func (ztpconfig *ZTPConfig) newSpokeConfig() error {
glog.V(ztpparams.ZTPLogLevel).Info("Creating new SpokeConfig struct")
err := envconfig.Process("eco_assisted_ztp_spoke_", ztpconfig.SpokeConfig)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to instantiate SpokeConfig: %v", err)
return err
}
if ztpconfig.SpokeConfig.SpokeKubeConfig != "" {
glog.V(ztpparams.ZTPLogLevel).Infof("Creating spoke api client from %s", ztpconfig.SpokeConfig.SpokeKubeConfig)
if ztpconfig.SpokeConfig.SpokeAPIClient = clients.New(
ztpconfig.SpokeConfig.SpokeKubeConfig); ztpconfig.SpokeConfig.SpokeAPIClient == nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to load provided spoke kubeconfig: %v",
ztpconfig.SpokeConfig.SpokeKubeConfig)
return fmt.Errorf("failed to load provided spoke kubeconfig: %v", ztpconfig.SpokeConfig.SpokeKubeConfig)
}
ztpconfig.SpokeConfig.SpokeClusterName, err =
find.SpokeClusterName(APIClient, ztpconfig.SpokeConfig.SpokeAPIClient)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke cluster name: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeOCPVersion, err = find.ClusterVersion(ztpconfig.SpokeConfig.SpokeAPIClient)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke cluster version: %v", err)
return err
}
splitVersion := strings.Split(ztpconfig.SpokeConfig.SpokeOCPVersion, ".")
if len(splitVersion) >= 2 {
ztpconfig.SpokeConfig.SpokeOCPXYVersion = fmt.Sprintf("%s.%s", splitVersion[0], splitVersion[1])
}
ztpconfig.SpokeConfig.SpokeClusterDeployment, err = hive.PullClusterDeployment(APIClient,
ztpconfig.SpokeConfig.SpokeClusterName, ztpconfig.SpokeConfig.SpokeClusterName)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke cluster deployment: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeAgentClusterInstall, err = assisted.PullAgentClusterInstall(APIClient,
ztpconfig.SpokeConfig.SpokeClusterName, ztpconfig.SpokeConfig.SpokeClusterName)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke agent cluster install: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeInfraEnv, err = assisted.PullInfraEnvInstall(APIClient,
ztpconfig.SpokeConfig.SpokeClusterName, ztpconfig.SpokeConfig.SpokeClusterName)
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke infra env: %v", err)
return err
}
ztpconfig.SpokeConfig.SpokeInstallConfig, err = configmap.Pull(ztpconfig.SpokeConfig.SpokeAPIClient,
"cluster-config-v1", "kube-system")
if err != nil {
glog.V(ztpparams.ZTPLogLevel).Infof("failed to find spoke install config: %v", err)
return err
}
} else {
ztpconfig.SpokeConfig.SpokeAPIClient = nil
}
if ztpconfig.SpokeConfig.SpokeClusterImageSet == "" {
ztpconfig.SpokeConfig.SpokeClusterImageSet = ztpconfig.HubOCPXYVersion
}
return nil
} | Package Name: ztpconfig | |||
function | eco-gotests | a0162fcc-cfb8-4a30-84f1-d2a924c1d1fb | HubAssistedServicePod | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"'] | Structs Used: ['ZTPConfig'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpconfig/config.go | func (ztpconfig *ZTPConfig) HubAssistedServicePod() *pod.Builder {
if ztpconfig.hubAssistedServicePod == nil || !ztpconfig.hubAssistedServicePod.Exists() {
ztpconfig.hubAssistedServicePod, _ = find.AssistedServicePod(APIClient)
}
return ztpconfig.hubAssistedServicePod
} | Package Name: ztpconfig | |||
function | eco-gotests | 31185000-e48c-45d9-a9cb-a90a28a8241b | HubAssistedImageServicePod | Imports Used: ['"github.com/openshift-kni/eco-goinfra/pkg/pod"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/find"'] | Structs Used: ['ZTPConfig'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpconfig/config.go | func (ztpconfig *ZTPConfig) HubAssistedImageServicePod() *pod.Builder {
if ztpconfig.hubAssistedImageServicePod == nil || !ztpconfig.hubAssistedImageServicePod.Exists() {
ztpconfig.hubAssistedImageServicePod, _ = find.AssistedImageServicePod(APIClient)
}
return ztpconfig.hubAssistedImageServicePod
} | Package Name: ztpconfig | |||
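The two accessor records above re-pull the assisted-service and assisted-image-service pods whenever the cached builder is nil or no longer exists, and they return nil when the pod cannot be found. A minimal, hypothetical sketch of a guard built on top of them (the helper name and error messages are invented; only the two accessor methods come from the records):

```go
package example

import (
	"fmt"

	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpconfig"
)

// requireAssistedPods is a hypothetical guard a spec could call before
// exercising assisted-service functionality: it fails early if either hub pod
// is missing.
func requireAssistedPods(cfg *ztpconfig.ZTPConfig) error {
	if cfg.HubAssistedServicePod() == nil {
		return fmt.Errorf("assisted-service pod not found on hub")
	}

	if cfg.HubAssistedImageServicePod() == nil {
		return fmt.Errorf("assisted-image-service pod not found on hub")
	}

	return nil
}
```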
file | eco-gotests | 8a868acf-7f2a-4b92-bb39-f5dbc8b20132 | ztpinittools.go | Imports Used: import (
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpconfig"
"github.com/openshift-kni/eco-gotests/tests/internal/inittools"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpinittools/ztpinittools.go | package ztpinittools
import (
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpconfig"
"github.com/openshift-kni/eco-gotests/tests/internal/inittools"
)
var (
// HubAPIClient provides API access to hub cluster.
HubAPIClient *clients.Settings
// SpokeAPIClient provides API access to spoke cluster.
SpokeAPIClient *clients.Settings
// ZTPConfig provides access to general configuration parameters.
ZTPConfig *ztpconfig.ZTPConfig
)
func init() {
HubAPIClient = inittools.APIClient
ZTPConfig = ztpconfig.NewZTPConfig()
SpokeAPIClient = ZTPConfig.SpokeAPIClient
}
| Package Name: package ztpinittools | ||||
function | eco-gotests | 48fcf7f7-f65e-4fb8-81dc-e5e568ec858d | init | Imports Used: ['"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpconfig"', '"github.com/openshift-kni/eco-gotests/tests/internal/inittools"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpinittools/ztpinittools.go | func init() {
HubAPIClient = inittools.APIClient
ZTPConfig = ztpconfig.NewZTPConfig()
SpokeAPIClient = ZTPConfig.SpokeAPIClient
} | Package Name: ztpinittools | ||||
file | eco-gotests | fc4892d0-5bcf-497c-8de0-9e455fbdbb03 | const.go | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpparams/const.go | package ztpparams
const (
// Label represents ztp label that can be used for test cases selection.
Label = "ztp"
// ZTPLogLevel custom loglevel for the ZTP testing verbose mode.
ZTPLogLevel = 50
)
| Package Name: package ztpparams | |||||
file | eco-gotests | c3e41af3-5b08-45ec-9282-0e163392e00f | ztpvars.go | Imports Used: import "github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedparams" | File Location: github.com/eco-gotests/tests/assisted/ztp/internal/ztpparams/ztpvars.go | package ztpparams
import "github.com/openshift-kni/eco-gotests/tests/assisted/internal/assistedparams"
var (
// Labels represents the range of labels that can be used for test cases selection.
Labels = []string{assistedparams.Label, Label}
)
| Package Name: package ztpparams | ||||
file | eco-gotests | 99a05999-02b8-436b-8030-16291b9d2585 | operator_suite_test.go | Imports Used: import (
"testing"
"runtime"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/meets"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"
_ "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/tests"
"github.com/openshift-kni/eco-gotests/tests/internal/reporter"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/operator_suite_test.go | package operator_test
import (
"testing"
"runtime"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/meets"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"
_ "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/tests"
"github.com/openshift-kni/eco-gotests/tests/internal/reporter"
)
var _, currentFile, _, _ = runtime.Caller(0)
func TestOperator(t *testing.T) {
_, reporterConfig := GinkgoConfiguration()
reporterConfig.JUnitReport = ZTPConfig.GetJunitReportPath(currentFile)
RegisterFailHandler(Fail)
RunSpecs(t, "Operator Suite", Label(tsparams.Labels...), reporterConfig)
}
var _ = BeforeSuite(func() {
By("Check if hub has valid apiClient")
if HubAPIClient == nil {
Skip("Cannot run operator suite when hub has nil api client")
}
By("Check that assisted is running")
operandRunning, msg := meets.HubInfrastructureOperandRunningRequirement()
if !operandRunning {
Skip(msg)
}
})
var _ = ReportAfterSuite("", func(report Report) {
reportxml.Create(report, ZTPConfig.GetReportPath(), ZTPConfig.TCPrefix)
})
var _ = JustAfterEach(func() {
reporter.ReportIfFailed(
CurrentSpecReport(),
currentFile,
tsparams.ReporterNamespacesToDump,
tsparams.ReporterCRDsToDump)
})
| Package Name: package operator_test | ||||
function | eco-gotests | 8df0eb56-af37-45e5-b77a-1457fe0f1ab5 | TestOperator | Imports Used: ['"testing"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/operator_suite_test.go | func TestOperator(t *testing.T) {
_, reporterConfig := GinkgoConfiguration()
reporterConfig.JUnitReport = ZTPConfig.GetJunitReportPath(currentFile)
RegisterFailHandler(Fail)
RunSpecs(t, "Operator Suite", Label(tsparams.Labels...), reporterConfig)
} | Global Variables: {'_': 'JustAfterEach(func() {\n\treporter.ReportIfFailed(\n\t\tCurrentSpecReport(),\n\t\tcurrentFile,\n\t\ttsparams.ReporterNamespacesToDump,\n\t\ttsparams.ReporterCRDsToDump)\n})'} | Package Name: operator_test | |||
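TestOperator feeds tsparams.Labels into RunSpecs, so specs from this suite can also be selected by label at run time. The sketch below is hypothetical (the test name and the narrowed label value are only an example) and assumes Ginkgo v2's SuiteConfig.LabelFilter field, which mirrors the --ginkgo.label-filter command-line flag:

```go
package operator_example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	// Importing the tests package registers the suite's specs, as in the
	// original operator_suite_test.go.
	_ "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/tests"
)

// TestOperatorDualstackOnly runs only the specs labelled
// dualstack-ipv4-first-aci instead of the whole operator suite.
func TestOperatorDualstackOnly(t *testing.T) {
	suiteConfig, reporterConfig := GinkgoConfiguration()
	suiteConfig.LabelFilter = "dualstack-ipv4-first-aci"

	RegisterFailHandler(Fail)
	RunSpecs(t, "Operator Suite (dualstack only)", suiteConfig, reporterConfig)
}
```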
file | eco-gotests | 10cde528-0100-487e-bf72-4a7c68b03249 | const.go | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/internal/tsparams/const.go | package tsparams
const (
// LabelSuite represents operator label that can be used for test cases selection.
LabelSuite = "operator"
// LabelPlatformSelectionTestCases represents platform-selection label that can be used for test cases selection.
LabelPlatformSelectionTestCases = "platform-selection"
// LabelImageServiceStatefulsetTestCases represents image-service-statefulset label that can be used
// for test cases selection.
LabelImageServiceStatefulsetTestCases = "image-service-statefulset"
// LabelConvergedFlowEnabled represents converged-flow-enabled label that can be used for test cases selection.
LabelConvergedFlowEnabled = "converged-flow-enabled"
// LabelAssistedPodsNoRestartsTestCases represents assisted-pods-no-restarts label that can be used
// for test cases selection.
LabelAssistedPodsNoRestartsTestCases = "assisted-pods-no-restarts"
// LabelOSImageVersionTestCases represents os-image-version-match label that can be used
// for test case selection.
LabelOSImageVersionTestCases = "os-image-version-match"
// LabelUnauthenticatedRegistriesTestCases represents unauthenticated-registries label that can be used
// for test cases selection.
LabelUnauthenticatedRegistriesTestCases = "unauthenticated-registries"
// LabelFipsVerificationTestCases represents fips-verification label that can be used for test cases
// selection.
LabelFipsVerificationTestCases = "fips-verification"
// LabelAdditionalTrustBundle represents additional-trust-bundle label that can be used
// for test cases selection.
LabelAdditionalTrustBundle = "additional-trust-bundle"
// LabelMultiArchitectureImageTestCases represents infraenv-multiarch-image label that can be used
// for test cases selection.
LabelMultiArchitectureImageTestCases = "infraenv-multiarch-image"
// LabelBuildArtifcatRootFSTestCases represents build-artifact-rootfs label that can be used
// for test cases selection.
LabelBuildArtifcatRootFSTestCases = "build-artifact-rootfs"
// LabelHTTPWebserverSetup represents label-http-web-server-setup label that can be used for test cases selection.
LabelHTTPWebserverSetup = "label-http-web-server-setup"
// LabelDualstackIPv4FirstACI represents dualstack-ipv4-first-aci label that can be used
// for test case selection.
LabelDualstackIPv4FirstACI = "dualstack-ipv4-first-aci"
// LabelInfraOperatorProxyDeploy represents infrastructure-operator-proxy-deploy label that can be used
// for test case selection.
LabelInfraOperatorProxyDeploy = "infrastructure-operator-proxy-deploy"
// LabelClusterImageSetMatchingOSImage represents clusterimageset-matching-os-image label that can be used
// for test case selection.
LabelClusterImageSetMatchingOSImage = "clusterimageset-matching-os-image"
// LabelAssistedHiveOperatorDeploy represents assisted-hive-operator-deploy label that can be used
// for test case selection.
LabelAssistedHiveOperatorDeploy = "assisted-hive-operator-deploy"
)
| Package Name: package tsparams | |||||
file | eco-gotests | baf4cf3c-180a-4634-a37c-c3c979967140 | operatorvars.go | Imports Used: import (
bmhv1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
hiveextV1Beta1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"
agentInstallV1Beta1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/v1beta1"
hivev1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/hive/api/v1"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"
"github.com/openshift-kni/k8sreporter"
corev1 "k8s.io/api/core/v1"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/internal/tsparams/operatorvars.go | package tsparams
import (
bmhv1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
hiveextV1Beta1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"
agentInstallV1Beta1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/v1beta1"
hivev1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/hive/api/v1"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"
"github.com/openshift-kni/k8sreporter"
corev1 "k8s.io/api/core/v1"
)
var (
// Labels represents the range of labels that can be used for test cases selection.
Labels = append(ztpparams.Labels, LabelSuite)
// ReporterNamespacesToDump tells the reporter from where to collect logs.
ReporterNamespacesToDump = map[string]string{
"multicluster-engine": "mce",
}
// ReporterCRDsToDump tells the reporter what CRs to dump.
ReporterCRDsToDump = []k8sreporter.CRData{
{Cr: &corev1.PodList{}},
{Cr: &corev1.SecretList{}},
{Cr: &agentInstallV1Beta1.AgentServiceConfigList{}},
{Cr: &hivev1.ClusterDeploymentList{}},
{Cr: &hiveextV1Beta1.AgentClusterInstallList{}},
{Cr: &agentInstallV1Beta1.InfraEnvList{}},
{Cr: &bmhv1alpha1.BareMetalHostList{}},
{Cr: &agentInstallV1Beta1.AgentList{}},
}
// MCENameSpace is the namespace used by the assisted service.
MCENameSpace = "multicluster-engine"
)
| Package Name: package tsparams | ||||
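Because ReporterNamespacesToDump and ReporterCRDsToDump are package-level variables, a spec that creates its own resources can extend them before the failure reporter runs; the dualstack test in the next record does exactly this for its temporary namespace. A hypothetical sketch (the namespace name and the extra ConfigMap list type are invented for illustration):

```go
package example

import (
	"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"
	"github.com/openshift-kni/k8sreporter"
	corev1 "k8s.io/api/core/v1"
)

// registerDumpTargets is a hypothetical helper a spec could call from its
// BeforeAll: it asks the failure reporter to also dump the spec's namespace
// and any ConfigMap objects.
func registerDumpTargets() {
	tsparams.ReporterNamespacesToDump["mytest-spoke"] = "mytest-spoke namespace"

	tsparams.ReporterCRDsToDump = append(tsparams.ReporterCRDsToDump,
		k8sreporter.CRData{Cr: &corev1.ConfigMapList{}})
}
```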
file | eco-gotests | 9337d842-b6b2-47df-a14e-4f42dc6adbde | aci-dualstack-ipv4-first.go | Imports Used: import (
"fmt"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/assisted"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"
"github.com/openshift-kni/eco-goinfra/pkg/secret"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/tests/aci-dualstack-ipv4-first.go | package operator_test
import (
"fmt"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/assisted"
"github.com/openshift-kni/eco-goinfra/pkg/hive"
"github.com/openshift-kni/eco-goinfra/pkg/namespace"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"
"github.com/openshift-kni/eco-goinfra/pkg/secret"
. "github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpinittools"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"
"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
dualstackTestSpoke = "dualstacktest"
)
var _ = Describe(
"DualstackIPv4First",
Ordered,
ContinueOnFailure,
Label(tsparams.LabelDualstackIPv4FirstACI), func() {
When("on MCE 2.0 and above", func() {
BeforeAll(func() {
By("Check that the ClusterImageSet exists")
_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.HubOCPXYVersion)
if err != nil {
Skip("The ClusterImageSet must exist")
}
nsBuilder = namespace.NewBuilder(HubAPIClient, dualstackTestSpoke)
tsparams.ReporterNamespacesToDump[dualstackTestSpoke] = "dualstacktest namespace"
})
AfterEach(func() {
By("Delete the temporary namespace after test")
if nsBuilder.Exists() {
err := nsBuilder.DeleteAndWait(time.Second * 300)
Expect(err).ToNot(HaveOccurred(), "error deleting the temporary namespace after test")
}
})
It("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {
agentClusterInstallBuilder := createDualstackSpokeClusterResources()
By("Waiting for specific error message from SpecSynced condition")
Eventually(func() (string, error) {
agentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()
if err != nil {
return "", err
}
for _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {
if condition.Type == v1beta1.ClusterSpecSyncedCondition {
return condition.Message, nil
}
}
return "", nil
}).WithTimeout(time.Minute*2).Should(
Equal("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),
"didn't get the expected message from SpecSynced condition")
})
})
})
// createDualstackSpokeClusterResources is a helper function that creates
// spoke cluster resources required for the test.
func createDualstackSpokeClusterResources() *assisted.AgentClusterInstallBuilder {
By("Create namespace for the test")
if nsBuilder.Exists() {
glog.V(ztpparams.ZTPLogLevel).Infof("The namespace '%s' already exists",
nsBuilder.Object.Name)
} else {
// create the namespace
glog.V(ztpparams.ZTPLogLevel).Infof("Creating the namespace: %v", dualstackTestSpoke)
_, err := nsBuilder.Create()
Expect(err).ToNot(HaveOccurred(), "error creating namespace '%s' : %v ",
nsBuilder.Definition.Name, err)
}
By("Create pull-secret in the new namespace")
testSecret, err := secret.NewBuilder(
HubAPIClient,
fmt.Sprintf("%s-pull-secret", dualstackTestSpoke),
dualstackTestSpoke,
corev1.SecretTypeDockerConfigJson).WithData(ZTPConfig.HubPullSecret.Object.Data).Create()
Expect(err).ToNot(HaveOccurred(), "error occurred when creating pull-secret")
By("Create clusterdeployment in the new namespace")
testClusterDeployment, err := hive.NewABMClusterDeploymentBuilder(
HubAPIClient,
dualstackTestSpoke,
dualstackTestSpoke,
dualstackTestSpoke,
"assisted.test.com",
dualstackTestSpoke,
metav1.LabelSelector{
MatchLabels: map[string]string{
"dummy": "label",
},
}).WithPullSecret(testSecret.Definition.Name).Create()
Expect(err).ToNot(HaveOccurred(), "error occurred when creating clusterdeployment")
By("Create agentclusterinstall in the new namespace")
agentClusterInstallBuilder, err := assisted.NewAgentClusterInstallBuilder(
HubAPIClient,
dualstackTestSpoke,
dualstackTestSpoke,
testClusterDeployment.Object.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{
{
CIDR: "fd01::/48",
HostPrefix: 64,
},
{
CIDR: "11.128.0.0/14",
HostPrefix: 24,
},
},
MachineNetwork: []v1beta1.MachineNetworkEntry{
{
CIDR: "fd2e:6f44:5dd8:5::/64",
},
{
CIDR: "192.168.254.0/24",
},
},
ServiceNetwork: []string{"172.30.0.0/16", "fd02::/112"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("192.168.254.5").
WithIngressVip("192.168.254.10").Create()
Expect(err).ToNot(HaveOccurred(), "error creating agentclusterinstall")
return agentClusterInstallBuilder
}
| Package Name: package operator_test | ||||
function | eco-gotests | da7c7a8e-7774-4cae-9ee8-e1063f71676f | createDualstackSpokeClusterResources | Imports Used: ['"fmt"', '"github.com/golang/glog"', '"github.com/openshift-kni/eco-goinfra/pkg/assisted"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"', '"github.com/openshift-kni/eco-goinfra/pkg/secret"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/internal/ztpparams"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/tests/aci-dualstack-ipv4-first.go | func createDualstackSpokeClusterResources() *assisted.AgentClusterInstallBuilder {
By("Create namespace for the test")
if nsBuilder.Exists() {
glog.V(ztpparams.ZTPLogLevel).Infof("The namespace '%s' already exists",
nsBuilder.Object.Name)
} else {
// create the namespace
glog.V(ztpparams.ZTPLogLevel).Infof("Creating the namespace: %v", dualstackTestSpoke)
_, err := nsBuilder.Create()
Expect(err).ToNot(HaveOccurred(), "error creating namespace '%s' : %v ",
nsBuilder.Definition.Name, err)
}
By("Create pull-secret in the new namespace")
testSecret, err := secret.NewBuilder(
HubAPIClient,
fmt.Sprintf("%s-pull-secret", dualstackTestSpoke),
dualstackTestSpoke,
corev1.SecretTypeDockerConfigJson).WithData(ZTPConfig.HubPullSecret.Object.Data).Create()
Expect(err).ToNot(HaveOccurred(), "error occurred when creating pull-secret")
By("Create clusterdeployment in the new namespace")
testClusterDeployment, err := hive.NewABMClusterDeploymentBuilder(
HubAPIClient,
dualstackTestSpoke,
dualstackTestSpoke,
dualstackTestSpoke,
"assisted.test.com",
dualstackTestSpoke,
metav1.LabelSelector{
MatchLabels: map[string]string{
"dummy": "label",
},
}).WithPullSecret(testSecret.Definition.Name).Create()
Expect(err).ToNot(HaveOccurred(), "error occurred when creating clusterdeployment")
By("Create agentclusterinstall in the new namespace")
agentClusterInstallBuilder, err := assisted.NewAgentClusterInstallBuilder(
HubAPIClient,
dualstackTestSpoke,
dualstackTestSpoke,
testClusterDeployment.Object.Name,
3,
2,
v1beta1.Networking{
ClusterNetwork: []v1beta1.ClusterNetworkEntry{
{
CIDR: "fd01::/48",
HostPrefix: 64,
},
{
CIDR: "11.128.0.0/14",
HostPrefix: 24,
},
},
MachineNetwork: []v1beta1.MachineNetworkEntry{
{
CIDR: "fd2e:6f44:5dd8:5::/64",
},
{
CIDR: "192.168.254.0/24",
},
},
ServiceNetwork: []string{"172.30.0.0/16", "fd02::/112"},
}).WithImageSet(ZTPConfig.HubOCPXYVersion).WithAPIVip("192.168.254.5").
WithIngressVip("192.168.254.10").Create()
Expect(err).ToNot(HaveOccurred(), "error creating agentclusterinstall")
return agentClusterInstallBuilder
} | Global Variables: {'_': 'Describe(\n\t"DualstackIPv4First",\n\tOrdered,\n\tContinueOnFailure,\n\tLabel(tsparams.LabelDualstackIPv4FirstACI), func() {\n\t\tWhen("on MCE 2.0 and above", func() {\n\t\t\tBeforeAll(func() {\n\n\t\t\t\tBy("Check that the ClusterImageSet exists")\n\t\t\t\t_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.HubOCPXYVersion)\n\t\t\t\tif err != nil {\n\t\t\t\t\tSkip("The ClusterImageSet must exist")\n\t\t\t\t}\n\n\t\t\t\tnsBuilder = namespace.NewBuilder(HubAPIClient, dualstackTestSpoke)\n\n\t\t\t\ttsparams.ReporterNamespacesToDump[dualstackTestSpoke] = "dualstacktest namespace"\n\t\t\t})\n\t\t\tAfterEach(func() {\n\t\t\t\tBy("Delete the temporary namespace after test")\n\t\t\t\tif nsBuilder.Exists() {\n\t\t\t\t\terr := nsBuilder.DeleteAndWait(time.Second * 300)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred(), "error deleting the temporary namespace after test")\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {\n\t\t\t\tagentClusterInstallBuilder := createDualstackSpokeClusterResources()\n\n\t\t\t\tBy("Waiting for specific error message from SpecSynced condition")\n\t\t\t\tEventually(func() (string, error) {\n\t\t\t\t\tagentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn "", err\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {\n\t\t\t\t\t\tif condition.Type == v1beta1.ClusterSpecSyncedCondition {\n\t\t\t\t\t\t\treturn condition.Message, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn "", nil\n\t\t\t\t}).WithTimeout(time.Minute*2).Should(\n\t\t\t\t\tEqual("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),\n\t\t\t\t\t"didn\'t get the expected message from SpecSynced condition")\n\t\t\t})\n\n\t\t})\n\t})'} | Package Name: operator_test | |||
test | eco-gotests | 74386da3-ccb3-4983-940f-1e7f9c07c13a | DualstackIPv4First | Imports Used: ['"time"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/reportxml"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/tests/aci-dualstack-ipv4-first.go | Describe(
"DualstackIPv4First",
Ordered,
ContinueOnFailure,
Label(tsparams.LabelDualstackIPv4FirstACI), func() {
When("on MCE 2.0 and above", func() {
BeforeAll(func() {
By("Check that the ClusterImageSet exists")
_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.HubOCPXYVersion)
if err != nil {
Skip("The ClusterImageSet must exist")
}
nsBuilder = namespace.NewBuilder(HubAPIClient, dualstackTestSpoke)
tsparams.ReporterNamespacesToDump[dualstackTestSpoke] = "dualstacktest namespace"
})
AfterEach(func() {
By("Delete the temporary namespace after test")
if nsBuilder.Exists() {
err := nsBuilder.DeleteAndWait(time.Second * 300)
Expect(err).ToNot(HaveOccurred(), "error deleting the temporary namespace after test")
}
})
It("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {
agentClusterInstallBuilder := createDualstackSpokeClusterResources()
By("Waiting for specific error message from SpecSynced condition")
Eventually(func() (string, error) {
agentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()
if err != nil {
return "", err
}
for _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {
if condition.Type == v1beta1.ClusterSpecSyncedCondition {
return condition.Message, nil
}
}
return "", nil
}).WithTimeout(time.Minute*2).Should(
Equal("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),
"didn't get the expected message from SpecSynced condition")
})
})
}) | Global Variables: {'_': 'Describe(\n\t"DualstackIPv4First",\n\tOrdered,\n\tContinueOnFailure,\n\tLabel(tsparams.LabelDualstackIPv4FirstACI), func() {\n\t\tWhen("on MCE 2.0 and above", func() {\n\t\t\tBeforeAll(func() {\n\n\t\t\t\tBy("Check that the ClusterImageSet exists")\n\t\t\t\t_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.HubOCPXYVersion)\n\t\t\t\tif err != nil {\n\t\t\t\t\tSkip("The ClusterImageSet must exist")\n\t\t\t\t}\n\n\t\t\t\tnsBuilder = namespace.NewBuilder(HubAPIClient, dualstackTestSpoke)\n\n\t\t\t\ttsparams.ReporterNamespacesToDump[dualstackTestSpoke] = "dualstacktest namespace"\n\t\t\t})\n\t\t\tAfterEach(func() {\n\t\t\t\tBy("Delete the temporary namespace after test")\n\t\t\t\tif nsBuilder.Exists() {\n\t\t\t\t\terr := nsBuilder.DeleteAndWait(time.Second * 300)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred(), "error deleting the temporary namespace after test")\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {\n\t\t\t\tagentClusterInstallBuilder := createDualstackSpokeClusterResources()\n\n\t\t\t\tBy("Waiting for specific error message from SpecSynced condition")\n\t\t\t\tEventually(func() (string, error) {\n\t\t\t\t\tagentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn "", err\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {\n\t\t\t\t\t\tif condition.Type == v1beta1.ClusterSpecSyncedCondition {\n\t\t\t\t\t\t\treturn condition.Message, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn "", nil\n\t\t\t\t}).WithTimeout(time.Minute*2).Should(\n\t\t\t\t\tEqual("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),\n\t\t\t\t\t"didn\'t get the expected message from SpecSynced condition")\n\t\t\t})\n\n\t\t})\n\t})'} | Package Name: operator_test | |||
test | eco-gotests | f7666a4c-59c5-48c5-9501-1ed42aae69a0 | on MCE 2.0 and above | Imports Used: ['"time"', '"github.com/openshift-kni/eco-goinfra/pkg/hive"', '"github.com/openshift-kni/eco-goinfra/pkg/namespace"', '"github.com/openshift-kni/eco-goinfra/pkg/reportxml"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"', '"github.com/openshift-kni/eco-gotests/tests/assisted/ztp/operator/internal/tsparams"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/tests/aci-dualstack-ipv4-first.go | When("on MCE 2.0 and above", func() {
BeforeAll(func() {
By("Check that the ClusterImageSet exists")
_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.HubOCPXYVersion)
if err != nil {
Skip("The ClusterImageSet must exist")
}
nsBuilder = namespace.NewBuilder(HubAPIClient, dualstackTestSpoke)
tsparams.ReporterNamespacesToDump[dualstackTestSpoke] = "dualstacktest namespace"
})
AfterEach(func() {
By("Delete the temporary namespace after test")
if nsBuilder.Exists() {
err := nsBuilder.DeleteAndWait(time.Second * 300)
Expect(err).ToNot(HaveOccurred(), "error deleting the temporary namespace after test")
}
})
It("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {
agentClusterInstallBuilder := createDualstackSpokeClusterResources()
By("Waiting for specific error message from SpecSynced condition")
Eventually(func() (string, error) {
agentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()
if err != nil {
return "", err
}
for _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {
if condition.Type == v1beta1.ClusterSpecSyncedCondition {
return condition.Message, nil
}
}
return "", nil
}).WithTimeout(time.Minute*2).Should(
Equal("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),
"didn't get the expected message from SpecSynced condition")
})
}) | Global Variables: {'_': 'Describe(\n\t"DualstackIPv4First",\n\tOrdered,\n\tContinueOnFailure,\n\tLabel(tsparams.LabelDualstackIPv4FirstACI), func() {\n\t\tWhen("on MCE 2.0 and above", func() {\n\t\t\tBeforeAll(func() {\n\n\t\t\t\tBy("Check that the ClusterImageSet exists")\n\t\t\t\t_, err := hive.PullClusterImageSet(HubAPIClient, ZTPConfig.HubOCPXYVersion)\n\t\t\t\tif err != nil {\n\t\t\t\t\tSkip("The ClusterImageSet must exist")\n\t\t\t\t}\n\n\t\t\t\tnsBuilder = namespace.NewBuilder(HubAPIClient, dualstackTestSpoke)\n\n\t\t\t\ttsparams.ReporterNamespacesToDump[dualstackTestSpoke] = "dualstacktest namespace"\n\t\t\t})\n\t\t\tAfterEach(func() {\n\t\t\t\tBy("Delete the temporary namespace after test")\n\t\t\t\tif nsBuilder.Exists() {\n\t\t\t\t\terr := nsBuilder.DeleteAndWait(time.Second * 300)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred(), "error deleting the temporary namespace after test")\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {\n\t\t\t\tagentClusterInstallBuilder := createDualstackSpokeClusterResources()\n\n\t\t\t\tBy("Waiting for specific error message from SpecSynced condition")\n\t\t\t\tEventually(func() (string, error) {\n\t\t\t\t\tagentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn "", err\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {\n\t\t\t\t\t\tif condition.Type == v1beta1.ClusterSpecSyncedCondition {\n\t\t\t\t\t\t\treturn condition.Message, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn "", nil\n\t\t\t\t}).WithTimeout(time.Minute*2).Should(\n\t\t\t\t\tEqual("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),\n\t\t\t\t\t"didn\'t get the expected message from SpecSynced condition")\n\t\t\t})\n\n\t\t})\n\t})'} | Package Name: operator_test | |||
test case | eco-gotests | 759dbb67-03f6-4453-a0a5-0c3e5e3549b9 | Validates that ACI with dualstack expects IPv4 first | Imports Used: ['"time"', '"github.com/openshift-kni/eco-goinfra/pkg/reportxml"', '"github.com/openshift-kni/eco-goinfra/pkg/schemes/assisted/api/hiveextension/v1beta1"'] | File Location: github.com/eco-gotests/tests/assisted/ztp/operator/tests/aci-dualstack-ipv4-first.go | It("Validates that ACI with dualstack expects IPv4 first", reportxml.ID("44877"), func() {
agentClusterInstallBuilder := createDualstackSpokeClusterResources()
By("Waiting for specific error message from SpecSynced condition")
Eventually(func() (string, error) {
agentClusterInstallBuilder.Object, err = agentClusterInstallBuilder.Get()
if err != nil {
return "", err
}
for _, condition := range agentClusterInstallBuilder.Object.Status.Conditions {
if condition.Type == v1beta1.ClusterSpecSyncedCondition {
return condition.Message, nil
}
}
return "", nil
}).WithTimeout(time.Minute*2).Should(
Equal("The Spec could not be synced due to an input error: First machine network has to be IPv4 subnet"),
"didn't get the expected message from SpecSynced condition")
}) |