parent
29842b920a
commit
fcf182a623
@ -0,0 +1,25 @@ |
||||
# BLOCK STORAGE COLLECTOR

OpenStack Block Storage collector written in Go.

## How to build

The service is built with a multistage Docker build: everything is compiled inside an image that contains the full Golang toolchain, and the resulting executable is then copied into a minimal image based on the scratch image.

The build folder at the root of the repo contains a script called start.sh, whose invocation accepts "Dev" as an optional parameter. Running the script with this parameter uses the local copy of the repository as the base for building the service's Docker image; running it without the parameter builds the service entirely from sources.

```
./start.sh [Dev]
```

The default branch used when building from sources is "master"; it can be changed, along with a few other parameters, by editing the script.

Using the optional [Dev] argument builds from the code present in the repo at the moment of invocation.

## How to run

The run folder at the root of the repo contains a docker-compose sample file and a config.toml sample file. Once they are configured with appropriate data, start the service with:

```
docker-compose up -d
```
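
To follow the collector logs once the service is up (assuming the service keeps the `collectors` name used in the sample docker-compose file):

```
docker-compose logs -f collectors
```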
@ -0,0 +1,445 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"os" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/gophercloud/gophercloud" |
||||
"github.com/gophercloud/gophercloud/openstack" |
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" |
||||
"github.com/gophercloud/gophercloud/openstack/identity/v3/projects" |
||||
"github.com/gophercloud/gophercloud/pagination" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
datamodels "gitlab.com/cyclops-utilities/datamodels" |
||||
eeEvent "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client/event_management" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
collector = "Blockstorage" |
||||
objects = "volume" |
||||
remotelist []*eeModels.MinimalState |
||||
collectionStart int64 |
||||
) |
||||
|
||||
// collect handles the process of retrieving the information from the system.
|
||||
func collect() { |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been started.\n") |
||||
|
||||
collectionStart = time.Now().UnixNano() |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Start Time"}).Set(float64(collectionStart)) |
||||
|
||||
// Here comes the logic to retrieve the information from the system
|
||||
opts := gophercloud.AuthOptions{ |
||||
DomainName: cfg.OpenStack.Domain, |
||||
IdentityEndpoint: cfg.OpenStack.Keystone, |
||||
Password: cfg.OpenStack.Password, |
||||
Username: cfg.OpenStack.User, |
||||
} |
||||
|
||||
if len(cfg.OpenStack.Project) > 0 { |
||||
|
||||
opts.TenantName = cfg.OpenStack.Project |
||||
|
||||
} |
||||
|
||||
provider, e := openstack.AuthenticatedClient(opts) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error authenticating against OpenStack. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
listOpts := projects.ListOpts{ |
||||
Enabled: gophercloud.Enabled, |
||||
} |
||||
|
||||
authClient, e := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error creating the collector client. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
allPages, e := projects.List(authClient, listOpts).AllPages() |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error listing all the pages. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
allProjects, e := projects.ExtractProjects(allPages) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error listing all the projects. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[COLLECTION] Querying events engine service for list of known and not terminated volumes") |
||||
|
||||
resourceType := "blockstorage" |
||||
eeParams := eeEvent.NewListStatesParams().WithResource(&resourceType).WithRegion(&cfg.OpenStack.Region) |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()

r, e := reportClient.EventManagement.ListStates(ctx, eeParams)
||||
|
||||
// Clears the remotelist between runs
|
||||
remotelist = nil |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Something went wrong while retrieving the usage from EventsEngine, check with the administrator. Error: %v.\n", e) |
||||
|
||||
} else { |
||||
|
||||
remotelist = r.Payload |
||||
|
||||
} |
||||
|
||||
resourceType = "blockstorage_ssd" |
||||
eeParams = eeEvent.NewListStatesParams().WithResource(&resourceType).WithRegion(&cfg.OpenStack.Region) |
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()

r, e = reportClient.EventManagement.ListStates(ctx, eeParams)
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Something went wrong while retrieving the usage(ssd) from EventsEngine, check with the administrator. Error: %v.\n", e) |
||||
|
||||
} else { |
||||
|
||||
remotelist = append(remotelist, r.Payload...) |
||||
|
||||
} |
||||
|
||||
eeCount := len(remotelist) |
||||
apiCount := 0 |
||||
ssdCount := 0 |
||||
|
||||
l.Trace.Printf("[COLLECTION] (BEFORE) Existing count of blockstorage elements at remote [ %v ].\n", len(remotelist)) |
||||
|
||||
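// allProjectsLoop is labeled so the project filters below can skip an entire
// project with "continue allProjectsLoop" from inside the inner filter loops.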
allProjectsLoop: |
||||
for _, project := range allProjects { |
||||
|
||||
l.Trace.Printf("[COLLECTION] Found project [ %v ] with ID [ %v ]. Proceeding to get list of volumes.\n", project.Name, project.ID) |
||||
|
||||
volumeClient, e := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{ |
||||
Region: cfg.OpenStack.Region, |
||||
}) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error creating block storage client. Error: %v\n", e) |
||||
|
||||
continue |
||||
|
||||
} |
||||
|
||||
// Filter by project id:
|
||||
for _, filter := range cfg.ProjectFilters { |
||||
|
||||
if strings.Contains(project.ID, filter) && filter != "" { |
||||
|
||||
l.Debug.Printf("[COLLECTION] The Project [ %v ] matches filter [ %v ] and won't be further processed.", project.ID, filter) |
||||
|
||||
continue allProjectsLoop |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
// Filter by project name:
|
||||
for _, filter := range cfg.NameFilters { |
||||
|
||||
if strings.Contains(strings.ToLower(project.Name), strings.ToLower(filter)) && filter != "" { |
||||
|
||||
l.Debug.Printf("[COLLECTION] The Project [ %v ] matches filter [ %v ] and won't be further processed.", project.Name, filter) |
||||
|
||||
continue allProjectsLoop |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
opts := volumes.ListOpts{ |
||||
AllTenants: true, |
||||
TenantID: project.ID, |
||||
} |
||||
|
||||
pager := volumes.List(volumeClient, opts) |
||||
|
||||
e = pager.EachPage(func(page pagination.Page) (bool, error) { |
||||
|
||||
vList, e := volumes.ExtractVolumes(page) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error processing the lists of active resources. Error: %v\n", e) |
||||
|
||||
return false, e |
||||
|
||||
} |
||||
|
||||
apiCount += len(vList) |
||||
|
||||
for _, v := range vList { |
||||
|
||||
// "v" will be a volumes.Volume
|
||||
l.Trace.Printf("[COLLECTION] Found volume [ %v ] with ID [ %v ]. Parameters: Status [ %v ], Type [ %v ], Size [ %v ] Zone [ %v ] and Replication [ %v ].\n", |
||||
strings.TrimSpace(v.Name), v.ID, v.Status, v.VolumeType, v.Size, v.AvailabilityZone, v.ReplicationStatus) |
||||
|
||||
// Here comes the transformation of the information retrieved into either
|
||||
md := make(datamodels.JSONdb) |
||||
md["size"] = strconv.Itoa(v.Size) |
||||
md["availabilityzone"] = v.AvailabilityZone |
||||
md["replication"] = v.ReplicationStatus |
||||
md["volumetype"] = v.VolumeType |
||||
md["region"] = cfg.OpenStack.Region |
||||
|
||||
evTime := int64(time.Now().Unix()) |
||||
evLast := getStatus(v.Status) |
||||
|
||||
objectType := "blockstorage" |
||||
|
||||
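// Volumes whose type contains "ssd" are accounted for separately, both in the
// resource type of the event sent and in the Prometheus counters.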
if strings.Contains(v.VolumeType, "ssd") { |
||||
|
||||
objectType = "blockstorage_ssd" |
||||
ssdCount++ |
||||
|
||||
} |
||||
|
||||
// events or usage reports to be sent.
|
||||
event := eeModels.Event{ |
||||
Account: project.ID, |
||||
EventTime: &evTime, |
||||
LastEvent: &evLast, |
||||
MetaData: md, |
||||
Region: cfg.OpenStack.Region, |
||||
ResourceID: v.ID, |
||||
ResourceName: strings.TrimSpace(v.Name), |
||||
ResourceType: objectType, |
||||
} |
||||
|
||||
report(event) |
||||
|
||||
//if this object exists in remote list then lets remove it
|
||||
for i, object := range remotelist { |
||||
|
||||
metadata := object.MetaData |
||||
|
||||
if strings.Compare(object.Account, project.ID) == 0 && |
||||
strings.Compare(object.ResourceID, v.ID) == 0 && |
||||
strings.Compare(object.ResourceName, v.Name) == 0 && |
||||
strings.Compare(metadata["size"].(string), strconv.Itoa(v.Size)) == 0 { |
||||
|
||||
l.Debug.Printf("[COLLECTION] Event send cleaned from the processing list..\n") |
||||
|
||||
remotelist = append(remotelist[:i], remotelist[i+1:]...) |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
return true, nil |
||||
|
||||
}) |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[COLLECTION] (AFTER) Remaining count of blockstorage elements at remote which were left unprocessed [ %v ].\n", len(remotelist)) |
||||
|
||||
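// Whatever is left in remotelist was known to the EventsEngine but is no longer
// reported by OpenStack, so a termination event is sent for each zombie entry.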
for _, object := range remotelist { |
||||
|
||||
l.Debug.Printf("[COLLECTION] Sending termination for zombie data in the system..\n") |
||||
|
||||
evTime := int64(time.Now().Unix()) |
||||
evLast := getStatus("terminated") |
||||
|
||||
objectType := "blockstorage" |
||||
|
||||
if strings.Contains(object.MetaData["volumetype"].(string), "ssd") { |
||||
|
||||
objectType = "blockstorage_ssd" |
||||
|
||||
} |
||||
|
||||
// events or usage reports to be sent.
|
||||
event := eeModels.Event{ |
||||
Account: object.Account, |
||||
EventTime: &evTime, |
||||
LastEvent: &evLast, |
||||
MetaData: object.MetaData, |
||||
Region: cfg.OpenStack.Region, |
||||
ResourceID: object.ResourceID, |
||||
ResourceName: object.ResourceName, |
||||
ResourceType: objectType, |
||||
} |
||||
|
||||
report(event) |
||||
|
||||
} |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total Storage Blocks reported by OS API"}).Set(float64(apiCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total Storage Blocks (only SSD) reported by OS API"}).Set(float64(ssdCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total Storage Blocks from EventEngine"}).Set(float64(eeCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total Storage Blocks forcefully TERMINATED"}).Set(float64(len(remotelist))) |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Processing Time"}).Set(float64(time.Now().UnixNano()-collectionStart) / float64(time.Millisecond)) |
||||
|
||||
l.Warning.Printf("[COLLECTION] Completed.\n - OS Report: %v\n - EE Report: %v\n - Forced Termination: %v\n - Processing Time: %v[ms]\n", apiCount, eeCount, len(remotelist), float64(time.Now().UnixNano()-collectionStart)/float64(time.Millisecond)) |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been finished.\n") |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// getStatus job is to normalize the event state returned by the collectors.
|
||||
// Parameters:
|
||||
// - state: string returned by the system.
|
||||
// Returns:
|
||||
// - status: normalized state to be returned.
|
||||
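// Note: states not covered by the switch below fall through and return an
// empty status string.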
func getStatus(state string) (status string) { |
||||
|
||||
switch strings.ToUpper(state) { |
||||
|
||||
case "ACTIVE": |
||||
|
||||
status = "active" |
||||
|
||||
case "ATTACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "AVAILABLE": |
||||
|
||||
status = "active" |
||||
|
||||
case "BUILD": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "CREATING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DELETING": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DETACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DOWN": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "ERROR": |
||||
|
||||
status = "error" |
||||
|
||||
case "ERROR_DELETING": |
||||
|
||||
status = "error" |
||||
|
||||
case "EXTENDING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "HARD_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "IN-USE": |
||||
|
||||
status = "active" |
||||
|
||||
case "MAINTENANCE": |
||||
|
||||
status = "active" |
||||
|
||||
case "PAUSED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "RESCUED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESERVED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RETYPING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SHUTOFF": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "SOFT_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "STOPPED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SUSPENDED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "TERMINATED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "VERIFY_RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[REPORT] State received from the system [ %v ] normalized to [ %v ]", state, status) |
||||
|
||||
return status |
||||
|
||||
} |
@ -0,0 +1,246 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"strings" |
||||
|
||||
"github.com/spf13/viper" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// The following structs are part of the configuration struct which
|
||||
// acts as the main reference for configuration parameters in the system.
|
||||
type apiKey struct { |
||||
Enabled bool |
||||
Key string |
||||
Place string |
||||
Token string |
||||
} |
||||
|
||||
type configuration struct { |
||||
APIKey apiKey |
||||
General generalConfig |
||||
Heappe heappeConfig |
||||
Kafka kafkaConfig |
||||
Keycloak keycloakConfig |
||||
Lieutenant lieutenantConfig |
||||
OpenStack openStackConfig |
||||
Prometheus prometheusConfig |
||||
RGW rgwConfig |
||||
NameFilters []string |
||||
ProjectFilters []string |
||||
Services map[string]string |
||||
} |
||||
|
||||
type generalConfig struct { |
||||
InsecureSkipVerify bool |
||||
LogFile string |
||||
LogLevel string |
||||
LogToConsole bool |
||||
ObjectsPeriodicity int |
||||
Periodicity int |
||||
PrometheusPeriodicity int |
||||
} |
||||
|
||||
type heappeConfig struct { |
||||
Username string |
||||
Password string |
||||
GroupResourceUsageReportURI string |
||||
AuthenticateUserPasswordURI string |
||||
} |
||||
|
||||
type kafkaConfig struct { |
||||
Brokers []string |
||||
MaxBytes int |
||||
MinBytes int |
||||
Offset int64 |
||||
Partition int |
||||
TLSEnabled bool |
||||
TopicUDR string |
||||
TopicEEngine string |
||||
} |
||||
|
||||
type keycloakConfig struct { |
||||
ClientID string `json:"client_id"` |
||||
ClientSecret string `json:"client_secret"` |
||||
Enabled bool `json:"enabled"` |
||||
Host string `json:"host"` |
||||
Port int `json:"port"` |
||||
Realm string `json:"realm"` |
||||
RedirectURL string `json:"redirect_url"` |
||||
UseHTTP bool `json:"use_http"` |
||||
} |
||||
|
||||
type lieutenantConfig struct { |
||||
Host string |
||||
Token string |
||||
} |
||||
|
||||
type openStackConfig struct { |
||||
Domain string |
||||
Keystone string |
||||
Password string |
||||
Project string |
||||
Region string |
||||
User string |
||||
} |
||||
|
||||
type prometheusConfig struct { |
||||
Host string |
||||
MetricsExport bool |
||||
MetricsPort string |
||||
MetricsRoute string |
||||
} |
||||
|
||||
type rgwConfig struct { |
||||
AccessKeyID string |
||||
AdminPath string |
||||
Region string |
||||
SecretAccessKey string |
||||
ServerURL string |
||||
} |
||||
|
||||
// dumpConfig dumps the configuration in JSON format to the log system.
// It makes use of the masking function to keep some secrecy in the log.
|
||||
// Parameters:
|
||||
// - c: configuration type containing the config present in the system.
|
||||
func dumpConfig(c configuration) { |
||||
cfgCopy := c |
||||
|
||||
// deal with configuration params that should be masked
|
||||
cfgCopy.APIKey.Token = masked(c.APIKey.Token, 4) |
||||
cfgCopy.Heappe.Username = masked(c.Heappe.Username, 4) |
||||
cfgCopy.Heappe.Password = masked(c.Heappe.Password, 4) |
||||
cfgCopy.Keycloak.ClientSecret = masked(c.Keycloak.ClientSecret, 4) |
||||
cfgCopy.Lieutenant.Token = masked(c.Lieutenant.Token, 4) |
||||
cfgCopy.OpenStack.Password = masked(c.OpenStack.Password, 4) |
||||
cfgCopy.RGW.AccessKeyID = masked(c.RGW.AccessKeyID, 4) |
||||
cfgCopy.RGW.SecretAccessKey = masked(c.RGW.SecretAccessKey, 4) |
||||
|
||||
// MarshalIndent creates a string containing newlines; each line starts with
// two spaces, and two spaces are added for each indent level.
|
||||
configJSON, _ := json.MarshalIndent(cfgCopy, " ", " ") |
||||
|
||||
l.Info.Printf("[CONFIG] Configuration settings:\n") |
||||
l.Info.Printf("%v\n", string(configJSON)) |
||||
|
||||
} |
||||
|
||||
// masked returns asterisks in place of the characters of a string, keeping
// only the last unmaskedChars characters visible.
|
||||
// Parameters:
|
||||
// - s: string to be masked
|
||||
// - unmaskedChars: int with the amount (counting from the end of the string) of
|
||||
// characters to keep unmasked.
|
||||
// Returns:
|
||||
// - returnString: the s string passed as parameter masked.
|
||||
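// Example: masked("supersecret", 4) returns "*******cret".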
func masked(s string, unmaskedChars int) (returnString string) { |
||||
|
||||
if len(s) <= unmaskedChars { |
||||
|
||||
returnString = s |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
asteriskString := strings.Repeat("*", (len(s) - unmaskedChars)) |
||||
returnString = asteriskString + string(s[len(s)-unmaskedChars:]) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// parseConfig handles the filling of the config struct with the data Viper gets
|
||||
// from the configuration file.
|
||||
// Returns:
|
||||
// - c: the configuration struct filled with the relevant parsed configuration.
|
||||
func parseConfig() (c configuration) { |
||||
|
||||
l.Trace.Printf("[CONFIG] Retrieving configuration.\n") |
||||
|
||||
c = configuration{ |
||||
|
||||
APIKey: apiKey{ |
||||
Enabled: viper.GetBool("apikey.enabled"), |
||||
Key: viper.GetString("apikey.key"), |
||||
Place: viper.GetString("apikey.place"), |
||||
Token: viper.GetString("apikey.token"), |
||||
}, |
||||
|
||||
General: generalConfig{ |
||||
InsecureSkipVerify: viper.GetBool("general.insecureskipverify"), |
||||
LogFile: viper.GetString("general.logfile"), |
||||
LogLevel: viper.GetString("general.loglevel"), |
||||
LogToConsole: viper.GetBool("general.logtoconsole"), |
||||
ObjectsPeriodicity: viper.GetInt("general.objectsperiodicity"), |
||||
Periodicity: viper.GetInt("general.periodicity"), |
||||
PrometheusPeriodicity: viper.GetInt("general.prometheusperiodicity"), |
||||
}, |
||||
|
||||
Heappe: heappeConfig{ |
||||
Username: viper.GetString("heappe.username"), |
||||
Password: viper.GetString("heappe.password"), |
||||
GroupResourceUsageReportURI: viper.GetString("heappe.groupResourceUsageReportUri"), |
||||
AuthenticateUserPasswordURI: viper.GetString("heappe.authenticateUserPasswordUri"), |
||||
}, |
||||
|
||||
Kafka: kafkaConfig{ |
||||
Brokers: viper.GetStringSlice("kafka.brokers"), |
||||
MaxBytes: viper.GetInt("kafka.sizemax"), |
||||
MinBytes: viper.GetInt("kafka.sizemin"), |
||||
Offset: viper.GetInt64("kafka.offset"), |
||||
Partition: viper.GetInt("kafka.partition"), |
||||
TLSEnabled: viper.GetBool("kafka.tlsenabled"), |
||||
TopicUDR: viper.GetString("kafka.topicudr"), |
||||
TopicEEngine: viper.GetString("kafka.topiceengine"), |
||||
}, |
||||
|
||||
Keycloak: keycloakConfig{ |
||||
ClientID: viper.GetString("keycloak.clientid"), |
||||
ClientSecret: viper.GetString("keycloak.clientsecret"), |
||||
Enabled: viper.GetBool("keycloak.enabled"), |
||||
Host: viper.GetString("keycloak.host"), |
||||
Port: viper.GetInt("keycloak.port"), |
||||
Realm: viper.GetString("keycloak.realm"), |
||||
RedirectURL: viper.GetString("keycloak.redirecturl"), |
||||
UseHTTP: viper.GetBool("keycloak.usehttp"), |
||||
}, |
||||
|
||||
Lieutenant: lieutenantConfig{ |
||||
Host: viper.GetString("lieutenant.host"), |
||||
Token: viper.GetString("lieutenant.token"), |
||||
}, |
||||
|
||||
OpenStack: openStackConfig{ |
||||
Domain: viper.GetString("openstack.domain"), |
||||
Keystone: viper.GetString("openstack.keystone"), |
||||
Password: viper.GetString("openstack.password"), |
||||
Project: viper.GetString("openstack.project"), |
||||
Region: viper.GetString("openstack.region"), |
||||
User: viper.GetString("openstack.user"), |
||||
}, |
||||
|
||||
Prometheus: prometheusConfig{ |
||||
Host: viper.GetString("prometheus.host"), |
||||
MetricsExport: viper.GetBool("prometheus.metricsexport"), |
||||
MetricsPort: viper.GetString("prometheus.metricsport"), |
||||
MetricsRoute: viper.GetString("prometheus.metricsroute"), |
||||
}, |
||||
|
||||
RGW: rgwConfig{ |
||||
AccessKeyID: viper.GetString("rgw.accesskey"), |
||||
AdminPath: viper.GetString("rgw.adminpath"), |
||||
Region: viper.GetString("rgw.region"), |
||||
SecretAccessKey: viper.GetString("rgw.secretaccesskey"), |
||||
ServerURL: viper.GetString("rgw.serverurl"), |
||||
}, |
||||
|
||||
NameFilters: viper.GetStringSlice("events.namefilters"), |
||||
ProjectFilters: viper.GetStringSlice("events.projectfilters"), |
||||
Services: viper.GetStringMapString("services"), |
||||
} |
||||
|
||||
return |
||||
|
||||
} |
@ -0,0 +1,137 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"crypto/tls" |
||||
"encoding/json" |
||||
"strconv" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/segmentio/kafka-go" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
type kafkaHandlerConf struct { |
||||
out []kafkaPackage |
||||
} |
||||
|
||||
type kafkaPackage struct { |
||||
topic string |
||||
partition int |
||||
channel chan interface{} |
||||
} |
||||
|
||||
// kafkaHandler checks the configuration it receives and starts the goroutines
// needed to satisfy it.
// Parameters:
// - kH: kafkaHandlerConf struct with the specific configuration used by the
// service.
|
||||
func kafkaHandler(kH kafkaHandlerConf) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing the receivers/senders according to the provided configuration.\n") |
||||
|
||||
if kH.out != nil { |
||||
|
||||
for _, p := range kH.out { |
||||
|
||||
go kafkaSender(p.topic, p.partition, p.channel) |
||||
|
||||
} |
||||
|
||||
} |
||||
} |
||||
|
||||
// kafkaSender is the abstracted interface handling the sending of data through
|
||||
// kafka topics.
|
||||
// Parameters:
|
||||
// - t: string containing the kafka-topic in use.
|
||||
// - p: int containing the kafka-topic partition.
|
||||
// - c: interface{} channel to receive the data that will be marshalled into
|
||||
// JSON and then transmitted via kafka.
|
||||
func kafkaSender(t string, p int, c chan interface{}) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing kafka sender for topic: %v.\n", t) |
||||
|
||||
conf := kafka.WriterConfig{ |
||||
Brokers: cfg.Kafka.Brokers, |
||||
Topic: t, |
||||
Balancer: &kafka.LeastBytes{}, |
||||
} |
||||
|
||||
if cfg.Kafka.TLSEnabled { |
||||
|
||||
dialer := &kafka.Dialer{ |
||||
Timeout: 10 * time.Second, |
||||
DualStack: true, |
||||
TLS: &tls.Config{ |
||||
MinVersion: tls.VersionTLS12, |
||||
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, |
||||
PreferServerCipherSuites: true, |
||||
InsecureSkipVerify: cfg.General.InsecureSkipVerify, |
||||
}, |
||||
} |
||||
|
||||
conf.Dialer = dialer |
||||
|
||||
} |
||||
|
||||
w := kafka.NewWriter(conf) |
||||
defer w.Close() |
||||
|
||||
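// Each object received on the channel is marshalled to JSON and written to the
// topic in its own goroutine, so a slow broker does not block the channel reads.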
for { |
||||
|
||||
v, ok := <-c |
||||
|
||||
if !ok { |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Go Channel Problems"}).Inc() |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
go func() { |
||||
|
||||
m, e := json.Marshal(&v) |
||||
|
||||
if e == nil { |
||||
|
||||
l.Info.Printf("[KAFKA] Object received through the channel. Starting its processing.\n") |
||||
|
||||
err := w.WriteMessages(context.Background(), |
||||
kafka.Message{ |
||||
Key: []byte(t + "-" + strconv.Itoa(p)), |
||||
Value: m, |
||||
}, |
||||
) |
||||
|
||||
if err != nil { |
||||
|
||||
l.Warning.Printf("[KAFKA] There was a problem when sending the record through the stream. Error: %v\n", err) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Kafka Stream Problems"}).Inc() |
||||
|
||||
} else { |
||||
|
||||
l.Info.Printf("[KAFKA] Object added to the stream succesfully. Topic: %v.\n", t) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "OK", "reason": "Object sent"}).Inc() |
||||
|
||||
} |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[KAFKA] The information to be sent into the stream cannot be marshalled, please check with the administrator. Error: %v\n", e) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "JSON Marshalling"}).Inc() |
||||
|
||||
} |
||||
|
||||
return |
||||
|
||||
}() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,188 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"flag" |
||||
"fmt" |
||||
"net/url" |
||||
"os" |
||||
"reflect" |
||||
"time" |
||||
|
||||
httptransport "github.com/go-openapi/runtime/client" |
||||
"github.com/spf13/viper" |
||||
cusClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client" |
||||
eeClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
udrModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
version string |
||||
cfg configuration |
||||
pipeU chan interface{} |
||||
pipeE chan interface{} |
||||
reportClient *eeClient.EventEngineManagementAPI |
||||
zombiesClient *cusClient.CustomerDatabaseManagement |
||||
) |
||||
|
||||
// kafkaStart handles the initialization of the kafka service.
// This is a sample function with the most basic usage of the kafka service; it
// should be redefined to match the needs of the service.
// Returns:
// - chUDR, chEvents: interface{} channels used to send objects through the
// corresponding kafka topics.
|
||||
func kafkaStart() (chUDR, chEvents chan interface{}) { |
||||
|
||||
l.Trace.Printf("[MAIN] Intializing Kafka\n") |
||||
|
||||
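// Buffered channels (capacity 1000) so that collection can keep queueing
// objects while the kafka senders drain them in the background.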
chUDR = make(chan interface{}, 1000) |
||||
chEvents = make(chan interface{}, 1000) |
||||
|
||||
handler := kafkaHandlerConf{ |
||||
out: []kafkaPackage{ |
||||
{ |
||||
topic: cfg.Kafka.TopicUDR, |
||||
channel: chUDR, |
||||
}, |
||||
{ |
||||
topic: cfg.Kafka.TopicEEngine, |
||||
channel: chEvents, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
kafkaHandler(handler) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// report handles the process of sending the event or usage to the respective
|
||||
// service.
|
||||
// Parameters:
|
||||
// - object: an interface{} reference with the event/usage to be sent.
|
||||
func report(object interface{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] The reporting process has been started.\n") |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(udrModels.Usage{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] UDR Object detected. Sending through kafka.\n") |
||||
|
||||
pipeU <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(eeModels.Event{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] Event Object detected. Sending through kafka.\n") |
||||
|
||||
pipeE <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
fail := "the provided object doesn't belong to UDR or EE models" |
||||
|
||||
l.Warning.Printf("[REPORT] Something went wrong while processing the object, check with the administrator. Error: %v.\n", fail) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
func init() { |
||||
|
||||
confFile := flag.String("conf", "./config", "configuration file path (without toml extension)") |
||||
|
||||
flag.Parse() |
||||
|
||||
// Placeholder code: the default value ensures this situation never arises.
|
||||
if len(*confFile) == 0 { |
||||
|
||||
fmt.Printf("Usage: Collector-TYPE -conf=/path/to/configuration/file\n") |
||||
|
||||
os.Exit(0) |
||||
|
||||
} |
||||
|
||||
// err := gcfg.ReadFileInto(&cfg, *confFile)
|
||||
viper.SetConfigName(*confFile) // name of config file (without extension)
|
||||
viper.SetConfigType("toml") |
||||
viper.AddConfigPath(".") // path to look for the config file in
|
||||
|
||||
err := viper.ReadInConfig() // Find and read the config file
|
||||
|
||||
if err != nil { |
||||
|
||||
// TODO(murp) - differentiate between file not found and formatting error in
|
||||
// config file)
|
||||
fmt.Printf("[MAIN] Failed to parse configuration data: %s\nCorrect usage: Collector-TYPE -conf=/path/to/configuration/file\n", err) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
cfg = parseConfig() |
||||
|
||||
e := l.InitLogger(cfg.General.LogFile, cfg.General.LogLevel, cfg.General.LogToConsole) |
||||
|
||||
if e != nil { |
||||
|
||||
fmt.Printf("[MAIN] Initialization of the logger failed. Error: %v\n", e) |
||||
|
||||
} |
||||
|
||||
l.Info.Printf("Cyclops Labs Collector TYPE version %v initialized\n", version) |
||||
|
||||
dumpConfig(cfg) |
||||
|
||||
// Let's start the HTTP Server and Gauges for Prometheus
|
||||
prometheusStart() |
||||
|
||||
} |
||||
|
||||
func main() { |
||||
|
||||
// If needed here is the initialization for the kafka sender:
|
||||
pipeU, pipeE = kafkaStart() |
||||
|
||||
// Here we start the client instantiation to send reports to the EventsEngine.
|
||||
eeConfig := eeClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["eventsengine"], |
||||
Path: eeClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
reportClient = eeClient.New(eeConfig) |
||||
|
||||
// Here we start the client instantiation to get the canceled customers to check for zombies.
|
||||
cusConfig := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
zombiesClient = cusClient.New(cusConfig) |
||||
|
||||
// Let's launch the first collection process.
|
||||
go collect() |
||||
|
||||
// cfg.General.Periodicity should be changed to cfg.General.ObjectsPeriodicity
// in case you need the long (8h) periodicity.
|
||||
for range time.NewTicker(time.Duration(cfg.General.Periodicity) * time.Minute).C { |
||||
|
||||
go collect() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,95 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"log" |
||||
"net/http" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/prometheus/client_golang/prometheus/promhttp" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
metricReporting *prometheus.GaugeVec |
||||
metricCollection *prometheus.GaugeVec |
||||
metricTime *prometheus.GaugeVec |
||||
metricCount *prometheus.GaugeVec |
||||
) |
||||
|
||||
func prometheusStart() { |
||||
|
||||
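// A dedicated (pedantic) registry is used instead of the global default one,
// so only the collector's own gauges are exposed on the metrics endpoint.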
reg := prometheus.NewPedanticRegistry() |
||||
|
||||
metricReporting = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "kafka_send_state", |
||||
Help: "Reporting information and Kafka topics usage", |
||||
}, |
||||
[]string{ |
||||
"reason", |
||||
"state", |
||||
"topic", |
||||
}, |
||||
) |
||||
|
||||
metricCollection = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "Collection", |
||||
Help: "Collection information and usages data", |
||||
}, |
||||
[]string{ |
||||
"account", |
||||
"event", |
||||
"reason", |
||||
"state", |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricTime = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "collection_time", |
||||
Help: "Different timing metrics", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricCount = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: objects + "_count", |
||||
Help: "Different VM Counts", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
reg.MustRegister(metricReporting, metricCollection, metricTime, metricCount) |
||||
//prometheus.MustRegister(metricReporting, metricCollection)
|
||||
|
||||
l.Trace.Printf("[Prometheus] Starting to serve the metrics.\n") |
||||
|
||||
go func() { |
||||
|
||||
if cfg.Prometheus.MetricsExport { |
||||
|
||||
//http.Handle(cfg.Prometheus.MetricsRoute, promhttp.Handler())
|
||||
http.Handle(cfg.Prometheus.MetricsRoute, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) |
||||
|
||||
log.Fatal(http.ListenAndServe(":"+cfg.Prometheus.MetricsPort, nil))
||||
|
||||
} |
||||
|
||||
}() |
||||
|
||||
} |
@ -0,0 +1,23 @@ |
||||
module github.com/Cyclops-Labs/cyclops-4-hpc.git/cyclops-collectors/blockstorage-collector |
||||
|
||||
go 1.13 |
||||
|
||||
require ( |
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect |
||||
github.com/go-openapi/runtime v0.21.0 |
||||
github.com/go-openapi/swag v0.19.15 // indirect |
||||
github.com/golang/protobuf v1.5.2 // indirect |
||||
github.com/gophercloud/gophercloud v0.23.0 |
||||
github.com/magiconair/properties v1.8.5 // indirect |
||||
github.com/mailru/easyjson v0.7.7 // indirect |
||||
github.com/prometheus/client_golang v1.11.0 |
||||
github.com/segmentio/kafka-go v0.4.23 |
||||
github.com/spf13/afero v1.6.0 // indirect |
||||
github.com/spf13/viper v1.9.0 |
||||
gitlab.com/cyclops-utilities/datamodels v0.0.0-20191016132854-e9313e683e5b |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr v0.0.1 |
||||
gitlab.com/cyclops-utilities/logging v0.0.0-20200914110347-ca1d02efd346 |
||||
golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 // indirect |
||||
) |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,93 @@ |
||||
# Welcome to the configuration file for this |
||||
# |
||||
# ██████╗██╗ ██╗ ██████╗██╗ ██████╗ ██████╗ ███████╗ |
||||
# ██╔════╝╚██╗ ██╔╝██╔════╝██║ ██╔═══██╗██╔══██╗██╔════╝ |
||||
# ██║ ╚████╔╝ ██║ ██║ ██║ ██║██████╔╝███████╗ |
||||
# ██║ ╚██╔╝ ██║ ██║ ██║ ██║██╔═══╝ ╚════██║ |
||||
# ╚██████╗ ██║ ╚██████╗███████╗╚██████╔╝██║ ███████║ |
||||
# ╚═════╝ ╚═╝ ╚═════╝╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ |
||||
# |
||||
# ██╗ █████╗ ██████╗ ███████╗ |
||||
# ██║ ██╔══██╗██╔══██╗██╔════╝ |
||||
# ██║ ███████║██████╔╝███████╗ |
||||
# ██║ ██╔══██║██╔══██╗╚════██║ |
||||
# ███████╗██║ ██║██████╔╝███████║ |
||||
# ╚══════╝╚═╝ ╚═╝╚═════╝ ╚══════╝ |
||||
# |
||||
# collector! |
||||
|
||||
[APIKEY] |
||||
Enabled = true |
||||
Key = "X-API-KEY" |
||||
Place = "header" |
||||
Token = "1234567890abcdefghi" |
||||
|
||||
[EVENTS]
# The collector reads two filter lists: NameFilters (matched against project
# names) and ProjectFilters (matched against project IDs).
NameFilters = [ "filter1", "filter2", "filter3" ]
ProjectFilters = [ ]
||||
|
||||
[GENERAL] |
||||
LogFile = "" |
||||
LogToConsole = true |
||||
# loglevel values can be one of the following: TRACE, DEBUG, INFO, WARNING, ERROR |
||||
LogLevel = "TRACE" |
||||
ObjectsPeriodicity = 480 |
||||
Periodicity = 15 |
||||
PrometheusPeriodicity = 60 |
||||
|
||||
[HEAPPE] |
||||
Username = "" |
||||
Password = "" |
||||
GroupResourceUsageReportUri = "" |
||||
AuthenticateUserPasswordUri = "" |
||||
|
||||
[KAFKA] |
||||
Brokers = [ "broker-1-IP:broker-1-PORT", "broker-2-IP:broker-2-PORT", "broker-3-IP:broker-3-PORT" ] |
||||
# -1 for the most recent |
||||
# -2 for the first in the partition |
||||
# Anyother for a specific offset |
||||
Offset = "-1" |
||||
Partition = "0" |
||||
SizeMax = 10e6 |
||||
SizeMin = 10e3 |
||||
TLSEnabled = false |
||||
TopicEEngine = "Events" |
||||
TopicUDR = "UDR" |
||||
|
||||
[KEYCLOAK] |
||||
ClientID = "SERVICE" |
||||
ClientSecret = "00000000-0000-0000-0000-00000000" |
||||
Enabled = true |
||||
Host = "0.0.0.0" |
||||
Port = 8080 |
||||
Realm = "Development" |
||||
RedirectURL = "" |
||||
UseHttp = true |
||||
|
||||
[LIEUTENANT] |
||||
Host = "lieutenant:4010" |
||||
Token = "" |
||||
|
||||
[OPENSTACK] |
||||
Domain = "" |
||||
Keystone = "" |
||||
Password = "" |
||||
Project = "" |
||||
Region = "" |
||||
User = "" |
||||
|
||||
[PROMETHEUS] |
||||
Host = "prometheus:9000" |
||||
MetricsExport = true |
||||
MetricsPort = "9000" |
||||
MetricsRoute = "/metrics" |
||||
|
||||
[RGW] |
||||
AccessKey = "" |
||||
AdminPath = "" |
||||
Region = "" |
||||
SecretAccessKey = "" |
||||
ServerURL = "" |
||||
|
||||
[SERVICES] |
||||
CustomerDB = "localhost:8400" |
||||
EventsEngine = "localhost:8500" |
@ -0,0 +1,17 @@ |
||||
version: '3' |
||||
|
||||
services: |
||||
|
||||
collectors: |
||||
environment: |
||||
WAIT_AFTER_HOSTS: 30 |
||||
image: blockstorage-collector:latest |
||||
networks: |
||||
- collectorsnet |
||||
restart: always |
||||
volumes: |
||||
- ${PWD}/config.toml:/config.toml |
||||
|
||||
networks: |
||||
collectorsnet: |
||||
driver: bridge |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,25 @@ |
||||
# NETWORK COLLECTOR

OpenStack Network collector written in Go.

## How to build

The service is built with a multistage Docker build: everything is compiled inside an image that contains the full Golang toolchain, and the resulting executable is then copied into a minimal image based on the scratch image.

The build folder at the root of the repo contains a script called start.sh, whose invocation accepts "Dev" as an optional parameter. Running the script with this parameter uses the local copy of the repository as the base for building the service's Docker image; running it without the parameter builds the service entirely from sources.

```
./start.sh [Dev]
```

The default branch used when building from sources is "master"; it can be changed, along with a few other parameters, by editing the script.

Using the optional [Dev] argument builds from the code present in the repo at the moment of invocation.

## How to run

The run folder at the root of the repo contains a docker-compose sample file and a config.toml sample file. Once they are configured with appropriate data, start the service with:

```
docker-compose up -d
```
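
To follow the collector logs once the service is up (assuming the service keeps the `collectors` name used in the sample docker-compose file):

```
docker-compose logs -f collectors
```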
@ -0,0 +1,402 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"os" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/gophercloud/gophercloud" |
||||
"github.com/gophercloud/gophercloud/openstack" |
||||
"github.com/gophercloud/gophercloud/openstack/identity/v3/projects" |
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" |
||||
"github.com/gophercloud/gophercloud/pagination" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
datamodels "gitlab.com/cyclops-utilities/datamodels" |
||||
eeEvent "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client/event_management" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
collector = "Network" |
||||
objects = "ip" |
||||
remotelist []*eeModels.MinimalState |
||||
collectionStart int64 |
||||
) |
||||
|
||||
// collect handles the process of retrieving the information from the system.
|
||||
func collect() { |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been started.\n") |
||||
|
||||
collectionStart = time.Now().UnixNano() |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Start Time"}).Set(float64(collectionStart)) |
||||
|
||||
opts := gophercloud.AuthOptions{ |
||||
IdentityEndpoint: cfg.OpenStack.Keystone, |
||||
Username: cfg.OpenStack.User, |
||||
Password: cfg.OpenStack.Password, |
||||
DomainName: cfg.OpenStack.Domain, |
||||
} |
||||
|
||||
if len(cfg.OpenStack.Project) > 0 { |
||||
|
||||
opts.TenantName = cfg.OpenStack.Project |
||||
|
||||
} |
||||
|
||||
provider, e := openstack.AuthenticatedClient(opts) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error authenticating against OpenStack. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
listOpts := projects.ListOpts{ |
||||
Enabled: gophercloud.Enabled, |
||||
} |
||||
|
||||
authClient, e := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error creating the collector client. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
allPages, e := projects.List(authClient, listOpts).AllPages() |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error listing all the pages. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
allProjects, e := projects.ExtractProjects(allPages) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error listing all the projects. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[COLLECTION] Querying events engine service for list of known and not terminated floating ips") |
||||
|
||||
resourceType := "floatingip" |
||||
|
||||
eeParams := eeEvent.NewListStatesParams().WithResource(&resourceType).WithRegion(&cfg.OpenStack.Region) |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()

r, e := reportClient.EventManagement.ListStates(ctx, eeParams)
||||
|
||||
// Clears the remotelist between runs
|
||||
remotelist = nil |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Something went wrong while retrieving the usage from the system, check with the administrator. Error: %v.\n", e) |
||||
|
||||
} else { |
||||
|
||||
remotelist = r.Payload |
||||
|
||||
} |
||||
|
||||
eeCount := len(remotelist) |
||||
apiCount := 0 |
||||
|
||||
l.Trace.Printf("[COLLECTION] (BEFORE) Existing count of floating IPs at remote [ %v ].\n", len(remotelist)) |
||||
|
||||
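// allProjectsLoop is labeled so the project filters below can skip an entire
// project with "continue allProjectsLoop" from inside the inner filter loops.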
allProjectsLoop: |
||||
for _, project := range allProjects { |
||||
|
||||
l.Trace.Printf("[COLLECTION] Found project [ %v ] with ID [ %v ]. Proceeding to get list of floating IPs.\n", project.Name, project.ID) |
||||
|
||||
networkClient, e := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ |
||||
Region: cfg.OpenStack.Region, |
||||
}) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error creating network client. Error: %v\n", e) |
||||
|
||||
continue |
||||
|
||||
} |
||||
|
||||
opts := floatingips.ListOpts{ |
||||
TenantID: project.ID, |
||||
} |
||||
|
||||
// Filter by project id:
|
||||
for _, filter := range cfg.ProjectFilters { |
||||
|
||||
if strings.Contains(strings.ToLower(project.ID), strings.ToLower(filter)) && filter != "" { |
||||
|
||||
l.Debug.Printf("[COLLECTION] The Project [ %v ] matches filter [ %v ] and won't be further processed.", project.ID, filter) |
||||
|
||||
continue allProjectsLoop |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
// Filter by project name:
|
||||
for _, filter := range cfg.NameFilters { |
||||
|
||||
if strings.Contains(strings.ToLower(project.Name), strings.ToLower(filter)) && filter != "" { |
||||
|
||||
l.Debug.Printf("[COLLECTION] The Project [ %v ] matches filter [ %v ] and won't be further processed.", project.Name, filter) |
||||
|
||||
continue allProjectsLoop |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
pager := floatingips.List(networkClient, opts) |
||||
|
||||
e = pager.EachPage(func(page pagination.Page) (bool, error) { |
||||
|
||||
ipList, e := floatingips.ExtractFloatingIPs(page) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error processing the lists of active resources. Error: %+v\n", e) |
||||
|
||||
return false, e |
||||
|
||||
} |
||||
|
||||
apiCount += len(ipList) |
||||
|
||||
for _, ip := range ipList { |
||||
|
||||
// "ip" will be IP.IP
|
||||
l.Trace.Printf("[COLLECTION] Found floating IP [ %v ] with ID [ %v ]. Parameters: Status [ %v ], Fixed IP [ %v ] and Floating Network ID [ %v ].\n", |
||||
ip.FloatingIP, ip.ID, ip.Status, ip.FixedIP, ip.FloatingNetworkID) |
||||
|
||||
// Here comes the transformation of the information retrieved into either
|
||||
md := make(datamodels.JSONdb) |
||||
md["region"] = cfg.OpenStack.Region |
||||
md["floatingnetworkid"] = ip.FloatingNetworkID |
||||
|
||||
evTime := int64(time.Now().Unix()) |
||||
evLast := getStatus(ip.Status) |
||||
|
||||
// events or usage reports to be sent.
|
||||
event := eeModels.Event{ |
||||
Account: project.ID, |
||||
EventTime: &evTime, |
||||
LastEvent: &evLast, |
||||
MetaData: md, |
||||
Region: cfg.OpenStack.Region, |
||||
ResourceID: ip.ID, |
||||
ResourceName: ip.FloatingIP, |
||||
ResourceType: "floatingip", |
||||
} |
||||
|
||||
report(event) |
||||
|
||||
//if this object exists in remote list then lets remove it
|
||||
for i, object := range remotelist { |
||||
|
||||
if strings.Compare(object.Account, project.ID) == 0 && |
||||
strings.Compare(object.ResourceID, ip.ID) == 0 && |
||||
strings.Compare(object.ResourceName, ip.FloatingIP) == 0 { |
||||
|
||||
l.Debug.Printf("[COLLECTION] Event send cleaned from the processing list..\n") |
||||
|
||||
remotelist = append(remotelist[:i], remotelist[i+1:]...) |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
return true, nil |
||||
|
||||
}) |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[COLLECTION] (AFTER) Remaining count of floating IPs at remote which were left unprocessed [ %v ].\n", len(remotelist)) |
||||
|
||||
// Now, for all remaining floating IPs, send a terminated status.
|
||||
for _, object := range remotelist { |
||||
|
||||
l.Debug.Printf("[COLLECTION] Sending termination for zombie data in the system..\n") |
||||
|
||||
evTime := int64(time.Now().Unix()) |
||||
evLast := getStatus("terminated") |
||||
|
||||
// events or usage reports to be sent.
|
||||
event := eeModels.Event{ |
||||
Account: object.Account, |
||||
EventTime: &evTime, |
||||
LastEvent: &evLast, |
||||
MetaData: object.MetaData, |
||||
Region: cfg.OpenStack.Region, |
||||
ResourceID: object.ResourceID, |
||||
ResourceName: object.ResourceName, |
||||
ResourceType: "floatingip", |
||||
} |
||||
|
||||
report(event) |
||||
|
||||
} |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total IPs reported by OS API"}).Set(float64(apiCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total IPs from EventEngine"}).Set(float64(eeCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total IPs forcefully TERMINATED"}).Set(float64(len(remotelist))) |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Processing Time"}).Set(float64(time.Now().UnixNano()-collectionStart) / float64(time.Millisecond)) |
||||
|
||||
l.Warning.Printf("[COLLECTION] Completed.\n - OS Report: %v\n - EE Report: %v\n - Forced Termination: %v\n - Processing Time: %v[ms]\n", apiCount, eeCount, len(remotelist), float64(time.Now().UnixNano()-collectionStart)/float64(time.Millisecond)) |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been finished.\n") |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// getStatus job is to normalize the event state returned by the collectors.
|
||||
// Parameters:
|
||||
// - state: string returned by the system.
|
||||
// Returns:
|
||||
// - status: normalized state to be returned.
|
||||
func getStatus(state string) (status string) { |
||||
|
||||
switch strings.ToUpper(state) { |
||||
|
||||
case "ACTIVE": |
||||
|
||||
status = "active" |
||||
|
||||
case "ATTACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "AVAILABLE": |
||||
|
||||
status = "active" |
||||
|
||||
case "BUILD": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "CREATING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DELETING": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DETACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DOWN": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "ERROR": |
||||
|
||||
status = "error" |
||||
|
||||
case "ERROR_DELETING": |
||||
|
||||
status = "error" |
||||
|
||||
case "EXTENDING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "HARD_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "IN-USE": |
||||
|
||||
status = "active" |
||||
|
||||
case "MAINTENANCE": |
||||
|
||||
status = "active" |
||||
|
||||
case "PAUSED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "RESCUED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESERVED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RETYPING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SHUTOFF": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SOFT_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "STOPPED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SUSPENDED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "TERMINATED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "VERIFY_RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[REPORT] State received from the system [ %v ] normalized to [ %v ]", state, status) |
||||
|
||||
return status |
||||
|
||||
} |
@ -0,0 +1,246 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"strings" |
||||
|
||||
"github.com/spf13/viper" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// The following structs are part of the configuration struct which
|
||||
// acts as the main reference for configuration parameters in the system.
|
||||
type apiKey struct { |
||||
Enabled bool |
||||
Key string |
||||
Place string |
||||
Token string |
||||
} |
||||
|
||||
type configuration struct { |
||||
APIKey apiKey |
||||
General generalConfig |
||||
Heappe heappeConfig |
||||
Kafka kafkaConfig |
||||
Keycloak keycloakConfig |
||||
Lieutenant lieutenantConfig |
||||
OpenStack openStackConfig |
||||
Prometheus prometheusConfig |
||||
RGW rgwConfig |
||||
NameFilters []string |
||||
ProjectFilters []string |
||||
Services map[string]string |
||||
} |
||||
|
||||
type generalConfig struct { |
||||
InsecureSkipVerify bool |
||||
LogFile string |
||||
LogLevel string |
||||
LogToConsole bool |
||||
ObjectsPeriodicity int |
||||
Periodicity int |
||||
PrometheusPeriodicity int |
||||
} |
||||
|
||||
type heappeConfig struct { |
||||
Username string |
||||
Password string |
||||
GroupResourceUsageReportURI string |
||||
AuthenticateUserPasswordURI string |
||||
} |
||||
|
||||
type kafkaConfig struct { |
||||
Brokers []string |
||||
MaxBytes int |
||||
MinBytes int |
||||
Offset int64 |
||||
Partition int |
||||
TLSEnabled bool |
||||
TopicUDR string |
||||
TopicEEngine string |
||||
} |
||||
|
||||
type keycloakConfig struct { |
||||
ClientID string `json:"client_id"` |
||||
ClientSecret string `json:"client_secret"` |
||||
Enabled bool `json:"enabled"` |
||||
Host string `json:"host"` |
||||
Port int `json:"port"` |
||||
Realm string `json:"realm"` |
||||
RedirectURL string `json:"redirect_url"` |
||||
UseHTTP bool `json:"use_http"` |
||||
} |
||||
|
||||
type lieutenantConfig struct { |
||||
Host string |
||||
Token string |
||||
} |
||||
|
||||
type openStackConfig struct { |
||||
Domain string |
||||
Keystone string |
||||
Password string |
||||
Project string |
||||
Region string |
||||
User string |
||||
} |
||||
|
||||
type prometheusConfig struct { |
||||
Host string |
||||
MetricsExport bool |
||||
MetricsPort string |
||||
MetricsRoute string |
||||
} |
||||
|
||||
type rgwConfig struct { |
||||
AccessKeyID string |
||||
AdminPath string |
||||
Region string |
||||
SecretAccessKey string |
||||
ServerURL string |
||||
} |
||||
|
||||
// dumpConfig dumps the configuration in JSON format to the log system.
// It makes use of the masking function to keep some secrecy in the log.
|
||||
// Parameters:
|
||||
// - c: configuration type containing the config present in the system.
|
||||
func dumpConfig(c configuration) { |
||||
cfgCopy := c |
||||
|
||||
// deal with configuration params that should be masked
|
||||
cfgCopy.APIKey.Token = masked(c.APIKey.Token, 4) |
||||
cfgCopy.Heappe.Username = masked(c.Heappe.Username, 4) |
||||
cfgCopy.Heappe.Password = masked(c.Heappe.Password, 4) |
||||
cfgCopy.Keycloak.ClientSecret = masked(c.Keycloak.ClientSecret, 4) |
||||
cfgCopy.Lieutenant.Token = masked(c.Lieutenant.Token, 4) |
||||
cfgCopy.OpenStack.Password = masked(c.OpenStack.Password, 4) |
||||
cfgCopy.RGW.AccessKeyID = masked(c.RGW.AccessKeyID, 4) |
||||
cfgCopy.RGW.SecretAccessKey = masked(c.RGW.SecretAccessKey, 4) |
||||
|
||||
// MarshalIndent creates a string containing newlines; each line starts with
// two spaces, and two spaces are added for each indent level.
|
||||
configJSON, _ := json.MarshalIndent(cfgCopy, " ", " ") |
||||
|
||||
l.Info.Printf("[CONFIG] Configuration settings:\n") |
||||
l.Info.Printf("%v\n", string(configJSON)) |
||||
|
||||
} |
||||
|
||||
// masked returns asterisks in place of the characters of a string, keeping
// only the last unmaskedChars characters visible.
|
||||
// Parameters:
|
||||
// - s: string to be masked
|
||||
// - unmaskedChars: int with the amount (counting from the end of the string) of
|
||||
// characters to keep unmasked.
|
||||
// Returns:
|
||||
// - returnString: the s string passed as parameter masked.
|
||||
func masked(s string, unmaskedChars int) (returnString string) { |
||||
|
||||
if len(s) <= unmaskedChars { |
||||
|
||||
returnString = s |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
asteriskString := strings.Repeat("*", (len(s) - unmaskedChars)) |
||||
returnString = asteriskString + string(s[len(s)-unmaskedChars:]) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// parseConfig handles the filling of the config struct with the data Viper gets
|
||||
// from the configuration file.
|
||||
// Returns:
|
||||
// - c: the configuration struct filled with the relevant parsed configuration.
|
||||
func parseConfig() (c configuration) { |
||||
|
||||
l.Trace.Printf("[CONFIG] Retrieving configuration.\n") |
||||
|
||||
c = configuration{ |
||||
|
||||
APIKey: apiKey{ |
||||
Enabled: viper.GetBool("apikey.enabled"), |
||||
Key: viper.GetString("apikey.key"), |
||||
Place: viper.GetString("apikey.place"), |
||||
Token: viper.GetString("apikey.token"), |
||||
}, |
||||
|
||||
General: generalConfig{ |
||||
InsecureSkipVerify: viper.GetBool("general.insecureskipverify"), |
||||
LogFile: viper.GetString("general.logfile"), |
||||
LogLevel: viper.GetString("general.loglevel"), |
||||
LogToConsole: viper.GetBool("general.logtoconsole"), |
||||
ObjectsPeriodicity: viper.GetInt("general.objectsperiodicity"), |
||||
Periodicity: viper.GetInt("general.periodicity"), |
||||
PrometheusPeriodicity: viper.GetInt("general.prometheusperiodicity"), |
||||
}, |
||||
|
||||
Heappe: heappeConfig{ |
||||
Username: viper.GetString("heappe.username"), |
||||
Password: viper.GetString("heappe.password"), |
||||
GroupResourceUsageReportURI: viper.GetString("heappe.groupResourceUsageReportUri"), |
||||
AuthenticateUserPasswordURI: viper.GetString("heappe.authenticateUserPasswordUri"), |
||||
}, |
||||
|
||||
Kafka: kafkaConfig{ |
||||
Brokers: viper.GetStringSlice("kafka.brokers"), |
||||
MaxBytes: viper.GetInt("kafka.sizemax"), |
||||
MinBytes: viper.GetInt("kafka.sizemin"), |
||||
Offset: viper.GetInt64("kafka.offset"), |
||||
Partition: viper.GetInt("kafka.partition"), |
||||
TLSEnabled: viper.GetBool("kafka.tlsenabled"), |
||||
TopicUDR: viper.GetString("kafka.topicudr"), |
||||
TopicEEngine: viper.GetString("kafka.topiceengine"), |
||||
}, |
||||
|
||||
Keycloak: keycloakConfig{ |
||||
ClientID: viper.GetString("keycloak.clientid"), |
||||
ClientSecret: viper.GetString("keycloak.clientsecret"), |
||||
Enabled: viper.GetBool("keycloak.enabled"), |
||||
Host: viper.GetString("keycloak.host"), |
||||
Port: viper.GetInt("keycloak.port"), |
||||
Realm: viper.GetString("keycloak.realm"), |
||||
RedirectURL: viper.GetString("keycloak.redirecturl"), |
||||
UseHTTP: viper.GetBool("keycloak.usehttp"), |
||||
}, |
||||
|
||||
Lieutenant: lieutenantConfig{ |
||||
Host: viper.GetString("lieutenant.host"), |
||||
Token: viper.GetString("lieutenant.token"), |
||||
}, |
||||
|
||||
OpenStack: openStackConfig{ |
||||
Domain: viper.GetString("openstack.domain"), |
||||
Keystone: viper.GetString("openstack.keystone"), |
||||
Password: viper.GetString("openstack.password"), |
||||
Project: viper.GetString("openstack.project"), |
||||
Region: viper.GetString("openstack.region"), |
||||
User: viper.GetString("openstack.user"), |
||||
}, |
||||
|
||||
Prometheus: prometheusConfig{ |
||||
Host: viper.GetString("prometheus.host"), |
||||
MetricsExport: viper.GetBool("prometheus.metricsexport"), |
||||
MetricsPort: viper.GetString("prometheus.metricsport"), |
||||
MetricsRoute: viper.GetString("prometheus.metricsroute"), |
||||
}, |
||||
|
||||
RGW: rgwConfig{ |
||||
AccessKeyID: viper.GetString("rgw.accesskey"), |
||||
AdminPath: viper.GetString("rgw.adminpath"), |
||||
Region: viper.GetString("rgw.region"), |
||||
SecretAccessKey: viper.GetString("rgw.secretaccesskey"), |
||||
ServerURL: viper.GetString("rgw.serverurl"), |
||||
}, |
||||
|
||||
NameFilters: viper.GetStringSlice("events.namefilters"), |
||||
ProjectFilters: viper.GetStringSlice("events.projectfilters"), |
||||
Services: viper.GetStringMapString("services"), |
||||
} |
||||
|
||||
return |
||||
|
||||
} |
@ -0,0 +1,137 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"crypto/tls" |
||||
"encoding/json" |
||||
"strconv" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/segmentio/kafka-go" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
type kafkaHandlerConf struct { |
||||
out []kafkaPackage |
||||
} |
||||
|
||||
type kafkaPackage struct { |
||||
topic string |
||||
partition int |
||||
channel chan interface{} |
||||
} |
||||
|
||||
// kafkaHandler's job is to check the config that it receives and initialize the
|
||||
// goroutines necessary to satisfy the configuration it receives.
|
||||
// Parameters:
|
||||
// - kH: kafkaHandlerConf struct with the specific configuration used by the
|
||||
// service.
|
||||
func kafkaHandler(kH kafkaHandlerConf) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing the receivers/senders according to the provided configuration.\n") |
||||
|
||||
if kH.out != nil { |
||||
|
||||
for _, p := range kH.out { |
||||
|
||||
go kafkaSender(p.topic, p.partition, p.channel) |
||||
|
||||
} |
||||
|
||||
} |
||||
} |
||||
|
||||
// kafkaSender is the abstracted interface handling the sending of data through
|
||||
// kafka topics.
|
||||
// Parameters:
|
||||
// - t: string containing the kafka-topic in use.
|
||||
// - p: int containing the kafka-topic partition.
|
||||
// - c: interface{} channel to receive the data that will be marshalled into
|
||||
// JSON and then transmitted via kafka.
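// Example: for topic "UDR" and partition 0, messages are written with the key
// "UDR-0" (the key below is built as topic + "-" + partition).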
|
||||
func kafkaSender(t string, p int, c chan interface{}) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing kafka sender for topic: %v.\n", t) |
||||
|
||||
conf := kafka.WriterConfig{ |
||||
Brokers: cfg.Kafka.Brokers, |
||||
Topic: t, |
||||
Balancer: &kafka.LeastBytes{}, |
||||
} |
||||
|
||||
if cfg.Kafka.TLSEnabled { |
||||
|
||||
dialer := &kafka.Dialer{ |
||||
Timeout: 10 * time.Second, |
||||
DualStack: true, |
||||
TLS: &tls.Config{ |
||||
MinVersion: tls.VersionTLS12, |
||||
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, |
||||
PreferServerCipherSuites: true, |
||||
InsecureSkipVerify: cfg.General.InsecureSkipVerify, |
||||
}, |
||||
} |
||||
|
||||
conf.Dialer = dialer |
||||
|
||||
} |
||||
|
||||
w := kafka.NewWriter(conf) |
||||
defer w.Close() |
||||
|
||||
for { |
||||
|
||||
v, ok := <-c |
||||
|
||||
if !ok { |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Go Channel Problems"}).Inc() |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
go func() { |
||||
|
||||
m, e := json.Marshal(&v) |
||||
|
||||
if e == nil { |
||||
|
||||
l.Info.Printf("[KAFKA] Object received through the channel. Starting its processing.\n") |
||||
|
||||
err := w.WriteMessages(context.Background(), |
||||
kafka.Message{ |
||||
Key: []byte(t + "-" + strconv.Itoa(p)), |
||||
Value: m, |
||||
}, |
||||
) |
||||
|
||||
if err != nil { |
||||
|
||||
l.Warning.Printf("[KAFKA] There was a problem when sending the record through the stream. Error: %v\n", err) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Kafka Stream Problems"}).Inc() |
||||
|
||||
} else { |
||||
|
||||
l.Info.Printf("[KAFKA] Object added to the stream succesfully. Topic: %v.\n", t) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "OK", "reason": "Object sent"}).Inc() |
||||
|
||||
} |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[KAFKA] The information to be sent into the stream cannot be marshalled, please check with the administrator. Error: %v\n", e) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "JSON Marshalling"}).Inc() |
||||
|
||||
} |
||||
|
||||
return |
||||
|
||||
}() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,188 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"flag" |
||||
"fmt" |
||||
"net/url" |
||||
"os" |
||||
"reflect" |
||||
"time" |
||||
|
||||
httptransport "github.com/go-openapi/runtime/client" |
||||
"github.com/spf13/viper" |
||||
cusClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client" |
||||
eeClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
udrModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
version string |
||||
cfg configuration |
||||
pipeU chan interface{} |
||||
pipeE chan interface{} |
||||
reportClient *eeClient.EventEngineManagementAPI |
||||
zombiesClient *cusClient.CustomerDatabaseManagement |
||||
) |
||||
|
||||
// kafkaStart handles the initialization of the kafka service.
|
||||
// This is a sample function showing the most basic usage of the kafka service; it
|
||||
// should be redefined to match the needs of the service.
|
||||
// Returns:
|
||||
// - chUDR, chEvents: interface{} channels used to send data through the kafka
|
||||
// topics configured for UDRs and events respectively.
|
||||
func kafkaStart() (chUDR, chEvents chan interface{}) { |
||||
|
||||
l.Trace.Printf("[MAIN] Intializing Kafka\n") |
||||
|
||||
chUDR = make(chan interface{}, 1000) |
||||
chEvents = make(chan interface{}, 1000) |
||||
|
||||
handler := kafkaHandlerConf{ |
||||
out: []kafkaPackage{ |
||||
{ |
||||
topic: cfg.Kafka.TopicUDR, |
||||
channel: chUDR, |
||||
}, |
||||
{ |
||||
topic: cfg.Kafka.TopicEEngine, |
||||
channel: chEvents, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
kafkaHandler(handler) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// report handles the process of sending the event or usage to the respective
|
||||
// service.
|
||||
// Parameters:
|
||||
// - object: an interface{} reference with the event/usage to be sent.
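// Illustrative usage (field values are hypothetical):
//   report(udrModels.Usage{Account: "abcd1234", ResourceType: "blockstorage", Unit: "GB", Usage: 12.5})
// would be detected as a UDR object and forwarded to the UDR kafka topic via pipeU.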
|
||||
func report(object interface{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] The reporting process has been started.\n") |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(udrModels.Usage{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] UDR Object detected. Sending through kafka.\n") |
||||
|
||||
pipeU <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(eeModels.Event{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] Event Object detected. Sending through kafka.\n") |
||||
|
||||
pipeE <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
fail := "the provided object doesn't belong to UDR or EE models" |
||||
|
||||
l.Warning.Printf("[REPORT] Something went wrong while processing the object, check with the administrator. Error: %v.\n", fail) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
func init() { |
||||
|
||||
confFile := flag.String("conf", "./config", "configuration file path (without toml extension)") |
||||
|
||||
flag.Parse() |
||||
|
||||
//placeholder code as the default value will ensure this situation will never arise
|
||||
if len(*confFile) == 0 { |
||||
|
||||
fmt.Printf("Usage: Collector-TYPE -conf=/path/to/configuration/file\n") |
||||
|
||||
os.Exit(0) |
||||
|
||||
} |
||||
|
||||
// err := gcfg.ReadFileInto(&cfg, *confFile)
|
||||
viper.SetConfigName(*confFile) // name of config file (without extension)
|
||||
viper.SetConfigType("toml") |
||||
viper.AddConfigPath(".") // path to look for the config file in
|
||||
|
||||
err := viper.ReadInConfig() // Find and read the config file
|
||||
|
||||
if err != nil { |
||||
|
||||
// TODO(murp) - differentiate between file not found and formatting error in
|
||||
// config file)
|
||||
fmt.Printf("[MAIN] Failed to parse configuration data: %s\nCorrect usage: Collector-TYPE -conf=/path/to/configuration/file\n", err) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
cfg = parseConfig() |
||||
|
||||
e := l.InitLogger(cfg.General.LogFile, cfg.General.LogLevel, cfg.General.LogToConsole) |
||||
|
||||
if e != nil { |
||||
|
||||
fmt.Printf("[MAIN] Initialization of the logger failed. Error: %v\n", e) |
||||
|
||||
} |
||||
|
||||
l.Info.Printf("Cyclops Labs Collector TYPE version %v initialized\n", version) |
||||
|
||||
dumpConfig(cfg) |
||||
|
||||
// Let's start the HTTP Server and Gauges for Prometheus
|
||||
prometheusStart() |
||||
|
||||
} |
||||
|
||||
func main() { |
||||
|
||||
// If needed here is the initialization for the kafka sender:
|
||||
pipeU, pipeE = kafkaStart() |
||||
|
||||
// Here we start the client instantiation to send reports to the EventsEngine.
|
||||
eeConfig := eeClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["eventsengine"], |
||||
Path: eeClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
reportClient = eeClient.New(eeConfig) |
||||
|
||||
// Here we start the client instantiation to get the canceled customers to check for zombies.
|
||||
cusConfig := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
zombiesClient = cusClient.New(cusConfig) |
||||
|
||||
// Let's launch the first collection process.
|
||||
go collect() |
||||
|
||||
// cfg.General.Periodicity should be changed to cfg.General.ObjectsPeriodicity
|
||||
// in the case you need the long (8h) periodicity.
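// With the values in the sample config.toml, Periodicity = 15 triggers a
// collection every 15 minutes, while ObjectsPeriodicity = 480 would give the
// 8 hour cycle mentioned above.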
|
||||
for range time.NewTicker(time.Duration(cfg.General.Periodicity) * time.Minute).C { |
||||
|
||||
go collect() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,95 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"log" |
||||
"net/http" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/prometheus/client_golang/prometheus/promhttp" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
metricReporting *prometheus.GaugeVec |
||||
metricCollection *prometheus.GaugeVec |
||||
metricTime *prometheus.GaugeVec |
||||
metricCount *prometheus.GaugeVec |
||||
) |
||||
|
||||
func prometheusStart() { |
||||
|
||||
reg := prometheus.NewPedanticRegistry() |
||||
|
||||
metricReporting = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "kafka_send_state", |
||||
Help: "Reporting information and Kafka topics usage", |
||||
}, |
||||
[]string{ |
||||
"reason", |
||||
"state", |
||||
"topic", |
||||
}, |
||||
) |
||||
|
||||
metricCollection = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "Collection", |
||||
Help: "Collection information and usages data", |
||||
}, |
||||
[]string{ |
||||
"account", |
||||
"event", |
||||
"reason", |
||||
"state", |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricTime = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "collection_time", |
||||
Help: "Different timing metrics", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricCount = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: objects + "_count", |
||||
Help: "Different VM Counts", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
reg.MustRegister(metricReporting, metricCollection, metricTime, metricCount) |
||||
//prometheus.MustRegister(metricReporting, metricCollection)
|
||||
|
||||
l.Trace.Printf("[Prometheus] Starting to serve the metrics.\n") |
||||
|
||||
go func() { |
||||
|
||||
if cfg.Prometheus.MetricsExport { |
||||
|
||||
//http.Handle(cfg.Prometheus.MetricsRoute, promhttp.Handler())
|
||||
http.Handle(cfg.Prometheus.MetricsRoute, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) |
||||
|
||||
go log.Fatal(http.ListenAndServe(":"+cfg.Prometheus.MetricsPort, nil)) |
||||
|
||||
} |
||||
|
||||
}() |
||||
|
||||
} |
@ -0,0 +1,23 @@ |
||||
module github.com/Cyclops-Labs/cyclops-4-hpc.git/cyclops-collectors/network-collector |
||||
|
||||
go 1.13 |
||||
|
||||
require ( |
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect |
||||
github.com/go-openapi/runtime v0.21.0 |
||||
github.com/go-openapi/swag v0.19.15 // indirect |
||||
github.com/golang/protobuf v1.5.2 // indirect |
||||
github.com/gophercloud/gophercloud v0.23.0 |
||||
github.com/magiconair/properties v1.8.5 // indirect |
||||
github.com/mailru/easyjson v0.7.7 // indirect |
||||
github.com/prometheus/client_golang v1.11.0 |
||||
github.com/segmentio/kafka-go v0.4.23 |
||||
github.com/spf13/afero v1.6.0 // indirect |
||||
github.com/spf13/viper v1.9.0 |
||||
gitlab.com/cyclops-utilities/datamodels v0.0.0-20191016132854-e9313e683e5b |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr v0.0.1 |
||||
gitlab.com/cyclops-utilities/logging v0.0.0-20200914110347-ca1d02efd346 |
||||
golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 // indirect |
||||
) |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,93 @@ |
||||
# Welcome to the configuration file for this |
||||
# |
||||
# ██████╗██╗ ██╗ ██████╗██╗ ██████╗ ██████╗ ███████╗ |
||||
# ██╔════╝╚██╗ ██╔╝██╔════╝██║ ██╔═══██╗██╔══██╗██╔════╝ |
||||
# ██║ ╚████╔╝ ██║ ██║ ██║ ██║██████╔╝███████╗ |
||||
# ██║ ╚██╔╝ ██║ ██║ ██║ ██║██╔═══╝ ╚════██║ |
||||
# ╚██████╗ ██║ ╚██████╗███████╗╚██████╔╝██║ ███████║ |
||||
# ╚═════╝ ╚═╝ ╚═════╝╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ |
||||
# |
||||
# ██╗ █████╗ ██████╗ ███████╗ |
||||
# ██║ ██╔══██╗██╔══██╗██╔════╝ |
||||
# ██║ ███████║██████╔╝███████╗ |
||||
# ██║ ██╔══██║██╔══██╗╚════██║ |
||||
# ███████╗██║ ██║██████╔╝███████║ |
||||
# ╚══════╝╚═╝ ╚═╝╚═════╝ ╚══════╝ |
||||
# |
||||
# collector! |
||||
|
||||
[APIKEY] |
||||
Enabled = true |
||||
Key = "X-API-KEY" |
||||
Place = "header" |
||||
Token = "1234567890abcdefghi" |
||||
|
||||
[EVENTS] |
||||
Filters = [ "filter1", "filter2", "filter3" ] |
||||
|
||||
[GENERAL] |
||||
LogFile = "" |
||||
LogToConsole = true |
||||
# loglevel values can be one of the following: TRACE, DEBUG, INFO, WARNING, ERROR |
||||
LogLevel = "TRACE" |
||||
ObjectsPeriodicity = 480 |
||||
Periodicity = 15 |
||||
PrometheusPeriodicity = 60 |
||||
|
||||
[HEAPPE] |
||||
Username = "" |
||||
Password = "" |
||||
GroupResourceUsageReportUri = "" |
||||
AuthenticateUserPasswordUri = "" |
||||
|
||||
[KAFKA] |
||||
Brokers = [ "broker-1-IP:broker-1-PORT", "broker-2-IP:broker-2-PORT", "broker-3-IP:broker-3-PORT" ] |
||||
# -1 for the most recent |
||||
# -2 for the first in the partition |
||||
# Any other value for a specific offset |
||||
Offset = "-1" |
||||
Partition = "0" |
||||
SizeMax = 10e6 |
||||
SizeMin = 10e3 |
||||
TLSEnabled = false |
||||
TopicEEngine = "Events" |
||||
TopicUDR = "UDR" |
||||
|
||||
[KEYCLOAK] |
||||
ClientID = "SERVICE" |
||||
ClientSecret = "00000000-0000-0000-0000-00000000" |
||||
Enabled = true |
||||
Host = "0.0.0.0" |
||||
Port = 8080 |
||||
Realm = "Development" |
||||
RedirectURL = "" |
||||
UseHttp = true |
||||
|
||||
[LIEUTENANT] |
||||
Host = "lieutenant:4010" |
||||
Token = "" |
||||
|
||||
[OPENSTACK] |
||||
Domain = "" |
||||
Keystone = "" |
||||
Password = "" |
||||
Project = "" |
||||
Region = "" |
||||
User = "" |
||||
|
||||
[PROMETHEUS] |
||||
Host = "prometheus:9000" |
||||
MetricsExport = true |
||||
MetricsPort = "9000" |
||||
MetricsRoute = "/metrics" |
||||
|
||||
[RGW] |
||||
AccessKey = "" |
||||
AdminPath = "" |
||||
Region = "" |
||||
SecretAccessKey = "" |
||||
ServerURL = "" |
||||
|
||||
[SERVICES] |
||||
CustomerDB = "localhost:8400" |
||||
EventsEngine = "localhost:8500" |
@ -0,0 +1,17 @@ |
||||
version: '3' |
||||
|
||||
services: |
||||
|
||||
collectors: |
||||
environment: |
||||
WAIT_AFTER_HOSTS: 30 |
||||
image: network-collector:latest |
||||
networks: |
||||
- collectorsnet |
||||
restart: always |
||||
volumes: |
||||
- ${PWD}/config.toml:/config.toml |
||||
|
||||
networks: |
||||
collectorsnet: |
||||
driver: bridge |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,25 @@ |
||||
# OBJECTS COLLECTOR |
||||
|
||||
OpenStack Objects collector written in go |
||||
|
||||
## How to build |
||||
|
||||
The service is built with a multistage Docker build: everything is compiled in an image that contains all the tooling needed to build Golang applications, and the resulting executable is then copied into a minimal image based on the scratch image. |
||||
|
||||
Within the build folder at the root of the repo there is a script called start.sh, whose invocation accepts "Dev" as a parameter. Running the script with this parameter uses the local version of the code in the repository as the base for building the service's Docker image; running it without the parameter builds the service entirely from sources. |
||||
|
||||
``` |
||||
./start.sh [Dev] |
||||
``` |
||||
|
||||
The initial branch used when building from sources is "master"; it, along with a few other parameters, can be changed by editing the script. |
||||
|
||||
Using the optional [Dev] argument takes the code present in the repo at the moment of invocation. |
||||
|
||||
## How to run |
||||
|
||||
Within the run folder at the root of the repo there are a docker-compose sample file and a config.toml sample file. Once they are configured with appropriate data, start the service by issuing the following command: |
||||
|
||||
``` |
||||
docker-compose up -d |
||||
``` |
@ -0,0 +1,317 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/hex" |
||||
"strings" |
||||
"time" |
||||
|
||||
rgw "github.com/myENA/radosgwadmin" |
||||
rcl "github.com/myENA/restclient" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/remeh/sizedwaitgroup" |
||||
datamodels "gitlab.com/cyclops-utilities/datamodels" |
||||
udrModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
collector = "Objects" |
||||
objects = "object" |
||||
collectionStart int64 |
||||
) |
||||
|
||||
// collect handles the process of retrieving the information from the system.
|
||||
func collect() { |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been started.\n") |
||||
|
||||
collectionStart = time.Now().UnixNano() |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Start Time"}).Set(float64(collectionStart)) |
||||
|
||||
// Here comes the logic to retrieve the information from the system.
|
||||
rcfg := &rgw.Config{ |
||||
AdminPath: cfg.RGW.AdminPath, |
||||
AccessKeyID: cfg.RGW.AccessKeyID, |
||||
ClientConfig: rcl.ClientConfig{ |
||||
ClientTimeout: rcl.Duration(time.Second * 30), |
||||
}, |
||||
SecretAccessKey: cfg.RGW.SecretAccessKey, |
||||
ServerURL: cfg.RGW.ServerURL, |
||||
} |
||||
|
||||
a, e := rgw.NewAdminAPI(rcfg) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Could not authenticate with RadosGW. Not obtaining data for this period. Error: %v\n", e.Error()) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
users, e := a.MListUsers(context.Background()) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Error reading project lists from RGW. Not obtaining data for this period. Error: %v\n", e.Error()) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
apiCount := 0 |
||||
dropCount := 0 |
||||
|
||||
swg := sizedwaitgroup.New(8) |
||||
|
||||
// fmt.Printf("Users = %v\n", users)
|
||||
for _, user := range users { |
||||
|
||||
// Goroutines start
|
||||
swg.Add() |
||||
go func(u string) { |
||||
|
||||
defer swg.Done() |
||||
|
||||
// filter out users which are not Openstack projects
|
||||
if filterOpenstackProject(u) { |
||||
|
||||
// get all buckets for given user
|
||||
bucketList, e := a.BucketList(context.Background(), u) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Error reading bucket list for user [ %v ] from RGW. Error: %v\n", u, e.Error()) |
||||
|
||||
} |
||||
|
||||
// l.Info.Printf("bucketlist = %v\n", bucketList)
|
||||
|
||||
totalBuckets := len(bucketList) |
||||
aggregateBucketSize := float64(0.0) |
||||
|
||||
apiCount += len(bucketList) |
||||
|
||||
for _, b := range bucketList { |
||||
|
||||
bucketStats, e := a.BucketStats(context.Background(), u, b) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Error reading bucket list for user [ %v ] from RGW. Error: %v\n", u, e.Error()) |
||||
|
||||
} |
||||
|
||||
if len(bucketStats) == 0 { |
||||
|
||||
l.Warning.Printf("[COLLECTION] The length of bucket stats = 0 for the bucket [ %v ]\n", b) |
||||
|
||||
} else { |
||||
|
||||
d := datamodels.JSONdb{ |
||||
"region": cfg.RGW.Region, |
||||
"bucket": b, |
||||
} |
||||
|
||||
// the Usage struct contains support for a number of different RGWs...
|
||||
// RGWMain, RGWShadow and a couple of others; here we just take
|
||||
// RGWMain as the base - if it's null, we don't publish anything to UDR
|
||||
// RGWMain contains SizeKbActual and SizeKb - SizeKb is what is read below;
|
||||
// SizeKbActual is the usage as perceived by the user, while SizeKb is a bit larger, with
|
||||
// the delta dependent on the basic minimum object size allocation
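// For example, a SizeKb value of 2097152 (KB) reported by RGW translates to a
// usage of 2 GB in the report built below.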
|
||||
if bucketStats[0].Usage.RGWMain != nil { |
||||
|
||||
usage := float64(bucketStats[0].Usage.RGWMain.SizeKb) |
||||
|
||||
if usage != 0 { |
||||
|
||||
// Here comes the transformation of the information retrieved into either
|
||||
// events or usage reports to be sent.
|
||||
usageReport := udrModels.Usage{ |
||||
Account: u, |
||||
Metadata: d, |
||||
ResourceType: "objectstorage", |
||||
Time: time.Now().Unix(), |
||||
Unit: "GB", |
||||
Usage: float64((usage / float64(1024)) / float64(1024)), |
||||
} |
||||
|
||||
report(usageReport) |
||||
|
||||
aggregateBucketSize += usageReport.Usage |
||||
|
||||
} |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[COLLECTION] There's no information for RGWMain for the bucket [ %v ]. Ignoring...\n", b) |
||||
|
||||
dropCount++ |
||||
|
||||
} |
||||
} |
||||
} |
||||
|
||||
l.Info.Printf("[COLLECTION] Wrote data for user [ %v ]. AggregateBucketSize [ %v ] distributed over [ %v ] buckets.\n", u, aggregateBucketSize, totalBuckets) |
||||
|
||||
} |
||||
|
||||
}(user) |
||||
|
||||
} |
||||
|
||||
swg.Wait() |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total Objects reported by OS API"}).Set(float64(apiCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total Objects DROPPED due to missing information"}).Set(float64(dropCount)) |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Processing Time"}).Set(float64(time.Now().UnixNano()-collectionStart) / float64(time.Millisecond)) |
||||
|
||||
l.Warning.Printf("[COLLECTION] Completed.\n OS Report: %v\n, Dropped: %v\n, Processing Time: %v[ms]\n", apiCount, dropCount, float64(time.Now().UnixNano()-collectionStart)/float64(time.Millisecond)) |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been finished.\n") |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// filterOpenstackProject checks if the user name looks like an Openstack
|
||||
// project.
|
||||
// Parameters:
|
||||
// - p: string representing the user.
|
||||
// Returns:
|
||||
// - a bool with the result of the check.
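// Example: a 32 character hexadecimal string such as
// "0123456789abcdef0123456789abcdef" passes the filter (OpenStack project IDs
// are typically UUIDs without dashes), while names of any other length or
// containing non-hexadecimal characters are rejected.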
|
||||
func filterOpenstackProject(p string) bool { |
||||
|
||||
_, e := hex.DecodeString(p) |
||||
|
||||
return (len(p) == 32) && (e == nil) |
||||
} |
||||
|
||||
// getStatus's job is to normalize the event state returned by the collectors.
|
||||
// Parameters:
|
||||
// - state: string returned by the system.
|
||||
// Returns:
|
||||
// - status: normalized state to be returned.
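// Examples: "IN-USE" and "AVAILABLE" normalize to "active", "SHUTOFF" to
// "inactive", and "DELETED" to "terminated"; states without a matching case are
// returned as an empty string.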
|
||||
func getStatus(state string) (status string) { |
||||
|
||||
switch strings.ToUpper(state) { |
||||
|
||||
case "ACTIVE": |
||||
|
||||
status = "active" |
||||
|
||||
case "ATTACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "AVAILABLE": |
||||
|
||||
status = "active" |
||||
|
||||
case "BUILD": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "CREATING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DELETING": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DETACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DOWN": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "ERROR": |
||||
|
||||
status = "error" |
||||
|
||||
case "ERROR_DELETING": |
||||
|
||||
status = "error" |
||||
|
||||
case "EXTENDING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "HARD_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "IN-USE": |
||||
|
||||
status = "active" |
||||
|
||||
case "MAINTENANCE": |
||||
|
||||
status = "active" |
||||
|
||||
case "PAUSED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "RESCUED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESERVED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RETYPING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SHUTOFF": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SOFT_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "STOPPED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SUSPENDED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "TERMINATED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "VERIFY_RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[REPORT] State received from the system [ %v ] normalized to [ %v ]", state, status) |
||||
|
||||
return status |
||||
|
||||
} |
@ -0,0 +1,246 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"strings" |
||||
|
||||
"github.com/spf13/viper" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// The following structs are part of the configuration struct which
|
||||
// acts as the main reference for configuration parameters in the system.
|
||||
type apiKey struct { |
||||
Enabled bool |
||||
Key string |
||||
Place string |
||||
Token string |
||||
} |
||||
|
||||
type configuration struct { |
||||
APIKey apiKey |
||||
General generalConfig |
||||
Heappe heappeConfig |
||||
Kafka kafkaConfig |
||||
Keycloak keycloakConfig |
||||
Lieutenant lieutenantConfig |
||||
OpenStack openStackConfig |
||||
Prometheus prometheusConfig |
||||
RGW rgwConfig |
||||
NameFilters []string |
||||
ProjectFilters []string |
||||
Services map[string]string |
||||
} |
||||
|
||||
type generalConfig struct { |
||||
InsecureSkipVerify bool |
||||
LogFile string |
||||
LogLevel string |
||||
LogToConsole bool |
||||
ObjectsPeriodicity int |
||||
Periodicity int |
||||
PrometheusPeriodicity int |
||||
} |
||||
|
||||
type heappeConfig struct { |
||||
Username string |
||||
Password string |
||||
GroupResourceUsageReportURI string |
||||
AuthenticateUserPasswordURI string |
||||
} |
||||
|
||||
type kafkaConfig struct { |
||||
Brokers []string |
||||
MaxBytes int |
||||
MinBytes int |
||||
Offset int64 |
||||
Partition int |
||||
TLSEnabled bool |
||||
TopicUDR string |
||||
TopicEEngine string |
||||
} |
||||
|
||||
type keycloakConfig struct { |
||||
ClientID string `json:"client_id"` |
||||
ClientSecret string `json:"client_secret"` |
||||
Enabled bool `json:"enabled"` |
||||
Host string `json:"host"` |
||||
Port int `json:"port"` |
||||
Realm string `json:"realm"` |
||||
RedirectURL string `json:"redirect_url"` |
||||
UseHTTP bool `json:"use_http"` |
||||
} |
||||
|
||||
type lieutenantConfig struct { |
||||
Host string |
||||
Token string |
||||
} |
||||
|
||||
type openStackConfig struct { |
||||
Domain string |
||||
Keystone string |
||||
Password string |
||||
Project string |
||||
Region string |
||||
User string |
||||
} |
||||
|
||||
type prometheusConfig struct { |
||||
Host string |
||||
MetricsExport bool |
||||
MetricsPort string |
||||
MetricsRoute string |
||||
} |
||||
|
||||
type rgwConfig struct { |
||||
AccessKeyID string |
||||
AdminPath string |
||||
Region string |
||||
SecretAccessKey string |
||||
ServerURL string |
||||
} |
||||
|
||||
// dumpConfig's job is to dump the configuration in JSON format to the log
|
||||
// system. It makes use of the masking function to keep some secrecy in the log.
|
||||
// Parameters:
|
||||
// - c: configuration type containing the config present in the system.
|
||||
func dumpConfig(c configuration) { |
||||
cfgCopy := c |
||||
|
||||
// deal with configuration params that should be masked
|
||||
cfgCopy.APIKey.Token = masked(c.APIKey.Token, 4) |
||||
cfgCopy.Heappe.Username = masked(c.Heappe.Username, 4) |
||||
cfgCopy.Heappe.Password = masked(c.Heappe.Password, 4) |
||||
cfgCopy.Keycloak.ClientSecret = masked(c.Keycloak.ClientSecret, 4) |
||||
cfgCopy.Lieutenant.Token = masked(c.Lieutenant.Token, 4) |
||||
cfgCopy.OpenStack.Password = masked(c.OpenStack.Password, 4) |
||||
cfgCopy.RGW.AccessKeyID = masked(c.RGW.AccessKeyID, 4) |
||||
cfgCopy.RGW.SecretAccessKey = masked(c.RGW.SecretAccessKey, 4) |
||||
|
||||
// MarshalIndent creates a string containing newlines; each line starts with
|
||||
// two spaces and two spaces are added for each indent...
|
||||
configJSON, _ := json.MarshalIndent(cfgCopy, " ", " ") |
||||
|
||||
l.Info.Printf("[CONFIG] Configuration settings:\n") |
||||
l.Info.Printf("%v\n", string(configJSON)) |
||||
|
||||
} |
||||
|
||||
// masked's job is to return asterisks in place of the characters in a
|
||||
// string, except for the last indicated number of characters.
|
||||
// Parameters:
|
||||
// - s: string to be masked
|
||||
// - unmaskedChars: int with the amount (counting from the end of the string) of
|
||||
// characters to keep unmasked.
|
||||
// Returns:
|
||||
// - returnString: the s string passed as parameter masked.
|
||||
func masked(s string, unmaskedChars int) (returnString string) { |
||||
|
||||
if len(s) <= unmaskedChars { |
||||
|
||||
returnString = s |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
asteriskString := strings.Repeat("*", (len(s) - unmaskedChars)) |
||||
returnString = asteriskString + string(s[len(s)-unmaskedChars:]) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// parseConfig handles the filling of the config struct with the data Viper gets
|
||||
// from the configuration file.
|
||||
// Returns:
|
||||
// - c: the configuration struct filled with the relevant parsed configuration.
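// Note: the viper keys used below correspond to the sections of the sample
// config.toml; for example "kafka.sizemax" reads SizeMax from the [KAFKA]
// section (viper lookups are case-insensitive).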
|
||||
func parseConfig() (c configuration) { |
||||
|
||||
l.Trace.Printf("[CONFIG] Retrieving configuration.\n") |
||||
|
||||
c = configuration{ |
||||
|
||||
APIKey: apiKey{ |
||||
Enabled: viper.GetBool("apikey.enabled"), |
||||
Key: viper.GetString("apikey.key"), |
||||
Place: viper.GetString("apikey.place"), |
||||
Token: viper.GetString("apikey.token"), |
||||
}, |
||||
|
||||
General: generalConfig{ |
||||
InsecureSkipVerify: viper.GetBool("general.insecureskipverify"), |
||||
LogFile: viper.GetString("general.logfile"), |
||||
LogLevel: viper.GetString("general.loglevel"), |
||||
LogToConsole: viper.GetBool("general.logtoconsole"), |
||||
ObjectsPeriodicity: viper.GetInt("general.objectsperiodicity"), |
||||
Periodicity: viper.GetInt("general.periodicity"), |
||||
PrometheusPeriodicity: viper.GetInt("general.prometheusperiodicity"), |
||||
}, |
||||
|
||||
Heappe: heappeConfig{ |
||||
Username: viper.GetString("heappe.username"), |
||||
Password: viper.GetString("heappe.password"), |
||||
GroupResourceUsageReportURI: viper.GetString("heappe.groupResourceUsageReportUri"), |
||||
AuthenticateUserPasswordURI: viper.GetString("heappe.authenticateUserPasswordUri"), |
||||
}, |
||||
|
||||
Kafka: kafkaConfig{ |
||||
Brokers: viper.GetStringSlice("kafka.brokers"), |
||||
MaxBytes: viper.GetInt("kafka.sizemax"), |
||||
MinBytes: viper.GetInt("kafka.sizemin"), |
||||
Offset: viper.GetInt64("kafka.offset"), |
||||
Partition: viper.GetInt("kafka.partition"), |
||||
TLSEnabled: viper.GetBool("kafka.tlsenabled"), |
||||
TopicUDR: viper.GetString("kafka.topicudr"), |
||||
TopicEEngine: viper.GetString("kafka.topiceengine"), |
||||
}, |
||||
|
||||
Keycloak: keycloakConfig{ |
||||
ClientID: viper.GetString("keycloak.clientid"), |
||||
ClientSecret: viper.GetString("keycloak.clientsecret"), |
||||
Enabled: viper.GetBool("keycloak.enabled"), |
||||
Host: viper.GetString("keycloak.host"), |
||||
Port: viper.GetInt("keycloak.port"), |
||||
Realm: viper.GetString("keycloak.realm"), |
||||
RedirectURL: viper.GetString("keycloak.redirecturl"), |
||||
UseHTTP: viper.GetBool("keycloak.usehttp"), |
||||
}, |
||||
|
||||
Lieutenant: lieutenantConfig{ |
||||
Host: viper.GetString("lieutenant.host"), |
||||
Token: viper.GetString("lieutenant.token"), |
||||
}, |
||||
|
||||
OpenStack: openStackConfig{ |
||||
Domain: viper.GetString("openstack.domain"), |
||||
Keystone: viper.GetString("openstack.keystone"), |
||||
Password: viper.GetString("openstack.password"), |
||||
Project: viper.GetString("openstack.project"), |
||||
Region: viper.GetString("openstack.region"), |
||||
User: viper.GetString("openstack.user"), |
||||
}, |
||||
|
||||
Prometheus: prometheusConfig{ |
||||
Host: viper.GetString("prometheus.host"), |
||||
MetricsExport: viper.GetBool("prometheus.metricsexport"), |
||||
MetricsPort: viper.GetString("prometheus.metricsport"), |
||||
MetricsRoute: viper.GetString("prometheus.metricsroute"), |
||||
}, |
||||
|
||||
RGW: rgwConfig{ |
||||
AccessKeyID: viper.GetString("rgw.accesskey"), |
||||
AdminPath: viper.GetString("rgw.adminpath"), |
||||
Region: viper.GetString("rgw.region"), |
||||
SecretAccessKey: viper.GetString("rgw.secretaccesskey"), |
||||
ServerURL: viper.GetString("rgw.serverurl"), |
||||
}, |
||||
|
||||
NameFilters: viper.GetStringSlice("events.namefilters"), |
||||
ProjectFilters: viper.GetStringSlice("events.projectfilters"), |
||||
Services: viper.GetStringMapString("services"), |
||||
} |
||||
|
||||
return |
||||
|
||||
} |
@ -0,0 +1,137 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"crypto/tls" |
||||
"encoding/json" |
||||
"strconv" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/segmentio/kafka-go" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
type kafkaHandlerConf struct { |
||||
out []kafkaPackage |
||||
} |
||||
|
||||
type kafkaPackage struct { |
||||
topic string |
||||
partition int |
||||
channel chan interface{} |
||||
} |
||||
|
||||
// kafkaHandler's job is to check the config that it receives and initialize the
|
||||
// goroutines necessary to satisfy the configuration it receives.
|
||||
// Parameters:
|
||||
// - kH: kafkaHandlerConf struct with the specific configuration used by the
|
||||
// service.
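// Example (as wired up in kafkaStart): a kafkaHandlerConf whose out slice holds
// one kafkaPackage for the UDR topic and one for the EventsEngine topic starts
// one kafkaSender goroutine per topic.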
|
||||
func kafkaHandler(kH kafkaHandlerConf) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing the receivers/senders according to the provided configuration.\n") |
||||
|
||||
if kH.out != nil { |
||||
|
||||
for _, p := range kH.out { |
||||
|
||||
go kafkaSender(p.topic, p.partition, p.channel) |
||||
|
||||
} |
||||
|
||||
} |
||||
} |
||||
|
||||
// kafkaSender is the abstracted interface handling the sending of data through
|
||||
// kafka topics.
|
||||
// Parameters:
|
||||
// - t: string containing the kafka-topic in use.
|
||||
// - p: int containing the kafka-topic partition.
|
||||
// - c: interface{} channel to receive the data that will be marshalled into
|
||||
// JSON and then transmitted via kafka.
|
||||
func kafkaSender(t string, p int, c chan interface{}) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing kafka sender for topic: %v.\n", t) |
||||
|
||||
conf := kafka.WriterConfig{ |
||||
Brokers: cfg.Kafka.Brokers, |
||||
Topic: t, |
||||
Balancer: &kafka.LeastBytes{}, |
||||
} |
||||
|
||||
if cfg.Kafka.TLSEnabled { |
||||
|
||||
dialer := &kafka.Dialer{ |
||||
Timeout: 10 * time.Second, |
||||
DualStack: true, |
||||
TLS: &tls.Config{ |
||||
MinVersion: tls.VersionTLS12, |
||||
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, |
||||
PreferServerCipherSuites: true, |
||||
InsecureSkipVerify: cfg.General.InsecureSkipVerify, |
||||
}, |
||||
} |
||||
|
||||
conf.Dialer = dialer |
||||
|
||||
} |
||||
|
||||
w := kafka.NewWriter(conf) |
||||
defer w.Close() |
||||
|
||||
for { |
||||
|
||||
v, ok := <-c |
||||
|
||||
if !ok { |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Go Channel Problems"}).Inc() |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
go func() { |
||||
|
||||
m, e := json.Marshal(&v) |
||||
|
||||
if e == nil { |
||||
|
||||
l.Info.Printf("[KAFKA] Object received through the channel. Starting its processing.\n") |
||||
|
||||
err := w.WriteMessages(context.Background(), |
||||
kafka.Message{ |
||||
Key: []byte(t + "-" + strconv.Itoa(p)), |
||||
Value: m, |
||||
}, |
||||
) |
||||
|
||||
if err != nil { |
||||
|
||||
l.Warning.Printf("[KAFKA] There was a problem when sending the record through the stream. Error: %v\n", err) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Kafka Stream Problems"}).Inc() |
||||
|
||||
} else { |
||||
|
||||
l.Info.Printf("[KAFKA] Object added to the stream succesfully. Topic: %v.\n", t) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "OK", "reason": "Object sent"}).Inc() |
||||
|
||||
} |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[KAFKA] The information to be sent into the stream cannot be marshalled, please check with the administrator. Error: %v\n", e) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "JSON Marshalling"}).Inc() |
||||
|
||||
} |
||||
|
||||
return |
||||
|
||||
}() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,188 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"flag" |
||||
"fmt" |
||||
"net/url" |
||||
"os" |
||||
"reflect" |
||||
"time" |
||||
|
||||
httptransport "github.com/go-openapi/runtime/client" |
||||
"github.com/spf13/viper" |
||||
cusClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client" |
||||
eeClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
udrModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
version string |
||||
cfg configuration |
||||
pipeU chan interface{} |
||||
pipeE chan interface{} |
||||
reportClient *eeClient.EventEngineManagementAPI |
||||
zombiesClient *cusClient.CustomerDatabaseManagement |
||||
) |
||||
|
||||
// kafkaStart handles the initialization of the kafka service.
|
||||
// This is a sample function showing the most basic usage of the kafka service; it
|
||||
// should be redefined to match the needs of the service.
|
||||
// Returns:
|
||||
// - chUDR, chEvents: interface{} channels used to send data through the kafka
|
||||
// topics configured for UDRs and events respectively.
|
||||
func kafkaStart() (chUDR, chEvents chan interface{}) { |
||||
|
||||
l.Trace.Printf("[MAIN] Intializing Kafka\n") |
||||
|
||||
chUDR = make(chan interface{}, 1000) |
||||
chEvents = make(chan interface{}, 1000) |
||||
|
||||
handler := kafkaHandlerConf{ |
||||
out: []kafkaPackage{ |
||||
{ |
||||
topic: cfg.Kafka.TopicUDR, |
||||
channel: chUDR, |
||||
}, |
||||
{ |
||||
topic: cfg.Kafka.TopicEEngine, |
||||
channel: chEvents, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
kafkaHandler(handler) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// report handles the process of sending the event or usage to the respective
|
||||
// service.
|
||||
// Parameters:
|
||||
// - object: an interface{} reference with the event/usage to be sent.
|
||||
func report(object interface{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] The reporting process has been started.\n") |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(udrModels.Usage{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] UDR Object detected. Sending through kafka.\n") |
||||
|
||||
pipeU <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(eeModels.Event{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] Event Object detected. Sending through kafka.\n") |
||||
|
||||
pipeE <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
fail := "the provided object doesn't belong to UDR or EE models" |
||||
|
||||
l.Warning.Printf("[REPORT] Something went wrong while processing the object, check with the administrator. Error: %v.\n", fail) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
func init() { |
||||
|
||||
confFile := flag.String("conf", "./config", "configuration file path (without toml extension)") |
||||
|
||||
flag.Parse() |
||||
|
||||
//placeholder code as the default value will ensure this situation will never arise
|
||||
if len(*confFile) == 0 { |
||||
|
||||
fmt.Printf("Usage: Collector-TYPE -conf=/path/to/configuration/file\n") |
||||
|
||||
os.Exit(0) |
||||
|
||||
} |
||||
|
||||
// err := gcfg.ReadFileInto(&cfg, *confFile)
|
||||
viper.SetConfigName(*confFile) // name of config file (without extension)
|
||||
viper.SetConfigType("toml") |
||||
viper.AddConfigPath(".") // path to look for the config file in
|
||||
|
||||
err := viper.ReadInConfig() // Find and read the config file
|
||||
|
||||
if err != nil { |
||||
|
||||
// TODO(murp) - differentiate between file not found and formatting error in
|
||||
// config file)
|
||||
fmt.Printf("[MAIN] Failed to parse configuration data: %s\nCorrect usage: Collector-TYPE -conf=/path/to/configuration/file\n", err) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
cfg = parseConfig() |
||||
|
||||
e := l.InitLogger(cfg.General.LogFile, cfg.General.LogLevel, cfg.General.LogToConsole) |
||||
|
||||
if e != nil { |
||||
|
||||
fmt.Printf("[MAIN] Initialization of the logger failed. Error: %v\n", e) |
||||
|
||||
} |
||||
|
||||
l.Info.Printf("Cyclops Labs Collector TYPE version %v initialized\n", version) |
||||
|
||||
dumpConfig(cfg) |
||||
|
||||
// Let's start the HTTP Server and Gauges for Prometheus
|
||||
prometheusStart() |
||||
|
||||
} |
||||
|
||||
func main() { |
||||
|
||||
// If needed here is the initialization for the kafka sender:
|
||||
pipeU, pipeE = kafkaStart() |
||||
|
||||
// Here we start the client instantiation to send reports to the EventsEngine.
|
||||
eeConfig := eeClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["eventsengine"], |
||||
Path: eeClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
reportClient = eeClient.New(eeConfig) |
||||
|
||||
// Here we start the client instantiation to get the canceled customers to check for zombies.
|
||||
cusConfig := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
zombiesClient = cusClient.New(cusConfig) |
||||
|
||||
// Let's launch the first collection process.
|
||||
go collect() |
||||
|
||||
// cfg.General.Periodicity should be changed to cfg.General.ObjectsPeriodicity
|
||||
// in the case you need the long (8h) periodicity.
|
||||
for range time.NewTicker(time.Duration(cfg.General.Periodicity) * time.Minute).C { |
||||
|
||||
go collect() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,95 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"log" |
||||
"net/http" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/prometheus/client_golang/prometheus/promhttp" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
metricReporting *prometheus.GaugeVec |
||||
metricCollection *prometheus.GaugeVec |
||||
metricTime *prometheus.GaugeVec |
||||
metricCount *prometheus.GaugeVec |
||||
) |
||||
|
||||
func prometheusStart() { |
||||
|
||||
reg := prometheus.NewPedanticRegistry() |
||||
|
||||
metricReporting = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "kafka_send_state", |
||||
Help: "Reporting information and Kafka topics usage", |
||||
}, |
||||
[]string{ |
||||
"reason", |
||||
"state", |
||||
"topic", |
||||
}, |
||||
) |
||||
|
||||
metricCollection = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "Collection", |
||||
Help: "Collection information and usages data", |
||||
}, |
||||
[]string{ |
||||
"account", |
||||
"event", |
||||
"reason", |
||||
"state", |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricTime = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "collection_time", |
||||
Help: "Different timing metrics", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricCount = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: objects + "_count", |
||||
Help: "Different VM Counts", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
reg.MustRegister(metricReporting, metricCollection, metricTime, metricCount) |
||||
//prometheus.MustRegister(metricReporting, metricCollection)
|
||||
|
||||
l.Trace.Printf("[Prometheus] Starting to serve the metrics.\n") |
||||
|
||||
go func() { |
||||
|
||||
if cfg.Prometheus.MetricsExport { |
||||
|
||||
//http.Handle(cfg.Prometheus.MetricsRoute, promhttp.Handler())
|
||||
http.Handle(cfg.Prometheus.MetricsRoute, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) |
||||
|
||||
go log.Fatal(http.ListenAndServe(":"+cfg.Prometheus.MetricsPort, nil)) |
||||
|
||||
} |
||||
|
||||
}() |
||||
|
||||
} |
@ -0,0 +1,35 @@ |
||||
module github.com/Cyclops-Labs/cyclops-4-hpc.git/cyclops-collectors/objects-collector |
||||
|
||||
go 1.13 |
||||
|
||||
require ( |
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect |
||||
github.com/go-openapi/runtime v0.21.0 |
||||
github.com/go-openapi/strfmt v0.21.1 // indirect |
||||
github.com/go-playground/universal-translator v0.18.0 // indirect |
||||
github.com/golang/protobuf v1.5.2 // indirect |
||||
github.com/google/go-querystring v1.1.0 // indirect |
||||
github.com/leodido/go-urn v1.2.1 // indirect |
||||
github.com/magiconair/properties v1.8.5 // indirect |
||||
github.com/mailru/easyjson v0.7.7 // indirect |
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect |
||||
github.com/myENA/radosgwadmin v0.1.0 |
||||
github.com/myENA/restclient v1.0.5 |
||||
github.com/prometheus/client_golang v1.11.0 |
||||
github.com/remeh/sizedwaitgroup v1.0.0 |
||||
github.com/segmentio/kafka-go v0.4.25 |
||||
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect |
||||
github.com/spf13/afero v1.6.0 // indirect |
||||
github.com/spf13/viper v1.9.0 |
||||
github.com/spkg/bom v1.0.0 // indirect |
||||
gitlab.com/cyclops-utilities/datamodels v0.0.0-20191016132854-e9313e683e5b |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr v0.0.1 |
||||
gitlab.com/cyclops-utilities/logging v0.0.0-20200914110347-ca1d02efd346 |
||||
go.mongodb.org/mongo-driver v1.8.0 // indirect |
||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c // indirect |
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 // indirect |
||||
gopkg.in/go-playground/validator.v9 v9.31.0 // indirect |
||||
gopkg.in/ini.v1 v1.66.1 // indirect |
||||
) |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,93 @@ |
||||
# Welcome to the configuration file for this |
||||
# |
||||
# ██████╗██╗ ██╗ ██████╗██╗ ██████╗ ██████╗ ███████╗ |
||||
# ██╔════╝╚██╗ ██╔╝██╔════╝██║ ██╔═══██╗██╔══██╗██╔════╝ |
||||
# ██║ ╚████╔╝ ██║ ██║ ██║ ██║██████╔╝███████╗ |
||||
# ██║ ╚██╔╝ ██║ ██║ ██║ ██║██╔═══╝ ╚════██║ |
||||
# ╚██████╗ ██║ ╚██████╗███████╗╚██████╔╝██║ ███████║ |
||||
# ╚═════╝ ╚═╝ ╚═════╝╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ |
||||
# |
||||
# ██╗ █████╗ ██████╗ ███████╗ |
||||
# ██║ ██╔══██╗██╔══██╗██╔════╝ |
||||
# ██║ ███████║██████╔╝███████╗ |
||||
# ██║ ██╔══██║██╔══██╗╚════██║ |
||||
# ███████╗██║ ██║██████╔╝███████║ |
||||
# ╚══════╝╚═╝ ╚═╝╚═════╝ ╚══════╝ |
||||
# |
||||
# collector! |
||||
|
||||
[APIKEY] |
||||
Enabled = true |
||||
Key = "X-API-KEY" |
||||
Place = "header" |
||||
Token = "1234567890abcdefghi" |
||||
|
||||
[EVENTS] |
||||
Filters = [ "filter1", "filter2", "filter3" ] |
||||
|
||||
[GENERAL] |
||||
LogFile = "" |
||||
LogToConsole = true |
||||
# loglevel values can be one of the following: TRACE, DEBUG, INFO, WARNING, ERROR |
||||
LogLevel = "TRACE" |
||||
ObjectsPeriodicity = 480 |
||||
Periodicity = 15 |
||||
PrometheusPeriodicity = 60 |
||||
|
||||
[HEAPPE] |
||||
Username = "" |
||||
Password = "" |
||||
GroupResourceUsageReportUri = "" |
||||
AuthenticateUserPasswordUri = "" |
||||
|
||||
[KAFKA] |
||||
Brokers = [ "broker-1-IP:broker-1-PORT", "broker-2-IP:broker-2-PORT", "broker-3-IP:broker-3-PORT" ] |
||||
# -1 for the most recent |
||||
# -2 for the first in the partition |
||||
# Any other value for a specific offset |
||||
Offset = "-1" |
||||
Partition = "0" |
||||
SizeMax = 10e6 |
||||
SizeMin = 10e3 |
||||
TLSEnabled = false |
||||
TopicEEngine = "Events" |
||||
TopicUDR = "UDR" |
||||
|
||||
[KEYCLOAK] |
||||
ClientID = "SERVICE" |
||||
ClientSecret = "00000000-0000-0000-0000-00000000" |
||||
Enabled = true |
||||
Host = "0.0.0.0" |
||||
Port = 8080 |
||||
Realm = "Development" |
||||
RedirectURL = "" |
||||
UseHttp = true |
||||
|
||||
[LIEUTENANT] |
||||
Host = "lieutenant:4010" |
||||
Token = "" |
||||
|
||||
[OPENSTACK] |
||||
Domain = "" |
||||
Keystone = "" |
||||
Password = "" |
||||
Project = "" |
||||
Region = "" |
||||
User = "" |
||||
|
||||
[PROMETHEUS] |
||||
Host = "prometheus:9000" |
||||
MetricsExport = true |
||||
MetricsPort = "9000" |
||||
MetricsRoute = "/metrics" |
||||
|
||||
[RGW] |
||||
AccessKey = "" |
||||
AdminPath = "" |
||||
Region = "" |
||||
SecretAccessKey = "" |
||||
ServerURL = "" |
||||
|
||||
[SERVICES] |
||||
CustomerDB = "localhost:8400" |
||||
EventsEngine = "localhost:8500" |
@ -0,0 +1,17 @@ |
||||
version: '3' |
||||
|
||||
services: |
||||
|
||||
collectors: |
||||
environment: |
||||
WAIT_AFTER_HOSTS: 30 |
||||
image: objects-collector:latest |
||||
networks: |
||||
- collectorsnet |
||||
restart: always |
||||
volumes: |
||||
- ${PWD}/config.toml:/config.toml |
||||
|
||||
networks: |
||||
collectorsnet: |
||||
driver: bridge |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,25 @@ |
||||
# SERVERS COLLECTOR |
||||
|
||||
OpenStack Servers/Instances collector written in go |
||||
|
||||
## How to build |
||||
|
||||
The service is built with a multistage Docker build: everything is compiled in an image that contains all the tooling needed to build Golang applications, and the resulting executable is then copied into a minimal image based on the scratch image. |
||||
|
||||
Within the build folder at the root of the repo there is a script called start.sh, whose invocation accepts "Dev" as a parameter. Running the script with this parameter uses the local version of the code in the repository as the base for building the service's Docker image; running it without the parameter builds the service entirely from sources. |
||||
|
||||
``` |
||||
./start.sh [Dev] |
||||
``` |
||||
|
||||
The initial branch used when building from sources is "master"; it, along with a few other parameters, can be changed by editing the script. |
||||
|
||||
Using the optional [Dev] argument takes the code present in the repo at the moment of invocation. |
||||
|
||||
## How to run |
||||
|
||||
Within the run folder at the root of the repo there are a docker-compose sample file and a config.toml sample file. Once they are configured with appropriate data, start the service by issuing the following command: |
||||
|
||||
``` |
||||
docker-compose up -d |
||||
``` |
@ -0,0 +1,189 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/gophercloud/gophercloud" |
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" |
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/images" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
type FlavorIDCacheData struct { |
||||
FlavorName string |
||||
LastRefreshed int64 |
||||
} |
||||
|
||||
type ImageIDCacheData struct { |
||||
ImageName string |
||||
OSFlavor string |
||||
LastRefreshed int64 |
||||
} |
||||
|
||||
var ( |
||||
FlavorCache map[string]FlavorIDCacheData |
||||
ImageCache map[string]ImageIDCacheData |
||||
) |
||||
|
||||
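// getFromFlavorCache returns the flavor name for the given flavor ID, serving it
// from the local cache while the entry is less than 86400 seconds (24 hours) old
// and querying the OpenStack compute API (and refreshing the cache) otherwise.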
func getFromFlavorCache(client *gophercloud.ServiceClient, flavorid string) (flavorname string, e error) { |
||||
|
||||
var flavor *flavors.Flavor |
||||
|
||||
if FlavorCache[flavorid].LastRefreshed != 0 { |
||||
|
||||
//the cache entry exists
|
||||
l.Trace.Printf("[CACHE] Flavor cache entry found for key: %v.\n", flavorid) |
||||
|
||||
if time.Now().Unix()-FlavorCache[flavorid].LastRefreshed > 86400 { |
||||
|
||||
//invalidate the entry and get a fresh one
|
||||
l.Trace.Printf("[CACHE] Cache invalidation for key [ %v ], refreshing entry now.\n", flavorid) |
||||
|
||||
flavor, e = flavors.Get(client, flavorid).Extract() |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE] Error reading remote API response for flavor-id data. Error: %v\n", e) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
flavorname = flavor.Name |
||||
|
||||
FlavorCache[flavorid] = FlavorIDCacheData{ |
||||
flavor.Name, |
||||
time.Now().Unix(), |
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry updated for key [ %v ]. New value returned.\n", flavorid) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry value for key [ %v ] returned without refreshing.\n", flavorid) |
||||
|
||||
flavorname = FlavorCache[flavorid].FlavorName |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry miss for key: %v\n.", flavorid) |
||||
|
||||
flavor, e = flavors.Get(client, flavorid).Extract() |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE] Error reading remote API response for flavor-id data. Error: %v ", e) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
flavorname = flavor.Name |
||||
|
||||
l.Trace.Printf("[CACHE] Going to add a new cache entry added for key: %v\n", flavorid) |
||||
|
||||
FlavorCache[flavorid] = FlavorIDCacheData{ |
||||
flavor.Name, |
||||
time.Now().Unix(), |
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry added for key [ %v ]. New value returned.\n", flavorid) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
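// getFromImageCache returns the image name and its os_flavor metadata for the
// given image id, serving them from the local cache when the entry is younger
// than 24h and refreshing it from the OpenStack API otherwise.
// Parameters:
// - client: compute ServiceClient used to query the OpenStack API.
// - imageid: string with the id of the image to be resolved.
// Returns:
// - imagename: string with the resolved image name.
// - osflavor: string with the os_flavor metadata of the image, if present.
// - e: an error reference raised in case something goes wrong.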
func getFromImageCache(client *gophercloud.ServiceClient, imageid string) (imagename string, osflavor string, e error) { |
||||
|
||||
var image *images.Image |
||||
|
||||
if ImageCache[imageid].LastRefreshed != 0 { |
||||
|
||||
//the cache entry exists
|
||||
l.Trace.Printf("[CACHE] Image cache entry found for image key: %v.\n", imageid) |
||||
|
||||
if time.Now().Unix()-ImageCache[imageid].LastRefreshed > 86400 { |
||||
|
||||
//invalidate the entry and get a fresh one
|
||||
l.Trace.Printf("[CACHE] Cache invalidation for image key [ %v ], refreshing entry now.\n", imageid) |
||||
|
||||
image, e = images.Get(client, imageid).Extract() |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE] Error reading remote API response for image data. Error: %v.\n", e) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
imagename = image.Name |
||||
|
||||
osflavor_test, exists := image.Metadata["os_flavor"] //image operating system flavor
|
||||
|
||||
if exists { |
||||
|
||||
osflavor = osflavor_test.(string) |
||||
|
||||
} |
||||
|
||||
ImageCache[imageid] = ImageIDCacheData{ |
||||
image.Name, |
||||
osflavor, |
||||
time.Now().Unix(), |
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry updated for image key [ %v ]. New value returned.\n", imageid) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry value for image key [ %v ] returned without refreshing.\n", imageid) |
||||
|
||||
imagename = ImageCache[imageid].ImageName |
||||
osflavor = ImageCache[imageid].OSFlavor |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry miss for image key: %v.\n", imageid) |
||||
|
||||
image, e = images.Get(client, imageid).Extract() |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE] Error reading remote API response for image data. Error: %v.\n", e) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
imagename = image.Name |
||||
|
||||
osflavor_test, exists := image.Metadata["os_flavor"] //image operating system flavor
|
||||
|
||||
if exists { |
||||
|
||||
osflavor = osflavor_test.(string) |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Going to add a new cache entry added for image key: " + imageid) |
||||
|
||||
ImageCache[imageid] = ImageIDCacheData{ |
||||
image.Name, |
||||
osflavor, |
||||
time.Now().Unix(), |
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Cache entry added for image key: " + imageid + ". New value returned.") |
||||
|
||||
return |
||||
|
||||
} |
@ -0,0 +1,480 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"os" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/gophercloud/gophercloud" |
||||
"github.com/gophercloud/gophercloud/openstack" |
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers" |
||||
"github.com/gophercloud/gophercloud/pagination" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
datamodels "gitlab.com/cyclops-utilities/datamodels" |
||||
eeEvent "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client/event_management" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
collector = "Servers" |
||||
objects = "vm" |
||||
collectionStart int64 |
||||
vmCount int |
||||
terminatedCount int |
||||
dropCount int |
||||
remotelist []*eeModels.MinimalState |
||||
client *gophercloud.ServiceClient |
||||
) |
||||
|
||||
// collect handles the process of retrieving the information from the system.
|
||||
func collect() { |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been started.\n") |
||||
|
||||
collectionStart = time.Now().UnixNano() |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Start Time"}).Set(float64(collectionStart)) |
||||
|
||||
// Here comes the logic to retrieve the information from the system.
|
||||
opts := gophercloud.AuthOptions{ |
||||
DomainName: cfg.OpenStack.Domain, |
||||
IdentityEndpoint: cfg.OpenStack.Keystone, |
||||
Password: cfg.OpenStack.Password, |
||||
Username: cfg.OpenStack.User, |
||||
} |
||||
|
||||
if len(cfg.OpenStack.Project) > 0 { |
||||
|
||||
opts.TenantName = cfg.OpenStack.Project |
||||
|
||||
} |
||||
|
||||
provider, e := openstack.AuthenticatedClient(opts) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error authenticating against OpenStack. Error: %v", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
client, e = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ |
||||
Region: cfg.OpenStack.Region, |
||||
}) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error creating compute collector client. Error: %v", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
serveropts := servers.ListOpts{ |
||||
AllTenants: true, //set this to true to list VMs from all tenants if policy allows it
|
||||
} |
||||
|
||||
l.Trace.Printf("[COLLECTION] Querying events engine service for list of known and not terminated servers") |
||||
|
||||
resourceType := "server" |
||||
eeParams := eeEvent.NewListStatesParams().WithResource(&resourceType).WithRegion(&cfg.OpenStack.Region) |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) |
defer cancel() |
||||
r, e := reportClient.EventManagement.ListStates(ctx, eeParams) |
||||
|
||||
// Clears the remotelist between runs
|
||||
remotelist = nil |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Something went wrong while retrieving the usage from the system, check with the administrator. Error: %v.\n", e) |
||||
|
||||
} else { |
||||
|
||||
remotelist = r.Payload |
||||
|
||||
} |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total VMs from EventEngine"}).Set(float64(len(remotelist))) |
||||
|
||||
l.Trace.Printf("[COLLECTION] (BEFORE) Existing count of servers at remote [ %v ].\n", len(remotelist)) |
||||
|
||||
eeCount := len(remotelist) |
||||
|
||||
pager := servers.List(client, serveropts) |
||||
|
||||
vmCount = 0 |
||||
|
||||
e = pager.EachPage(extractPage) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error processing the lists of active resources. Error: %v\n", e) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total VMs reported by OS API"}).Set(float64(vmCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total VMs reported by OS API (to terminated state)"}).Set(float64(terminatedCount)) |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total VMs DROPPED due to unknown flavor"}).Set(float64(dropCount)) |
||||
|
||||
l.Trace.Printf("[COLLECTION] (AFTER) Remaining count of servers at remote which were left unprocessed [ %v ].\n", len(remotelist)) |
||||
|
||||
//now for all remaining servers send terminated status
|
||||
for _, object := range remotelist { |
||||
|
||||
l.Trace.Printf("[COLLECTION] Sending terminated event for server [ %v ] for project [ %v ] with ID [ %v ].\n", object.ResourceID, object.ResourceName, object.Account) |
||||
|
||||
evTime := int64(time.Now().Unix()) |
||||
evLast := getStatus("terminated") |
||||
|
||||
// event report to be sent.
|
||||
event := eeModels.Event{ |
||||
Account: object.Account, |
||||
EventTime: &evTime, |
||||
LastEvent: &evLast, |
||||
MetaData: object.MetaData, |
||||
Region: cfg.OpenStack.Region, |
||||
ResourceID: object.ResourceID, |
||||
ResourceName: object.ResourceName, |
||||
ResourceType: "server", |
||||
} |
||||
|
||||
report(event) |
||||
|
||||
} |
||||
|
||||
metricCount.With(prometheus.Labels{"type": "Total VMs forcefully TERMINATED"}).Set(float64(len(remotelist))) |
||||
|
||||
metricTime.With(prometheus.Labels{"type": "Collection Processing Time"}).Set(float64(time.Now().UnixNano()-collectionStart) / float64(time.Millisecond)) |
||||
|
||||
l.Warning.Printf("[COLLECTION] Completed.\n - OS Report: %v\n - EE Report: %v\n - Droped: %v\n - OS Terminated: %v\n - Forced Termination: %v\n - Processing Time: %v[ms]\n", vmCount, eeCount, dropCount, terminatedCount, len(remotelist), float64(time.Now().UnixNano()-collectionStart)/float64(time.Millisecond)) |
||||
|
||||
l.Trace.Printf("[COLLECTION] The collection process has been finished.\n") |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// extractPage is the handler function invoked to process each page collected
|
||||
// from the server list.
|
||||
// Parameters:
|
||||
// - page: Pagination.Page reference of the page to be processed.
|
||||
// Returns:
|
||||
// - ok: a bool to mark the state of the processing.
|
||||
// - e: an error reference raised in case something goes wrong.
|
||||
func extractPage(page pagination.Page) (ok bool, e error) { |
||||
|
||||
var serverList []servers.Server |
||||
|
||||
serverList, e = servers.ExtractServers(page) |
||||
|
||||
if e != nil { |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
allProjectsLoop: |
||||
for _, s := range serverList { |
||||
|
||||
// Filter by project id:
|
||||
for _, filter := range cfg.ProjectFilters { |
||||
|
||||
if strings.Contains(s.TenantID, filter) && filter != "" { |
||||
|
||||
l.Debug.Printf("[COLLECTION] The Project [ %v ] matches filter [ %v ] and won't be further processed.", s.TenantID, filter) |
||||
|
||||
continue allProjectsLoop |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
// Filter by project name:
|
||||
for _, filter := range cfg.NameFilters { |
||||
|
||||
if strings.Contains(strings.ToLower(s.Name), strings.ToLower(filter)) && filter != "" { |
||||
|
||||
l.Debug.Printf("[COLLECTION] The Project [ %v ] matches filter [ %v ] and won't be further processed.", s.Name, filter) |
||||
|
||||
continue allProjectsLoop |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
vmCount++ |
||||
|
||||
// "s" will be a servers.Server
|
||||
var imageid, flavorid, imagename, imageosflavor, flavorname string |
||||
|
||||
for k, val := range s.Image { |
||||
|
||||
switch v := val.(type) { |
||||
|
||||
case string: |
||||
|
||||
if strings.Compare(k, "id") == 0 { |
||||
|
||||
imageid = v |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
for k, val := range s.Flavor { |
||||
|
||||
switch v := val.(type) { |
||||
|
||||
case string: |
||||
|
||||
if strings.Compare(k, "id") == 0 { |
||||
|
||||
flavorid = v |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("%+v, %v", client, imageid) |
||||
|
||||
imagename, imageosflavor, e := getFromImageCache(client, imageid) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error while getting the image id [ %+v ]. Error: %v\n", imageid, e) |
||||
|
||||
} |
||||
|
||||
flavorname, e = getFromFlavorCache(client, flavorid) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Error.Printf("[COLLECTION] Error while getting the flavor id [ %+v ]. Error: %v\n", flavorid, e) |
||||
|
||||
} |
||||
|
||||
if len(flavorname) == 0 { |
||||
|
||||
l.Warning.Printf("[COLLECTION] Found VM - Name:[%s], TenantID:[%s], Status:[%s], ID:[%s], ImageID:[%s], ImageName:[%s], ImageOSFlavor:[%s], FlavorId:[%s], FlavorName:[%s] :: with missing FlavorName, skipping record!", |
||||
s.Name, s.TenantID, s.Status, s.ID, imageid, imagename, imageosflavor, flavorid, flavorname) |
||||
|
||||
dropCount++ |
||||
|
||||
continue |
||||
|
||||
} |
||||
|
||||
l.Info.Printf("[COLLECTION] Found VM - Name:[%s], TenantID:[%s], Status:[%s], ID:[%s], ImageID:[%s], ImageName:[%s], ImageOSFlavor:[%s], FlavorId:[%s], FlavorName:[%s]", |
||||
s.Name, s.TenantID, s.Status, s.ID, imageid, imagename, imageosflavor, flavorid, flavorname) |
||||
|
||||
// A potential problem with these filters is that if clients create their
|
||||
// VMs with the filter strings, those VMs will not be billed.
|
||||
// It would be better to actually filter out all resources within a given
|
||||
// tenant, so that if rally or tempest test runs create resources exclusively
|
||||
// belonging to a specific tenant, that tenant can be filtered out.
|
||||
// TODO: TBD with SWITCH!
|
||||
|
||||
// Here comes the transformation of the retrieved information into the event metadata to be reported.
|
||||
metadata := make(datamodels.JSONdb) |
||||
metadata["imageid"] = imageid |
||||
metadata["imagename"] = imagename |
||||
metadata["imageosflavor"] = imageosflavor |
||||
metadata["flavorid"] = flavorid |
||||
metadata["flavorname"] = flavorname |
||||
metadata["region"] = cfg.OpenStack.Region |
||||
|
||||
// TODO: Make more generic and customizable via the config file
|
||||
if value, exists := s.Metadata["schedule_frequency"]; exists && value == "never" { |
||||
|
||||
metadata["PlanOverride"] = true |
||||
|
||||
} |
||||
|
||||
evTime := int64(time.Now().Unix()) |
||||
evLast := getStatus(s.Status) |
||||
|
||||
if evLast == "terminated" { |
||||
|
||||
terminatedCount++ |
||||
|
||||
} |
||||
|
||||
// event report to be sent.
|
||||
event := eeModels.Event{ |
||||
Account: s.TenantID, |
||||
EventTime: &evTime, |
||||
LastEvent: &evLast, |
||||
MetaData: metadata, |
||||
Region: cfg.OpenStack.Region, |
||||
ResourceID: s.ID, |
||||
ResourceName: s.Name, |
||||
ResourceType: "server", |
||||
} |
||||
|
||||
report(event) |
||||
|
||||
//if this object exists in remote list then lets remove it
|
||||
for i, object := range remotelist { |
||||
|
||||
if strings.Compare(object.Account, s.TenantID) == 0 && |
||||
strings.Compare(object.ResourceID, s.ID) == 0 && |
||||
strings.Compare(object.ResourceName, s.Name) == 0 { |
||||
|
||||
l.Debug.Printf("[COLLECTION] Event send cleaned from the processing list..\n") |
||||
|
||||
remotelist = append(remotelist[:i], remotelist[i+1:]...) |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
} |
||||
|
||||
ok = true |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// getStatus job is to normalize the event state returned by the collectors.
|
||||
// Parameters:
|
||||
// - state: string returned by the system.
|
||||
// Returns:
|
||||
// - status: normalized state to be returned.
|
||||
func getStatus(state string) (status string) { |
||||
|
||||
switch strings.ToUpper(state) { |
||||
|
||||
case "ACTIVE": |
||||
|
||||
status = "active" |
||||
|
||||
case "ATTACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "AVAILABLE": |
||||
|
||||
status = "active" |
||||
|
||||
case "BUILD": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "CREATING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DELETING": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "DETACHING": |
||||
|
||||
status = "active" |
||||
|
||||
case "DOWN": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "ERROR": |
||||
|
||||
status = "error" |
||||
|
||||
case "ERROR_DELETING": |
||||
|
||||
status = "error" |
||||
|
||||
case "EXTENDING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "HARD_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "IN-USE": |
||||
|
||||
status = "active" |
||||
|
||||
case "MAINTENANCE": |
||||
|
||||
status = "active" |
||||
|
||||
case "PAUSED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "RESCUED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESIZED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RESERVED": |
||||
|
||||
status = "active" |
||||
|
||||
case "RETYPING": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SHELVED_OFFLOADED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "SHUTOFF": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SOFT_DELETED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "STOPPED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "SUSPENDED": |
||||
|
||||
status = "inactive" |
||||
|
||||
case "TERMINATED": |
||||
|
||||
status = "terminated" |
||||
|
||||
case "VERIFY_RESIZE": |
||||
|
||||
status = "active" |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[REPORT] State received from the system [ %v ] normalized to [ %v ]", state, status) |
||||
|
||||
return status |
||||
|
||||
} |
@ -0,0 +1,246 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"strings" |
||||
|
||||
"github.com/spf13/viper" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// The following structs are part of the configuration struct which
|
||||
// acts as the main reference for configuration parameters in the system.
|
||||
type apiKey struct { |
||||
Enabled bool |
||||
Key string |
||||
Place string |
||||
Token string |
||||
} |
||||
|
||||
type configuration struct { |
||||
APIKey apiKey |
||||
General generalConfig |
||||
Heappe heappeConfig |
||||
Kafka kafkaConfig |
||||
Keycloak keycloakConfig |
||||
Lieutenant lieutenantConfig |
||||
OpenStack openStackConfig |
||||
Prometheus prometheusConfig |
||||
RGW rgwConfig |
||||
NameFilters []string |
||||
ProjectFilters []string |
||||
Services map[string]string |
||||
} |
||||
|
||||
type generalConfig struct { |
||||
InsecureSkipVerify bool |
||||
LogFile string |
||||
LogLevel string |
||||
LogToConsole bool |
||||
ObjectsPeriodicity int |
||||
Periodicity int |
||||
PrometheusPeriodicity int |
||||
} |
||||
|
||||
type heappeConfig struct { |
||||
Username string |
||||
Password string |
||||
GroupResourceUsageReportURI string |
||||
AuthenticateUserPasswordURI string |
||||
} |
||||
|
||||
type kafkaConfig struct { |
||||
Brokers []string |
||||
MaxBytes int |
||||
MinBytes int |
||||
Offset int64 |
||||
Partition int |
||||
TLSEnabled bool |
||||
TopicUDR string |
||||
TopicEEngine string |
||||
} |
||||
|
||||
type keycloakConfig struct { |
||||
ClientID string `json:"client_id"` |
||||
ClientSecret string `json:"client_secret"` |
||||
Enabled bool `json:"enabled"` |
||||
Host string `json:"host"` |
||||
Port int `json:"port"` |
||||
Realm string `json:"realm"` |
||||
RedirectURL string `json:"redirect_url"` |
||||
UseHTTP bool `json:"use_http"` |
||||
} |
||||
|
||||
type lieutenantConfig struct { |
||||
Host string |
||||
Token string |
||||
} |
||||
|
||||
type openStackConfig struct { |
||||
Domain string |
||||
Keystone string |
||||
Password string |
||||
Project string |
||||
Region string |
||||
User string |
||||
} |
||||
|
||||
type prometheusConfig struct { |
||||
Host string |
||||
MetricsExport bool |
||||
MetricsPort string |
||||
MetricsRoute string |
||||
} |
||||
|
||||
type rgwConfig struct { |
||||
AccessKeyID string |
||||
AdminPath string |
||||
Region string |
||||
SecretAccessKey string |
||||
ServerURL string |
||||
} |
||||
|
||||
// dumpConfig's job is to dump the configuration in JSON format to the log
|
||||
// system. It makes use of the masking function to keep some secrecy in the log.
|
||||
// Parameters:
|
||||
// - c: configuration type containing the config present in the system.
|
||||
func dumpConfig(c configuration) { |
||||
cfgCopy := c |
||||
|
||||
// deal with configuration params that should be masked
|
||||
cfgCopy.APIKey.Token = masked(c.APIKey.Token, 4) |
||||
cfgCopy.Heappe.Username = masked(c.Heappe.Username, 4) |
||||
cfgCopy.Heappe.Password = masked(c.Heappe.Password, 4) |
||||
cfgCopy.Keycloak.ClientSecret = masked(c.Keycloak.ClientSecret, 4) |
||||
cfgCopy.Lieutenant.Token = masked(c.Lieutenant.Token, 4) |
||||
cfgCopy.OpenStack.Password = masked(c.OpenStack.Password, 4) |
||||
cfgCopy.RGW.AccessKeyID = masked(c.RGW.AccessKeyID, 4) |
||||
cfgCopy.RGW.SecretAccessKey = masked(c.RGW.SecretAccessKey, 4) |
||||
|
||||
// MarshalIndent creates a string containing newlines; each line starts with
|
||||
// two spaces and two spaces are added for each indent...
|
||||
configJSON, _ := json.MarshalIndent(cfgCopy, " ", " ") |
||||
|
||||
l.Info.Printf("[CONFIG] Configuration settings:\n") |
||||
l.Info.Printf("%v\n", string(configJSON)) |
||||
|
||||
} |
||||
|
||||
// masked's job is to return asterisks in place of the characters in a
|
||||
// string with the exception of the last indicated.
|
||||
// Parameters:
|
||||
// - s: string to be masked
|
||||
// - unmaskedChars: int with the amount (counting from the end of the string) of
|
||||
// characters to keep unmasked.
|
||||
// Returns:
|
||||
// - returnString: the s string passed as parameter masked.
|
||||
func masked(s string, unmaskedChars int) (returnString string) { |
||||
|
||||
if len(s) <= unmaskedChars { |
||||
|
||||
returnString = s |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
asteriskString := strings.Repeat("*", (len(s) - unmaskedChars)) |
||||
returnString = asteriskString + string(s[len(s)-unmaskedChars:]) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// parseConfig handles the filling of the config struct with the data Viper gets
|
||||
// from the configuration file.
|
||||
// Returns:
|
||||
// - c: the configuration struct filled with the relevant parsed configuration.
|
||||
func parseConfig() (c configuration) { |
||||
|
||||
l.Trace.Printf("[CONFIG] Retrieving configuration.\n") |
||||
|
||||
c = configuration{ |
||||
|
||||
APIKey: apiKey{ |
||||
Enabled: viper.GetBool("apikey.enabled"), |
||||
Key: viper.GetString("apikey.key"), |
||||
Place: viper.GetString("apikey.place"), |
||||
Token: viper.GetString("apikey.token"), |
||||
}, |
||||
|
||||
General: generalConfig{ |
||||
InsecureSkipVerify: viper.GetBool("general.insecureskipverify"), |
||||
LogFile: viper.GetString("general.logfile"), |
||||
LogLevel: viper.GetString("general.loglevel"), |
||||
LogToConsole: viper.GetBool("general.logtoconsole"), |
||||
ObjectsPeriodicity: viper.GetInt("general.objectsperiodicity"), |
||||
Periodicity: viper.GetInt("general.periodicity"), |
||||
PrometheusPeriodicity: viper.GetInt("general.prometheusperiodicity"), |
||||
}, |
||||
|
||||
Heappe: heappeConfig{ |
||||
Username: viper.GetString("heappe.username"), |
||||
Password: viper.GetString("heappe.password"), |
||||
GroupResourceUsageReportURI: viper.GetString("heappe.groupResourceUsageReportUri"), |
||||
AuthenticateUserPasswordURI: viper.GetString("heappe.authenticateUserPasswordUri"), |
||||
}, |
||||
|
||||
Kafka: kafkaConfig{ |
||||
Brokers: viper.GetStringSlice("kafka.brokers"), |
||||
MaxBytes: viper.GetInt("kafka.sizemax"), |
||||
MinBytes: viper.GetInt("kafka.sizemin"), |
||||
Offset: viper.GetInt64("kafka.offset"), |
||||
Partition: viper.GetInt("kafka.partition"), |
||||
TLSEnabled: viper.GetBool("kafka.tlsenabled"), |
||||
TopicUDR: viper.GetString("kafka.topicudr"), |
||||
TopicEEngine: viper.GetString("kafka.topiceengine"), |
||||
}, |
||||
|
||||
Keycloak: keycloakConfig{ |
||||
ClientID: viper.GetString("keycloak.clientid"), |
||||
ClientSecret: viper.GetString("keycloak.clientsecret"), |
||||
Enabled: viper.GetBool("keycloak.enabled"), |
||||
Host: viper.GetString("keycloak.host"), |
||||
Port: viper.GetInt("keycloak.port"), |
||||
Realm: viper.GetString("keycloak.realm"), |
||||
RedirectURL: viper.GetString("keycloak.redirecturl"), |
||||
UseHTTP: viper.GetBool("keycloak.usehttp"), |
||||
}, |
||||
|
||||
Lieutenant: lieutenantConfig{ |
||||
Host: viper.GetString("lieutenant.host"), |
||||
Token: viper.GetString("lieutenant.token"), |
||||
}, |
||||
|
||||
OpenStack: openStackConfig{ |
||||
Domain: viper.GetString("openstack.domain"), |
||||
Keystone: viper.GetString("openstack.keystone"), |
||||
Password: viper.GetString("openstack.password"), |
||||
Project: viper.GetString("openstack.project"), |
||||
Region: viper.GetString("openstack.region"), |
||||
User: viper.GetString("openstack.user"), |
||||
}, |
||||
|
||||
Prometheus: prometheusConfig{ |
||||
Host: viper.GetString("prometheus.host"), |
||||
MetricsExport: viper.GetBool("prometheus.metricsexport"), |
||||
MetricsPort: viper.GetString("prometheus.metricsport"), |
||||
MetricsRoute: viper.GetString("prometheus.metricsroute"), |
||||
}, |
||||
|
||||
RGW: rgwConfig{ |
||||
AccessKeyID: viper.GetString("rgw.accesskey"), |
||||
AdminPath: viper.GetString("rgw.adminpath"), |
||||
Region: viper.GetString("rgw.region"), |
||||
SecretAccessKey: viper.GetString("rgw.secretaccesskey"), |
||||
ServerURL: viper.GetString("rgw.serverurl"), |
||||
}, |
||||
|
||||
NameFilters: viper.GetStringSlice("events.namefilters"), |
||||
ProjectFilters: viper.GetStringSlice("events.projectfilters"), |
||||
Services: viper.GetStringMapString("services"), |
||||
} |
||||
|
||||
return |
||||
|
||||
} |
@ -0,0 +1,137 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"crypto/tls" |
||||
"encoding/json" |
||||
"strconv" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/segmentio/kafka-go" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
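// kafkaHandlerConf holds the set of output topics/channels the kafka handler
// has to wire up on start.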
type kafkaHandlerConf struct { |
||||
out []kafkaPackage |
||||
} |
||||
|
||||
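// kafkaPackage ties a kafka topic (and partition) to the Go channel whose
// objects will be marshalled and published into that topic.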
type kafkaPackage struct { |
||||
topic string |
||||
partition int |
||||
channel chan interface{} |
||||
} |
||||
|
||||
// kafkaHandler's job is to check the config it receives and initialize the
|
||||
// goroutines necessary to satisfy the configuration it receives.
|
||||
// Parameters:
|
||||
// - kH: kafkaHandlerConf struct with the specific configuration used by the
|
||||
// service.
|
||||
func kafkaHandler(kH kafkaHandlerConf) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing the receivers/senders according to the provided configuration.\n") |
||||
|
||||
if kH.out != nil { |
||||
|
||||
for _, p := range kH.out { |
||||
|
||||
go kafkaSender(p.topic, p.partition, p.channel) |
||||
|
||||
} |
||||
|
||||
} |
||||
} |
||||
|
||||
// kafkaSender is the abstracted interface handling the sending of data through
|
||||
// kafka topics.
|
||||
// Parameters:
|
||||
// - t: string containing the kafka-topic in use.
|
||||
// - p: int containing the kafka-topic partition.
|
||||
// - c: interface{} channel to receive the data that will be marshalled into
|
||||
// JSON and then transmitted via kafka.
|
||||
func kafkaSender(t string, p int, c chan interface{}) { |
||||
|
||||
l.Trace.Printf("[KAFKA] Initializing kafka sender for topic: %v.\n", t) |
||||
|
||||
conf := kafka.WriterConfig{ |
||||
Brokers: cfg.Kafka.Brokers, |
||||
Topic: t, |
||||
Balancer: &kafka.LeastBytes{}, |
||||
} |
||||
|
||||
if cfg.Kafka.TLSEnabled { |
||||
|
||||
dialer := &kafka.Dialer{ |
||||
Timeout: 10 * time.Second, |
||||
DualStack: true, |
||||
TLS: &tls.Config{ |
||||
MinVersion: tls.VersionTLS12, |
||||
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, |
||||
PreferServerCipherSuites: true, |
||||
InsecureSkipVerify: cfg.General.InsecureSkipVerify, |
||||
}, |
||||
} |
||||
|
||||
conf.Dialer = dialer |
||||
|
||||
} |
||||
|
||||
w := kafka.NewWriter(conf) |
||||
defer w.Close() |
||||
|
||||
for { |
||||
|
||||
v, ok := <-c |
||||
|
||||
if !ok { |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Go Channel Problems"}).Inc() |
||||
|
||||
break |
||||
|
||||
} |
||||
|
||||
go func() { |
||||
|
||||
m, e := json.Marshal(&v) |
||||
|
||||
if e == nil { |
||||
|
||||
l.Info.Printf("[KAFKA] Object received through the channel. Starting its processing.\n") |
||||
|
||||
err := w.WriteMessages(context.Background(), |
||||
kafka.Message{ |
||||
Key: []byte(t + "-" + strconv.Itoa(p)), |
||||
Value: m, |
||||
}, |
||||
) |
||||
|
||||
if err != nil { |
||||
|
||||
l.Warning.Printf("[KAFKA] There was a problem when sending the record through the stream. Error: %v\n", err) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "Kafka Stream Problems"}).Inc() |
||||
|
||||
} else { |
||||
|
||||
l.Info.Printf("[KAFKA] Object added to the stream succesfully. Topic: %v.\n", t) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "OK", "reason": "Object sent"}).Inc() |
||||
|
||||
} |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[KAFKA] The information to be sent into the stream cannot be marshalled, please check with the administrator. Error: %v\n", e) |
||||
|
||||
metricReporting.With(prometheus.Labels{"topic": t, "state": "FAIL", "reason": "JSON Marshalling"}).Inc() |
||||
|
||||
} |
||||
|
||||
return |
||||
|
||||
}() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,192 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"flag" |
||||
"fmt" |
||||
"net/url" |
||||
"os" |
||||
"reflect" |
||||
"time" |
||||
|
||||
httptransport "github.com/go-openapi/runtime/client" |
||||
"github.com/spf13/viper" |
||||
cusClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client" |
||||
eeClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/client" |
||||
eeModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine/models" |
||||
udrModels "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr/models" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
version string |
||||
cfg configuration |
||||
pipeU chan interface{} |
||||
pipeE chan interface{} |
||||
reportClient *eeClient.EventEngineManagementAPI |
||||
zombiesClient *cusClient.CustomerDatabaseManagement |
||||
) |
||||
|
||||
// kafkaStart handles the initialization of the kafka service.
|
||||
// This is a sample function with the most basic usage of the kafka service, it
|
||||
// should be redefined to match the needs of the service.
|
||||
// Returns:
|
||||
// - chUDR, chEvents: interface{} channels used to send data through the kafka topics
|
||||
// generated.
|
||||
func kafkaStart() (chUDR, chEvents chan interface{}) { |
||||
|
||||
l.Trace.Printf("[MAIN] Intializing Kafka\n") |
||||
|
||||
chUDR = make(chan interface{}, 1000) |
||||
chEvents = make(chan interface{}, 1000) |
||||
|
||||
handler := kafkaHandlerConf{ |
||||
out: []kafkaPackage{ |
||||
{ |
||||
topic: cfg.Kafka.TopicUDR, |
||||
channel: chUDR, |
||||
}, |
||||
{ |
||||
topic: cfg.Kafka.TopicEEngine, |
||||
channel: chEvents, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
kafkaHandler(handler) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// report handles the process of sending the event or usage to the respective
|
||||
// service.
|
||||
// Parameters:
|
||||
// - object: an interface{} reference with the event/usage to be sent.
|
||||
func report(object interface{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] The reporting process has been started.\n") |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(udrModels.Usage{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] UDR Object detected. Sending through kafka.\n") |
||||
|
||||
pipeU <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
if reflect.TypeOf(object) == reflect.TypeOf(eeModels.Event{}) { |
||||
|
||||
l.Trace.Printf("[REPORT] Event Object detected. Sending through kafka.\n") |
||||
|
||||
pipeE <- object |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
fail := "the provided object doesn't belong to UDR or EE models" |
||||
|
||||
l.Warning.Printf("[REPORT] Something went wrong while processing the object, check with the administrator. Error: %v.\n", fail) |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
func init() { |
||||
|
||||
confFile := flag.String("conf", "./config", "configuration file path (without toml extension)") |
||||
|
||||
flag.Parse() |
||||
|
||||
//placeholder code as the default value will ensure this situation will never arise
|
||||
if len(*confFile) == 0 { |
||||
|
||||
fmt.Printf("Usage: Collector-TYPE -conf=/path/to/configuration/file\n") |
||||
|
||||
os.Exit(0) |
||||
|
||||
} |
||||
|
||||
// err := gcfg.ReadFileInto(&cfg, *confFile)
|
||||
viper.SetConfigName(*confFile) // name of config file (without extension)
|
||||
viper.SetConfigType("toml") |
||||
viper.AddConfigPath(".") // path to look for the config file in
|
||||
|
||||
err := viper.ReadInConfig() // Find and read the config file
|
||||
|
||||
if err != nil { |
||||
|
||||
// TODO(murp) - differentiate between file not found and formatting error in
|
||||
// config file)
|
||||
fmt.Printf("[MAIN] Failed to parse configuration data: %s\nCorrect usage: Collector-TYPE -conf=/path/to/configuration/file\n", err) |
||||
|
||||
os.Exit(1) |
||||
|
||||
} |
||||
|
||||
cfg = parseConfig() |
||||
|
||||
e := l.InitLogger(cfg.General.LogFile, cfg.General.LogLevel, cfg.General.LogToConsole) |
||||
|
||||
if e != nil { |
||||
|
||||
fmt.Printf("[MAIN] Initialization of the logger failed. Error: %v\n", e) |
||||
|
||||
} |
||||
|
||||
l.Info.Printf("Cyclops Labs Collector TYPE version %v initialized\n", version) |
||||
|
||||
dumpConfig(cfg) |
||||
|
||||
// Let's start the HTTP Server and Gauges for Prometheus
|
||||
prometheusStart() |
||||
|
||||
} |
||||
|
||||
func main() { |
||||
|
||||
// If needed here is the initialization for the kafka sender:
|
||||
pipeU, pipeE = kafkaStart() |
||||
|
||||
// Here we start the client instantiation to send reports to the EventsEngine.
|
||||
eeConfig := eeClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["eventsengine"], |
||||
Path: eeClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
reportClient = eeClient.New(eeConfig) |
||||
|
||||
// Here we start the client instantiation to get the canceled customers to check for zombies.
|
||||
cusConfig := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
zombiesClient = cusClient.New(cusConfig) |
||||
|
||||
// Start of the caches
|
||||
FlavorCache = make(map[string]FlavorIDCacheData) |
||||
ImageCache = make(map[string]ImageIDCacheData) |
||||
|
||||
// Let's launch the first collection process.
|
||||
go collect() |
||||
|
||||
// cfg.General.Periodicity should be changed to cfg.General.ObjectsPeriodicity
|
||||
// in case you need the long (8h) periodicity.
|
||||
for range time.NewTicker(time.Duration(cfg.General.Periodicity) * time.Minute).C { |
||||
|
||||
go collect() |
||||
|
||||
} |
||||
|
||||
} |
@ -0,0 +1,95 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"log" |
||||
"net/http" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/prometheus/client_golang/prometheus/promhttp" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
var ( |
||||
metricReporting *prometheus.GaugeVec |
||||
metricCollection *prometheus.GaugeVec |
||||
metricTime *prometheus.GaugeVec |
||||
metricCount *prometheus.GaugeVec |
||||
) |
||||
|
||||
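// prometheusStart registers the collector gauges in a dedicated registry and,
// when metrics export is enabled in the configuration, exposes them over HTTP
// on the configured port and route.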
func prometheusStart() { |
||||
|
||||
reg := prometheus.NewPedanticRegistry() |
||||
|
||||
metricReporting = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "kafka_send_state", |
||||
Help: "Reporting information and Kafka topics usage", |
||||
}, |
||||
[]string{ |
||||
"reason", |
||||
"state", |
||||
"topic", |
||||
}, |
||||
) |
||||
|
||||
metricCollection = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "Collection", |
||||
Help: "Collection information and usages data", |
||||
}, |
||||
[]string{ |
||||
"account", |
||||
"event", |
||||
"reason", |
||||
"state", |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricTime = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: "collection_time", |
||||
Help: "Different timing metrics", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
metricCount = prometheus.NewGaugeVec( |
||||
prometheus.GaugeOpts{ |
||||
Namespace: "CYCLOPS", |
||||
Subsystem: collector + "_Collector", |
||||
Name: objects + "_count", |
||||
Help: "Different VM Counts", |
||||
}, |
||||
[]string{ |
||||
"type", |
||||
}, |
||||
) |
||||
|
||||
reg.MustRegister(metricReporting, metricCollection, metricTime, metricCount) |
||||
//prometheus.MustRegister(metricReporting, metricCollection)
|
||||
|
||||
l.Trace.Printf("[Prometheus] Starting to serve the metrics.\n") |
||||
|
||||
go func() { |
||||
|
||||
if cfg.Prometheus.MetricsExport { |
||||
|
||||
//http.Handle(cfg.Prometheus.MetricsRoute, promhttp.Handler())
|
||||
http.Handle(cfg.Prometheus.MetricsRoute, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) |
||||
|
||||
log.Fatal(http.ListenAndServe(":"+cfg.Prometheus.MetricsPort, nil)) |
||||
|
||||
} |
||||
|
||||
}() |
||||
|
||||
} |
@ -0,0 +1,28 @@ |
||||
module github.com/Cyclops-Labs/cyclops-4-hpc.git/cyclops-collectors/servers-collector |
||||
|
||||
go 1.13 |
||||
|
||||
require ( |
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect |
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect |
||||
github.com/go-openapi/analysis v0.21.1 // indirect |
||||
github.com/go-openapi/runtime v0.21.0 |
||||
github.com/go-stack/stack v1.8.1 // indirect |
||||
github.com/golang/snappy v0.0.4 // indirect |
||||
github.com/gophercloud/gophercloud v0.23.0 |
||||
github.com/klauspost/cpuid/v2 v2.0.9 // indirect |
||||
github.com/mailru/easyjson v0.7.7 // indirect |
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect |
||||
github.com/prometheus/client_golang v1.11.0 |
||||
github.com/prometheus/common v0.32.1 // indirect |
||||
github.com/prometheus/procfs v0.7.3 // indirect |
||||
github.com/segmentio/kafka-go v0.4.23 |
||||
github.com/spf13/viper v1.9.0 |
||||
gitlab.com/cyclops-utilities/datamodels v0.0.0-20191016132854-e9313e683e5b |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/eventsengine v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr v0.0.1 |
||||
gitlab.com/cyclops-utilities/logging v0.0.0-20200914110347-ca1d02efd346 |
||||
go.mongodb.org/mongo-driver v1.7.4 // indirect |
||||
golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 // indirect |
||||
) |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,93 @@ |
||||
# Welcome to the configuration file for this |
||||
# |
||||
# ██████╗██╗ ██╗ ██████╗██╗ ██████╗ ██████╗ ███████╗ |
||||
# ██╔════╝╚██╗ ██╔╝██╔════╝██║ ██╔═══██╗██╔══██╗██╔════╝ |
||||
# ██║ ╚████╔╝ ██║ ██║ ██║ ██║██████╔╝███████╗ |
||||
# ██║ ╚██╔╝ ██║ ██║ ██║ ██║██╔═══╝ ╚════██║ |
||||
# ╚██████╗ ██║ ╚██████╗███████╗╚██████╔╝██║ ███████║ |
||||
# ╚═════╝ ╚═╝ ╚═════╝╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ |
||||
# |
||||
# ██╗ █████╗ ██████╗ ███████╗ |
||||
# ██║ ██╔══██╗██╔══██╗██╔════╝ |
||||
# ██║ ███████║██████╔╝███████╗ |
||||
# ██║ ██╔══██║██╔══██╗╚════██║ |
||||
# ███████╗██║ ██║██████╔╝███████║ |
||||
# ╚══════╝╚═╝ ╚═╝╚═════╝ ╚══════╝ |
||||
# |
||||
# collector! |
||||
|
||||
[APIKEY] |
||||
Enabled = true |
||||
Key = "X-API-KEY" |
||||
Place = "header" |
||||
Token = "1234567890abcdefghi" |
||||
|
||||
[EVENTS] |
||||
Filters = [ "filter1", "filter2", "filter3" ] |
||||
|
||||
[GENERAL] |
||||
LogFile = "" |
||||
LogToConsole = true |
||||
# loglevel values can be one of the following: TRACE, DEBUG, INFO, WARNING, ERROR |
||||
LogLevel = "TRACE" |
||||
ObjectsPeriodicity = 480 |
||||
Periodicity = 15 |
||||
PrometheusPeriodicity = 60 |
||||
|
||||
[HEAPPE] |
||||
Username = "" |
||||
Password = "" |
||||
GroupResourceUsageReportUri = "" |
||||
AuthenticateUserPasswordUri = "" |
||||
|
||||
[KAFKA] |
||||
Brokers = [ "broker-1-IP:broker-1-PORT", "broker-2-IP:broker-2-PORT", "broker-3-IP:broker-3-PORT" ] |
||||
# -1 for the most recent |
||||
# -2 for the first in the partition |
||||
# Any other for a specific offset |
||||
Offset = "-1" |
||||
Partition = "0" |
||||
SizeMax = 10e6 |
||||
SizeMin = 10e3 |
||||
TLSEnabled = false |
||||
TopicEEngine = "Events" |
||||
TopicUDR = "UDR" |
||||
|
||||
[KEYCLOAK] |
||||
ClientID = "SERVICE" |
||||
ClientSecret = "00000000-0000-0000-0000-00000000" |
||||
Enabled = true |
||||
Host = "0.0.0.0" |
||||
Port = 8080 |
||||
Realm = "Development" |
||||
RedirectURL = "" |
||||
UseHttp = true |
||||
|
||||
[LIEUTENANT] |
||||
Host = "lieutenant:4010" |
||||
Token = "" |
||||
|
||||
[OPENSTACK] |
||||
Domain = "" |
||||
Keystone = "" |
||||
Password = "" |
||||
Project = "" |
||||
Region = "" |
||||
User = "" |
||||
|
||||
[PROMETHEUS] |
||||
Host = "prometheus:9000" |
||||
MetricsExport = true |
||||
MetricsPort = "9000" |
||||
MetricsRoute = "/metrics" |
||||
|
||||
[RGW] |
||||
AccessKey = "" |
||||
AdminPath = "" |
||||
Region = "" |
||||
SecretAccessKey = "" |
||||
ServerURL = "" |
||||
|
||||
[SERVICES] |
||||
CustomerDB = "localhost:8400" |
||||
EventsEngine = "localhost:8500" |
@ -0,0 +1,17 @@ |
||||
version: '3' |
||||
|
||||
services: |
||||
|
||||
collectors: |
||||
environment: |
||||
WAIT_AFTER_HOSTS: 30 |
||||
image: servers-collector:latest |
||||
networks: |
||||
- collectorsnet |
||||
restart: always |
||||
volumes: |
||||
- ${PWD}/config.toml:/config.toml |
||||
|
||||
networks: |
||||
collectorsnet: |
||||
driver: bridge |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,27 @@ |
||||
# LEXIS EXTENSIONS SERVICE |
||||
|
||||
Cyclops Engine's Extension Service for LEXIS implemented in Go |
||||
|
||||
## How to build |
||||
|
||||
The building of the service is carried out by a multistage Docker build: everything is compiled inside an image that contains all the tooling needed to build Golang applications, and the resulting executable is then moved to a minimal image based on the scratch image. |
||||
|
||||
Within the build folder at the root of the repo there's a script called start.sh; its invocation admits "Dev" as a parameter. Running the script with this parameter will use the local version of the repository as the base for building the service's Docker image, whereas running it without the parameter will build the service entirely from sources. |
||||
|
||||
``` |
||||
./start.sh [Dev] |
||||
``` |
||||
|
||||
The initial branch used when building from sources is "master"; it, along with a few other parameters, can be changed by editing the script. |
||||
|
||||
Using the optional [Dev] argument will take the code present in the repo at the moment of invocation. |
||||
|
||||
## How to run |
||||
|
||||
Within the run folder at the root of the repo there's a sample docker-compose file and a sample config.toml file. Once they are configured with the appropriate data, start the service by issuing the following command: |
||||
|
||||
``` |
||||
docker-compose up -d |
||||
``` |
||||
|
||||
|
@ -0,0 +1,75 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package client |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
"net/url" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
rtclient "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/client/status_management" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/client/sync_management" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/client/trigger_management" |
||||
) |
||||
|
||||
const ( |
||||
// DefaultHost is the default Host
|
||||
// found in Meta (info) section of spec file
|
||||
DefaultHost string = "localhost:8000" |
||||
// DefaultBasePath is the default BasePath
|
||||
// found in Meta (info) section of spec file
|
||||
DefaultBasePath string = "/api/v0.1" |
||||
) |
||||
|
||||
// DefaultSchemes are the default schemes found in Meta (info) section of spec file
|
||||
var DefaultSchemes = []string{"http", "https"} |
||||
|
||||
type Config struct { |
||||
// URL is the base URL of the upstream server
|
||||
URL *url.URL |
||||
// Transport is an inner transport for the client
|
||||
Transport http.RoundTripper |
||||
// AuthInfo is for authentication
|
||||
AuthInfo runtime.ClientAuthInfoWriter |
||||
} |
||||
|
||||
// New creates a new l e x i s extension management API HTTP client.
|
||||
func New(c Config) *LEXISExtensionManagementAPI { |
||||
var ( |
||||
host = DefaultHost |
||||
basePath = DefaultBasePath |
||||
schemes = DefaultSchemes |
||||
) |
||||
|
||||
if c.URL != nil { |
||||
host = c.URL.Host |
||||
basePath = c.URL.Path |
||||
schemes = []string{c.URL.Scheme} |
||||
} |
||||
|
||||
transport := rtclient.New(host, basePath, schemes) |
||||
if c.Transport != nil { |
||||
transport.Transport = c.Transport |
||||
} |
||||
|
||||
cli := new(LEXISExtensionManagementAPI) |
||||
cli.Transport = transport |
||||
cli.StatusManagement = status_management.New(transport, strfmt.Default, c.AuthInfo) |
||||
cli.SyncManagement = sync_management.New(transport, strfmt.Default, c.AuthInfo) |
||||
cli.TriggerManagement = trigger_management.New(transport, strfmt.Default, c.AuthInfo) |
||||
return cli |
||||
} |
||||
|
||||
// LEXISExtensionManagementAPI is a client for l e x i s extension management API
|
||||
type LEXISExtensionManagementAPI struct { |
||||
StatusManagement *status_management.Client |
||||
SyncManagement *sync_management.Client |
||||
TriggerManagement *trigger_management.Client |
||||
Transport runtime.ClientTransport |
||||
} |
@ -0,0 +1,135 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime" |
||||
cr "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
// NewGetStatusParams creates a new GetStatusParams object
|
||||
// with the default values initialized.
|
||||
func NewGetStatusParams() *GetStatusParams { |
||||
var () |
||||
return &GetStatusParams{ |
||||
|
||||
timeout: cr.DefaultTimeout, |
||||
} |
||||
} |
||||
|
||||
// NewGetStatusParamsWithTimeout creates a new GetStatusParams object
|
||||
// with the default values initialized, and the ability to set a timeout on a request
|
||||
func NewGetStatusParamsWithTimeout(timeout time.Duration) *GetStatusParams { |
||||
var () |
||||
return &GetStatusParams{ |
||||
|
||||
timeout: timeout, |
||||
} |
||||
} |
||||
|
||||
// NewGetStatusParamsWithContext creates a new GetStatusParams object
|
||||
// with the default values initialized, and the ability to set a context for a request
|
||||
func NewGetStatusParamsWithContext(ctx context.Context) *GetStatusParams { |
||||
var () |
||||
return &GetStatusParams{ |
||||
|
||||
Context: ctx, |
||||
} |
||||
} |
||||
|
||||
// NewGetStatusParamsWithHTTPClient creates a new GetStatusParams object
|
||||
// with the default values initialized, and the ability to set a custom HTTPClient for a request
|
||||
func NewGetStatusParamsWithHTTPClient(client *http.Client) *GetStatusParams { |
||||
var () |
||||
return &GetStatusParams{ |
||||
HTTPClient: client, |
||||
} |
||||
} |
||||
|
||||
/*GetStatusParams contains all the parameters to send to the API endpoint |
||||
for the get status operation typically these are written to a http.Request |
||||
*/ |
||||
type GetStatusParams struct { |
||||
|
||||
/*ID |
||||
Id of the endpoint to be checked |
||||
|
||||
*/ |
||||
ID string |
||||
|
||||
timeout time.Duration |
||||
Context context.Context |
||||
HTTPClient *http.Client |
||||
} |
||||
|
||||
// WithTimeout adds the timeout to the get status params
|
||||
func (o *GetStatusParams) WithTimeout(timeout time.Duration) *GetStatusParams { |
||||
o.SetTimeout(timeout) |
||||
return o |
||||
} |
||||
|
||||
// SetTimeout adds the timeout to the get status params
|
||||
func (o *GetStatusParams) SetTimeout(timeout time.Duration) { |
||||
o.timeout = timeout |
||||
} |
||||
|
||||
// WithContext adds the context to the get status params
|
||||
func (o *GetStatusParams) WithContext(ctx context.Context) *GetStatusParams { |
||||
o.SetContext(ctx) |
||||
return o |
||||
} |
||||
|
||||
// SetContext adds the context to the get status params
|
||||
func (o *GetStatusParams) SetContext(ctx context.Context) { |
||||
o.Context = ctx |
||||
} |
||||
|
||||
// WithHTTPClient adds the HTTPClient to the get status params
|
||||
func (o *GetStatusParams) WithHTTPClient(client *http.Client) *GetStatusParams { |
||||
o.SetHTTPClient(client) |
||||
return o |
||||
} |
||||
|
||||
// SetHTTPClient adds the HTTPClient to the get status params
|
||||
func (o *GetStatusParams) SetHTTPClient(client *http.Client) { |
||||
o.HTTPClient = client |
||||
} |
||||
|
||||
// WithID adds the id to the get status params
|
||||
func (o *GetStatusParams) WithID(id string) *GetStatusParams { |
||||
o.SetID(id) |
||||
return o |
||||
} |
||||
|
||||
// SetID adds the id to the get status params
|
||||
func (o *GetStatusParams) SetID(id string) { |
||||
o.ID = id |
||||
} |
||||
|
||||
// WriteToRequest writes these params to a swagger request
|
||||
func (o *GetStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { |
||||
|
||||
if err := r.SetTimeout(o.timeout); err != nil { |
||||
return err |
||||
} |
||||
var res []error |
||||
|
||||
// path param id
|
||||
if err := r.SetPathParam("id", o.ID); err != nil { |
||||
return err |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,108 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/strfmt" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// GetStatusReader is a Reader for the GetStatus structure.
|
||||
type GetStatusReader struct { |
||||
formats strfmt.Registry |
||||
} |
||||
|
||||
// ReadResponse reads a server response into the received o.
|
||||
func (o *GetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { |
||||
switch response.Code() { |
||||
case 200: |
||||
result := NewGetStatusOK() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
case 404: |
||||
result := NewGetStatusNotFound() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return nil, result |
||||
|
||||
default: |
||||
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) |
||||
} |
||||
} |
||||
|
||||
// NewGetStatusOK creates a GetStatusOK with default headers values
|
||||
func NewGetStatusOK() *GetStatusOK { |
||||
return &GetStatusOK{} |
||||
} |
||||
|
||||
/*GetStatusOK handles this case with default header values. |
||||
|
||||
Status information of the system |
||||
*/ |
||||
type GetStatusOK struct { |
||||
Payload *models.Status |
||||
} |
||||
|
||||
func (o *GetStatusOK) Error() string { |
||||
return fmt.Sprintf("[GET /status/{id}][%d] getStatusOK %+v", 200, o.Payload) |
||||
} |
||||
|
||||
func (o *GetStatusOK) GetPayload() *models.Status { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *GetStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.Status) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// NewGetStatusNotFound creates a GetStatusNotFound with default headers values
|
||||
func NewGetStatusNotFound() *GetStatusNotFound { |
||||
return &GetStatusNotFound{} |
||||
} |
||||
|
||||
/*GetStatusNotFound handles this case with default header values. |
||||
|
||||
The endpoint provided doesn't exist |
||||
*/ |
||||
type GetStatusNotFound struct { |
||||
Payload *models.ErrorResponse |
||||
} |
||||
|
||||
func (o *GetStatusNotFound) Error() string { |
||||
return fmt.Sprintf("[GET /status/{id}][%d] getStatusNotFound %+v", 404, o.Payload) |
||||
} |
||||
|
||||
func (o *GetStatusNotFound) GetPayload() *models.ErrorResponse { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *GetStatusNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.ErrorResponse) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,112 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime" |
||||
cr "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
// NewShowStatusParams creates a new ShowStatusParams object
|
||||
// with the default values initialized.
|
||||
func NewShowStatusParams() *ShowStatusParams { |
||||
|
||||
return &ShowStatusParams{ |
||||
|
||||
timeout: cr.DefaultTimeout, |
||||
} |
||||
} |
||||
|
||||
// NewShowStatusParamsWithTimeout creates a new ShowStatusParams object
|
||||
// with the default values initialized, and the ability to set a timeout on a request
|
||||
func NewShowStatusParamsWithTimeout(timeout time.Duration) *ShowStatusParams { |
||||
|
||||
return &ShowStatusParams{ |
||||
|
||||
timeout: timeout, |
||||
} |
||||
} |
||||
|
||||
// NewShowStatusParamsWithContext creates a new ShowStatusParams object
|
||||
// with the default values initialized, and the ability to set a context for a request
|
||||
func NewShowStatusParamsWithContext(ctx context.Context) *ShowStatusParams { |
||||
|
||||
return &ShowStatusParams{ |
||||
|
||||
Context: ctx, |
||||
} |
||||
} |
||||
|
||||
// NewShowStatusParamsWithHTTPClient creates a new ShowStatusParams object
|
||||
// with the default values initialized, and the ability to set a custom HTTPClient for a request
|
||||
func NewShowStatusParamsWithHTTPClient(client *http.Client) *ShowStatusParams { |
||||
|
||||
return &ShowStatusParams{ |
||||
HTTPClient: client, |
||||
} |
||||
} |
||||
|
||||
/*ShowStatusParams contains all the parameters to send to the API endpoint |
||||
for the show status operation typically these are written to a http.Request |
||||
*/ |
||||
type ShowStatusParams struct { |
||||
timeout time.Duration |
||||
Context context.Context |
||||
HTTPClient *http.Client |
||||
} |
||||
|
||||
// WithTimeout adds the timeout to the show status params
|
||||
func (o *ShowStatusParams) WithTimeout(timeout time.Duration) *ShowStatusParams { |
||||
o.SetTimeout(timeout) |
||||
return o |
||||
} |
||||
|
||||
// SetTimeout adds the timeout to the show status params
|
||||
func (o *ShowStatusParams) SetTimeout(timeout time.Duration) { |
||||
o.timeout = timeout |
||||
} |
||||
|
||||
// WithContext adds the context to the show status params
|
||||
func (o *ShowStatusParams) WithContext(ctx context.Context) *ShowStatusParams { |
||||
o.SetContext(ctx) |
||||
return o |
||||
} |
||||
|
||||
// SetContext adds the context to the show status params
|
||||
func (o *ShowStatusParams) SetContext(ctx context.Context) { |
||||
o.Context = ctx |
||||
} |
||||
|
||||
// WithHTTPClient adds the HTTPClient to the show status params
|
||||
func (o *ShowStatusParams) WithHTTPClient(client *http.Client) *ShowStatusParams { |
||||
o.SetHTTPClient(client) |
||||
return o |
||||
} |
||||
|
||||
// SetHTTPClient adds the HTTPClient to the show status params
|
||||
func (o *ShowStatusParams) SetHTTPClient(client *http.Client) { |
||||
o.HTTPClient = client |
||||
} |
||||
|
||||
// WriteToRequest writes these params to a swagger request
|
||||
func (o *ShowStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { |
||||
|
||||
if err := r.SetTimeout(o.timeout); err != nil { |
||||
return err |
||||
} |
||||
var res []error |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,69 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/strfmt" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// ShowStatusReader is a Reader for the ShowStatus structure.
|
||||
type ShowStatusReader struct { |
||||
formats strfmt.Registry |
||||
} |
||||
|
||||
// ReadResponse reads a server response into the received o.
|
||||
func (o *ShowStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { |
||||
switch response.Code() { |
||||
case 200: |
||||
result := NewShowStatusOK() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
|
||||
default: |
||||
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) |
||||
} |
||||
} |
||||
|
||||
// NewShowStatusOK creates a ShowStatusOK with default headers values
|
||||
func NewShowStatusOK() *ShowStatusOK { |
||||
return &ShowStatusOK{} |
||||
} |
||||
|
||||
/*ShowStatusOK handles this case with default header values. |
||||
|
||||
Status information of the system |
||||
*/ |
||||
type ShowStatusOK struct { |
||||
Payload *models.Status |
||||
} |
||||
|
||||
func (o *ShowStatusOK) Error() string { |
||||
return fmt.Sprintf("[GET /status][%d] showStatusOK %+v", 200, o.Payload) |
||||
} |
||||
|
||||
func (o *ShowStatusOK) GetPayload() *models.Status { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *ShowStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.Status) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,94 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
strfmt "github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
//go:generate mockery -name API -inpkg
|
||||
|
||||
// API is the interface of the status management client
|
||||
type API interface { |
||||
/* |
||||
GetStatus gets the basic status of the system*/ |
||||
GetStatus(ctx context.Context, params *GetStatusParams) (*GetStatusOK, error) |
||||
/* |
||||
ShowStatus gets the basic status of the system*/ |
||||
ShowStatus(ctx context.Context, params *ShowStatusParams) (*ShowStatusOK, error) |
||||
} |
||||
|
||||
// New creates a new status management API client.
|
||||
func New(transport runtime.ClientTransport, formats strfmt.Registry, authInfo runtime.ClientAuthInfoWriter) *Client { |
||||
return &Client{ |
||||
transport: transport, |
||||
formats: formats, |
||||
authInfo: authInfo, |
||||
} |
||||
} |
||||
|
||||
/* |
||||
Client for status management API |
||||
*/ |
||||
type Client struct { |
||||
transport runtime.ClientTransport |
||||
formats strfmt.Registry |
||||
authInfo runtime.ClientAuthInfoWriter |
||||
} |
||||
|
||||
/* |
||||
GetStatus gets the basic status of the system |
||||
*/ |
||||
func (a *Client) GetStatus(ctx context.Context, params *GetStatusParams) (*GetStatusOK, error) { |
||||
|
||||
result, err := a.transport.Submit(&runtime.ClientOperation{ |
||||
ID: "getStatus", |
||||
Method: "GET", |
||||
PathPattern: "/status/{id}", |
||||
ProducesMediaTypes: []string{"application/json"}, |
||||
ConsumesMediaTypes: []string{"application/json"}, |
||||
Schemes: []string{"http", "https"}, |
||||
Params: params, |
||||
Reader: &GetStatusReader{formats: a.formats}, |
||||
AuthInfo: a.authInfo, |
||||
Context: ctx, |
||||
Client: params.HTTPClient, |
||||
}) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return result.(*GetStatusOK), nil |
||||
|
||||
} |
||||
|
||||
/* |
||||
ShowStatus gets the basic status of the system |
||||
*/ |
||||
func (a *Client) ShowStatus(ctx context.Context, params *ShowStatusParams) (*ShowStatusOK, error) { |
||||
|
||||
result, err := a.transport.Submit(&runtime.ClientOperation{ |
||||
ID: "showStatus", |
||||
Method: "GET", |
||||
PathPattern: "/status", |
||||
ProducesMediaTypes: []string{"application/json"}, |
||||
ConsumesMediaTypes: []string{"application/json"}, |
||||
Schemes: []string{"http", "https"}, |
||||
Params: params, |
||||
Reader: &ShowStatusReader{formats: a.formats}, |
||||
AuthInfo: a.authInfo, |
||||
Context: ctx, |
||||
Client: params.HTTPClient, |
||||
}) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return result.(*ShowStatusOK), nil |
||||
|
||||
} |
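
The two operations above are normally reached through the go-openapi runtime transport. The snippet below is a minimal usage sketch, not part of the generated code: the host, base path, and the import path of the generated client package (assumed here to be `extensions/lexis/client/status_management`) are assumptions that must match the actual deployment and repository layout.

```
package main

import (
	"context"
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	// Assumed location of the generated package; adjust to the real layout.
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/client/status_management"
)

func main() {
	// Hypothetical service endpoint and base path.
	transport := httptransport.New("localhost:8080", "/api/v1.0", []string{"http"})

	// A nil ClientAuthInfoWriter is accepted; the transport then sends no credentials.
	client := status_management.New(transport, strfmt.Default, nil)

	ctx := context.Background()
	ok, err := client.ShowStatus(ctx, status_management.NewShowStatusParamsWithContext(ctx))
	if err != nil {
		log.Fatalf("show status failed: %v", err)
	}

	fmt.Printf("service status: %+v\n", ok.Payload)
}
```
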
@ -0,0 +1,112 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime" |
||||
cr "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
// NewSyncFlavorsParams creates a new SyncFlavorsParams object
|
||||
// with the default values initialized.
|
||||
func NewSyncFlavorsParams() *SyncFlavorsParams { |
||||
|
||||
return &SyncFlavorsParams{ |
||||
|
||||
timeout: cr.DefaultTimeout, |
||||
} |
||||
} |
||||
|
||||
// NewSyncFlavorsParamsWithTimeout creates a new SyncFlavorsParams object
|
||||
// with the default values initialized, and the ability to set a timeout on a request
|
||||
func NewSyncFlavorsParamsWithTimeout(timeout time.Duration) *SyncFlavorsParams { |
||||
|
||||
return &SyncFlavorsParams{ |
||||
|
||||
timeout: timeout, |
||||
} |
||||
} |
||||
|
||||
// NewSyncFlavorsParamsWithContext creates a new SyncFlavorsParams object
|
||||
// with the default values initialized, and the ability to set a context for a request
|
||||
func NewSyncFlavorsParamsWithContext(ctx context.Context) *SyncFlavorsParams { |
||||
|
||||
return &SyncFlavorsParams{ |
||||
|
||||
Context: ctx, |
||||
} |
||||
} |
||||
|
||||
// NewSyncFlavorsParamsWithHTTPClient creates a new SyncFlavorsParams object
|
||||
// with the default values initialized, and the ability to set a custom HTTPClient for a request
|
||||
func NewSyncFlavorsParamsWithHTTPClient(client *http.Client) *SyncFlavorsParams { |
||||
|
||||
return &SyncFlavorsParams{ |
||||
HTTPClient: client, |
||||
} |
||||
} |
||||
|
||||
/*SyncFlavorsParams contains all the parameters to send to the API endpoint |
||||
for the sync flavors operation typically these are written to a http.Request |
||||
*/ |
||||
type SyncFlavorsParams struct { |
||||
timeout time.Duration |
||||
Context context.Context |
||||
HTTPClient *http.Client |
||||
} |
||||
|
||||
// WithTimeout adds the timeout to the sync flavors params
|
||||
func (o *SyncFlavorsParams) WithTimeout(timeout time.Duration) *SyncFlavorsParams { |
||||
o.SetTimeout(timeout) |
||||
return o |
||||
} |
||||
|
||||
// SetTimeout adds the timeout to the sync flavors params
|
||||
func (o *SyncFlavorsParams) SetTimeout(timeout time.Duration) { |
||||
o.timeout = timeout |
||||
} |
||||
|
||||
// WithContext adds the context to the sync flavors params
|
||||
func (o *SyncFlavorsParams) WithContext(ctx context.Context) *SyncFlavorsParams { |
||||
o.SetContext(ctx) |
||||
return o |
||||
} |
||||
|
||||
// SetContext adds the context to the sync flavors params
|
||||
func (o *SyncFlavorsParams) SetContext(ctx context.Context) { |
||||
o.Context = ctx |
||||
} |
||||
|
||||
// WithHTTPClient adds the HTTPClient to the sync flavors params
|
||||
func (o *SyncFlavorsParams) WithHTTPClient(client *http.Client) *SyncFlavorsParams { |
||||
o.SetHTTPClient(client) |
||||
return o |
||||
} |
||||
|
||||
// SetHTTPClient adds the HTTPClient to the sync flavors params
|
||||
func (o *SyncFlavorsParams) SetHTTPClient(client *http.Client) { |
||||
o.HTTPClient = client |
||||
} |
||||
|
||||
// WriteToRequest writes these params to a swagger request
|
||||
func (o *SyncFlavorsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { |
||||
|
||||
if err := r.SetTimeout(o.timeout); err != nil { |
||||
return err |
||||
} |
||||
var res []error |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,123 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/strfmt" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// SyncFlavorsReader is a Reader for the SyncFlavors structure.
|
||||
type SyncFlavorsReader struct { |
||||
formats strfmt.Registry |
||||
} |
||||
|
||||
// ReadResponse reads a server response into the received o.
|
||||
func (o *SyncFlavorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { |
||||
switch response.Code() { |
||||
case 200: |
||||
result := NewSyncFlavorsOK() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
case 202: |
||||
result := NewSyncFlavorsAccepted() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
case 500: |
||||
result := NewSyncFlavorsInternalServerError() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return nil, result |
||||
|
||||
default: |
||||
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) |
||||
} |
||||
} |
||||
|
||||
// NewSyncFlavorsOK creates a SyncFlavorsOK with default headers values
|
||||
func NewSyncFlavorsOK() *SyncFlavorsOK { |
||||
return &SyncFlavorsOK{} |
||||
} |
||||
|
||||
/*SyncFlavorsOK handles this case with default header values. |
||||
|
||||
The data load completed successfully |
||||
*/ |
||||
type SyncFlavorsOK struct { |
||||
} |
||||
|
||||
func (o *SyncFlavorsOK) Error() string { |
||||
return fmt.Sprintf("[GET /sync/flavors][%d] syncFlavorsOK ", 200) |
||||
} |
||||
|
||||
func (o *SyncFlavorsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// NewSyncFlavorsAccepted creates a SyncFlavorsAccepted with default headers values
|
||||
func NewSyncFlavorsAccepted() *SyncFlavorsAccepted { |
||||
return &SyncFlavorsAccepted{} |
||||
} |
||||
|
||||
/*SyncFlavorsAccepted handles this case with default header values. |
||||
|
||||
Operation completed, but some of the data may have failed to be added |
||||
*/ |
||||
type SyncFlavorsAccepted struct { |
||||
} |
||||
|
||||
func (o *SyncFlavorsAccepted) Error() string { |
||||
return fmt.Sprintf("[GET /sync/flavors][%d] syncFlavorsAccepted ", 202) |
||||
} |
||||
|
||||
func (o *SyncFlavorsAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// NewSyncFlavorsInternalServerError creates a SyncFlavorsInternalServerError with default headers values
|
||||
func NewSyncFlavorsInternalServerError() *SyncFlavorsInternalServerError { |
||||
return &SyncFlavorsInternalServerError{} |
||||
} |
||||
|
||||
/*SyncFlavorsInternalServerError handles this case with default header values. |
||||
|
||||
Something unexpected happened; an error was raised |
||||
*/ |
||||
type SyncFlavorsInternalServerError struct { |
||||
Payload *models.ErrorResponse |
||||
} |
||||
|
||||
func (o *SyncFlavorsInternalServerError) Error() string { |
||||
return fmt.Sprintf("[GET /sync/flavors][%d] syncFlavorsInternalServerError %+v", 500, o.Payload) |
||||
} |
||||
|
||||
func (o *SyncFlavorsInternalServerError) GetPayload() *models.ErrorResponse { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *SyncFlavorsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.ErrorResponse) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,112 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime" |
||||
cr "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
// NewSyncHierarchyParams creates a new SyncHierarchyParams object
|
||||
// with the default values initialized.
|
||||
func NewSyncHierarchyParams() *SyncHierarchyParams { |
||||
|
||||
return &SyncHierarchyParams{ |
||||
|
||||
timeout: cr.DefaultTimeout, |
||||
} |
||||
} |
||||
|
||||
// NewSyncHierarchyParamsWithTimeout creates a new SyncHierarchyParams object
|
||||
// with the default values initialized, and the ability to set a timeout on a request
|
||||
func NewSyncHierarchyParamsWithTimeout(timeout time.Duration) *SyncHierarchyParams { |
||||
|
||||
return &SyncHierarchyParams{ |
||||
|
||||
timeout: timeout, |
||||
} |
||||
} |
||||
|
||||
// NewSyncHierarchyParamsWithContext creates a new SyncHierarchyParams object
|
||||
// with the default values initialized, and the ability to set a context for a request
|
||||
func NewSyncHierarchyParamsWithContext(ctx context.Context) *SyncHierarchyParams { |
||||
|
||||
return &SyncHierarchyParams{ |
||||
|
||||
Context: ctx, |
||||
} |
||||
} |
||||
|
||||
// NewSyncHierarchyParamsWithHTTPClient creates a new SyncHierarchyParams object
|
||||
// with the default values initialized, and the ability to set a custom HTTPClient for a request
|
||||
func NewSyncHierarchyParamsWithHTTPClient(client *http.Client) *SyncHierarchyParams { |
||||
|
||||
return &SyncHierarchyParams{ |
||||
HTTPClient: client, |
||||
} |
||||
} |
||||
|
||||
/*SyncHierarchyParams contains all the parameters to send to the API endpoint |
||||
for the sync hierarchy operation typically these are written to a http.Request |
||||
*/ |
||||
type SyncHierarchyParams struct { |
||||
timeout time.Duration |
||||
Context context.Context |
||||
HTTPClient *http.Client |
||||
} |
||||
|
||||
// WithTimeout adds the timeout to the sync hierarchy params
|
||||
func (o *SyncHierarchyParams) WithTimeout(timeout time.Duration) *SyncHierarchyParams { |
||||
o.SetTimeout(timeout) |
||||
return o |
||||
} |
||||
|
||||
// SetTimeout adds the timeout to the sync hierarchy params
|
||||
func (o *SyncHierarchyParams) SetTimeout(timeout time.Duration) { |
||||
o.timeout = timeout |
||||
} |
||||
|
||||
// WithContext adds the context to the sync hierarchy params
|
||||
func (o *SyncHierarchyParams) WithContext(ctx context.Context) *SyncHierarchyParams { |
||||
o.SetContext(ctx) |
||||
return o |
||||
} |
||||
|
||||
// SetContext adds the context to the sync hierarchy params
|
||||
func (o *SyncHierarchyParams) SetContext(ctx context.Context) { |
||||
o.Context = ctx |
||||
} |
||||
|
||||
// WithHTTPClient adds the HTTPClient to the sync hierarchy params
|
||||
func (o *SyncHierarchyParams) WithHTTPClient(client *http.Client) *SyncHierarchyParams { |
||||
o.SetHTTPClient(client) |
||||
return o |
||||
} |
||||
|
||||
// SetHTTPClient adds the HTTPClient to the sync hierarchy params
|
||||
func (o *SyncHierarchyParams) SetHTTPClient(client *http.Client) { |
||||
o.HTTPClient = client |
||||
} |
||||
|
||||
// WriteToRequest writes these params to a swagger request
|
||||
func (o *SyncHierarchyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { |
||||
|
||||
if err := r.SetTimeout(o.timeout); err != nil { |
||||
return err |
||||
} |
||||
var res []error |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,123 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/strfmt" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// SyncHierarchyReader is a Reader for the SyncHierarchy structure.
|
||||
type SyncHierarchyReader struct { |
||||
formats strfmt.Registry |
||||
} |
||||
|
||||
// ReadResponse reads a server response into the received o.
|
||||
func (o *SyncHierarchyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { |
||||
switch response.Code() { |
||||
case 200: |
||||
result := NewSyncHierarchyOK() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
case 202: |
||||
result := NewSyncHierarchyAccepted() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
case 500: |
||||
result := NewSyncHierarchyInternalServerError() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return nil, result |
||||
|
||||
default: |
||||
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) |
||||
} |
||||
} |
||||
|
||||
// NewSyncHierarchyOK creates a SyncHierarchyOK with default headers values
|
||||
func NewSyncHierarchyOK() *SyncHierarchyOK { |
||||
return &SyncHierarchyOK{} |
||||
} |
||||
|
||||
/*SyncHierarchyOK handles this case with default header values. |
||||
|
||||
The data load completed successfully |
||||
*/ |
||||
type SyncHierarchyOK struct { |
||||
} |
||||
|
||||
func (o *SyncHierarchyOK) Error() string { |
||||
return fmt.Sprintf("[GET /sync/hierarchy][%d] syncHierarchyOK ", 200) |
||||
} |
||||
|
||||
func (o *SyncHierarchyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// NewSyncHierarchyAccepted creates a SyncHierarchyAccepted with default headers values
|
||||
func NewSyncHierarchyAccepted() *SyncHierarchyAccepted { |
||||
return &SyncHierarchyAccepted{} |
||||
} |
||||
|
||||
/*SyncHierarchyAccepted handles this case with default header values. |
||||
|
||||
Operation completed, but some of the data may have failed to be added |
||||
*/ |
||||
type SyncHierarchyAccepted struct { |
||||
} |
||||
|
||||
func (o *SyncHierarchyAccepted) Error() string { |
||||
return fmt.Sprintf("[GET /sync/hierarchy][%d] syncHierarchyAccepted ", 202) |
||||
} |
||||
|
||||
func (o *SyncHierarchyAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// NewSyncHierarchyInternalServerError creates a SyncHierarchyInternalServerError with default headers values
|
||||
func NewSyncHierarchyInternalServerError() *SyncHierarchyInternalServerError { |
||||
return &SyncHierarchyInternalServerError{} |
||||
} |
||||
|
||||
/*SyncHierarchyInternalServerError handles this case with default header values. |
||||
|
||||
Something unexpected happened; an error was raised |
||||
*/ |
||||
type SyncHierarchyInternalServerError struct { |
||||
Payload *models.ErrorResponse |
||||
} |
||||
|
||||
func (o *SyncHierarchyInternalServerError) Error() string { |
||||
return fmt.Sprintf("[GET /sync/hierarchy][%d] syncHierarchyInternalServerError %+v", 500, o.Payload) |
||||
} |
||||
|
||||
func (o *SyncHierarchyInternalServerError) GetPayload() *models.ErrorResponse { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *SyncHierarchyInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.ErrorResponse) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,106 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
strfmt "github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
//go:generate mockery -name API -inpkg
|
||||
|
||||
// API is the interface of the sync management client
|
||||
type API interface { |
||||
/* |
||||
SyncFlavors syncs the OpenStack flavors data into the system*/ |
||||
SyncFlavors(ctx context.Context, params *SyncFlavorsParams) (*SyncFlavorsOK, *SyncFlavorsAccepted, error) |
||||
/* |
||||
SyncHierarchy syncs the organizations/projects/resources hierarchy from LEXIS*/ |
||||
SyncHierarchy(ctx context.Context, params *SyncHierarchyParams) (*SyncHierarchyOK, *SyncHierarchyAccepted, error) |
||||
} |
||||
|
||||
// New creates a new sync management API client.
|
||||
func New(transport runtime.ClientTransport, formats strfmt.Registry, authInfo runtime.ClientAuthInfoWriter) *Client { |
||||
return &Client{ |
||||
transport: transport, |
||||
formats: formats, |
||||
authInfo: authInfo, |
||||
} |
||||
} |
||||
|
||||
/* |
||||
Client for sync management API |
||||
*/ |
||||
type Client struct { |
||||
transport runtime.ClientTransport |
||||
formats strfmt.Registry |
||||
authInfo runtime.ClientAuthInfoWriter |
||||
} |
||||
|
||||
/* |
||||
SyncFlavors syncs the OpenStack flavors data into the system |
||||
*/ |
||||
func (a *Client) SyncFlavors(ctx context.Context, params *SyncFlavorsParams) (*SyncFlavorsOK, *SyncFlavorsAccepted, error) { |
||||
|
||||
result, err := a.transport.Submit(&runtime.ClientOperation{ |
||||
ID: "syncFlavors", |
||||
Method: "GET", |
||||
PathPattern: "/sync/flavors", |
||||
ProducesMediaTypes: []string{"application/json"}, |
||||
ConsumesMediaTypes: []string{"application/json"}, |
||||
Schemes: []string{"http", "https"}, |
||||
Params: params, |
||||
Reader: &SyncFlavorsReader{formats: a.formats}, |
||||
AuthInfo: a.authInfo, |
||||
Context: ctx, |
||||
Client: params.HTTPClient, |
||||
}) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
switch value := result.(type) { |
||||
case *SyncFlavorsOK: |
||||
return value, nil, nil |
||||
case *SyncFlavorsAccepted: |
||||
return nil, value, nil |
||||
} |
||||
return nil, nil, nil |
||||
|
||||
} |
||||
|
||||
/* |
||||
SyncHierarchy syncs the organizations/projects/resources hierarchy from LEXIS |
||||
*/ |
||||
func (a *Client) SyncHierarchy(ctx context.Context, params *SyncHierarchyParams) (*SyncHierarchyOK, *SyncHierarchyAccepted, error) { |
||||
|
||||
result, err := a.transport.Submit(&runtime.ClientOperation{ |
||||
ID: "syncHierarchy", |
||||
Method: "GET", |
||||
PathPattern: "/sync/hierarchy", |
||||
ProducesMediaTypes: []string{"application/json"}, |
||||
ConsumesMediaTypes: []string{"application/json"}, |
||||
Schemes: []string{"http", "https"}, |
||||
Params: params, |
||||
Reader: &SyncHierarchyReader{formats: a.formats}, |
||||
AuthInfo: a.authInfo, |
||||
Context: ctx, |
||||
Client: params.HTTPClient, |
||||
}) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
switch value := result.(type) { |
||||
case *SyncHierarchyOK: |
||||
return value, nil, nil |
||||
case *SyncHierarchyAccepted: |
||||
return nil, value, nil |
||||
} |
||||
return nil, nil, nil |
||||
|
||||
} |
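
Because both sync endpoints answer with either 200 or 202, the generated methods return two result pointers and at most one of them is non-nil. A hedged sketch of how a caller might tell the outcomes apart follows; the client is assumed to be built the same way as in the status example, and the helper function and package path are illustrative only.

```
package example

import (
	"context"
	"fmt"
	"log"

	// Assumed location of the generated package; adjust to the real layout.
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/client/sync_management"
)

// syncFlavors triggers a flavor sync and interprets the two possible success results.
func syncFlavors(ctx context.Context, api sync_management.API) error {
	ok, accepted, err := api.SyncFlavors(ctx, sync_management.NewSyncFlavorsParamsWithContext(ctx))
	if err != nil {
		// A 500 response surfaces here as *SyncFlavorsInternalServerError, which implements error.
		return fmt.Errorf("flavor sync failed: %w", err)
	}

	switch {
	case ok != nil:
		log.Println("flavor sync completed successfully")
	case accepted != nil:
		log.Println("flavor sync accepted; part of the data may not have been loaded")
	default:
		// The generated method can return (nil, nil, nil) for an unexpected result type.
		log.Println("flavor sync returned no recognised result")
	}

	return nil
}
```
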
@ -0,0 +1,66 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
strfmt "github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
//go:generate mockery -name API -inpkg
|
||||
|
||||
// API is the interface of the trigger management client
|
||||
type API interface { |
||||
/* |
||||
UDRRedo redoes the UDRs between the specified dates with the specified interval*/ |
||||
UDRRedo(ctx context.Context, params *UDRRedoParams) (*UDRRedoOK, error) |
||||
} |
||||
|
||||
// New creates a new trigger management API client.
|
||||
func New(transport runtime.ClientTransport, formats strfmt.Registry, authInfo runtime.ClientAuthInfoWriter) *Client { |
||||
return &Client{ |
||||
transport: transport, |
||||
formats: formats, |
||||
authInfo: authInfo, |
||||
} |
||||
} |
||||
|
||||
/* |
||||
Client for trigger management API |
||||
*/ |
||||
type Client struct { |
||||
transport runtime.ClientTransport |
||||
formats strfmt.Registry |
||||
authInfo runtime.ClientAuthInfoWriter |
||||
} |
||||
|
||||
/* |
||||
UDRRedo redoes the UDRs between the specified dates with the specified interval |
||||
*/ |
||||
func (a *Client) UDRRedo(ctx context.Context, params *UDRRedoParams) (*UDRRedoOK, error) { |
||||
|
||||
result, err := a.transport.Submit(&runtime.ClientOperation{ |
||||
ID: "UDRRedo", |
||||
Method: "GET", |
||||
PathPattern: "/trigger/udrsredo", |
||||
ProducesMediaTypes: []string{"application/json"}, |
||||
ConsumesMediaTypes: []string{"application/json"}, |
||||
Schemes: []string{"http", "https"}, |
||||
Params: params, |
||||
Reader: &UDRRedoReader{formats: a.formats}, |
||||
AuthInfo: a.authInfo, |
||||
Context: ctx, |
||||
Client: params.HTTPClient, |
||||
}) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return result.(*UDRRedoOK), nil |
||||
|
||||
} |
@ -0,0 +1,210 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"context" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime" |
||||
cr "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
// NewUDRRedoParams creates a new UDRRedoParams object
|
||||
// with the default values initialized.
|
||||
func NewUDRRedoParams() *UDRRedoParams { |
||||
var () |
||||
return &UDRRedoParams{ |
||||
|
||||
timeout: cr.DefaultTimeout, |
||||
} |
||||
} |
||||
|
||||
// NewUDRRedoParamsWithTimeout creates a new UDRRedoParams object
|
||||
// with the default values initialized, and the ability to set a timeout on a request
|
||||
func NewUDRRedoParamsWithTimeout(timeout time.Duration) *UDRRedoParams { |
||||
var () |
||||
return &UDRRedoParams{ |
||||
|
||||
timeout: timeout, |
||||
} |
||||
} |
||||
|
||||
// NewUDRRedoParamsWithContext creates a new UDRRedoParams object
|
||||
// with the default values initialized, and the ability to set a context for a request
|
||||
func NewUDRRedoParamsWithContext(ctx context.Context) *UDRRedoParams { |
||||
var () |
||||
return &UDRRedoParams{ |
||||
|
||||
Context: ctx, |
||||
} |
||||
} |
||||
|
||||
// NewUDRRedoParamsWithHTTPClient creates a new UDRRedoParams object
|
||||
// with the default values initialized, and the ability to set a custom HTTPClient for a request
|
||||
func NewUDRRedoParamsWithHTTPClient(client *http.Client) *UDRRedoParams { |
||||
var () |
||||
return &UDRRedoParams{ |
||||
HTTPClient: client, |
||||
} |
||||
} |
||||
|
||||
/*UDRRedoParams contains all the parameters to send to the API endpoint |
||||
for the u d r redo operation typically these are written to a http.Request |
||||
*/ |
||||
type UDRRedoParams struct { |
||||
|
||||
/*From |
||||
Datetime from which to regenerate the UDRs |
||||
|
||||
*/ |
||||
From *strfmt.DateTime |
||||
/*Interval |
||||
Interval to do increments |
||||
|
||||
*/ |
||||
Interval *string |
||||
/*To |
||||
Datetime until which to regenerate the UDRs |
||||
|
||||
*/ |
||||
To *strfmt.DateTime |
||||
|
||||
timeout time.Duration |
||||
Context context.Context |
||||
HTTPClient *http.Client |
||||
} |
||||
|
||||
// WithTimeout adds the timeout to the u d r redo params
|
||||
func (o *UDRRedoParams) WithTimeout(timeout time.Duration) *UDRRedoParams { |
||||
o.SetTimeout(timeout) |
||||
return o |
||||
} |
||||
|
||||
// SetTimeout adds the timeout to the u d r redo params
|
||||
func (o *UDRRedoParams) SetTimeout(timeout time.Duration) { |
||||
o.timeout = timeout |
||||
} |
||||
|
||||
// WithContext adds the context to the u d r redo params
|
||||
func (o *UDRRedoParams) WithContext(ctx context.Context) *UDRRedoParams { |
||||
o.SetContext(ctx) |
||||
return o |
||||
} |
||||
|
||||
// SetContext adds the context to the u d r redo params
|
||||
func (o *UDRRedoParams) SetContext(ctx context.Context) { |
||||
o.Context = ctx |
||||
} |
||||
|
||||
// WithHTTPClient adds the HTTPClient to the u d r redo params
|
||||
func (o *UDRRedoParams) WithHTTPClient(client *http.Client) *UDRRedoParams { |
||||
o.SetHTTPClient(client) |
||||
return o |
||||
} |
||||
|
||||
// SetHTTPClient adds the HTTPClient to the u d r redo params
|
||||
func (o *UDRRedoParams) SetHTTPClient(client *http.Client) { |
||||
o.HTTPClient = client |
||||
} |
||||
|
||||
// WithFrom adds the from to the u d r redo params
|
||||
func (o *UDRRedoParams) WithFrom(from *strfmt.DateTime) *UDRRedoParams { |
||||
o.SetFrom(from) |
||||
return o |
||||
} |
||||
|
||||
// SetFrom adds the from to the u d r redo params
|
||||
func (o *UDRRedoParams) SetFrom(from *strfmt.DateTime) { |
||||
o.From = from |
||||
} |
||||
|
||||
// WithInterval adds the interval to the u d r redo params
|
||||
func (o *UDRRedoParams) WithInterval(interval *string) *UDRRedoParams { |
||||
o.SetInterval(interval) |
||||
return o |
||||
} |
||||
|
||||
// SetInterval adds the interval to the u d r redo params
|
||||
func (o *UDRRedoParams) SetInterval(interval *string) { |
||||
o.Interval = interval |
||||
} |
||||
|
||||
// WithTo adds the to to the u d r redo params
|
||||
func (o *UDRRedoParams) WithTo(to *strfmt.DateTime) *UDRRedoParams { |
||||
o.SetTo(to) |
||||
return o |
||||
} |
||||
|
||||
// SetTo adds the to to the u d r redo params
|
||||
func (o *UDRRedoParams) SetTo(to *strfmt.DateTime) { |
||||
o.To = to |
||||
} |
||||
|
||||
// WriteToRequest writes these params to a swagger request
|
||||
func (o *UDRRedoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { |
||||
|
||||
if err := r.SetTimeout(o.timeout); err != nil { |
||||
return err |
||||
} |
||||
var res []error |
||||
|
||||
if o.From != nil { |
||||
|
||||
// query param from
|
||||
var qrFrom strfmt.DateTime |
||||
if o.From != nil { |
||||
qrFrom = *o.From |
||||
} |
||||
qFrom := qrFrom.String() |
||||
if qFrom != "" { |
||||
if err := r.SetQueryParam("from", qFrom); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
} |
||||
|
||||
if o.Interval != nil { |
||||
|
||||
// query param interval
|
||||
var qrInterval string |
||||
if o.Interval != nil { |
||||
qrInterval = *o.Interval |
||||
} |
||||
qInterval := qrInterval |
||||
if qInterval != "" { |
||||
if err := r.SetQueryParam("interval", qInterval); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
} |
||||
|
||||
if o.To != nil { |
||||
|
||||
// query param to
|
||||
var qrTo strfmt.DateTime |
||||
if o.To != nil { |
||||
qrTo = *o.To |
||||
} |
||||
qTo := qrTo.String() |
||||
if qTo != "" { |
||||
if err := r.SetQueryParam("to", qTo); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
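
The parameters above map onto the `from`, `to`, and `interval` query parameters. A minimal, hedged sketch of driving a UDR regeneration through the generated client follows; the package path and the interval value ("1h") are assumptions that should be checked against the service.

```
package example

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/go-openapi/strfmt"

	// Assumed location of the generated package; adjust to the real layout.
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/client/trigger_management"
)

// redoLastDay asks the service to regenerate the UDRs of the previous 24 hours.
func redoLastDay(ctx context.Context, api trigger_management.API) error {
	now := time.Now().UTC()
	from := strfmt.DateTime(now.Add(-24 * time.Hour))
	to := strfmt.DateTime(now)
	interval := "1h" // assumed interval syntax; verify what the service expects

	params := trigger_management.NewUDRRedoParamsWithContext(ctx).
		WithFrom(&from).
		WithTo(&to).
		WithInterval(&interval)

	ok, err := api.UDRRedo(ctx, params)
	if err != nil {
		return fmt.Errorf("UDR redo failed: %w", err)
	}

	log.Printf("UDR redo triggered: %s", ok.Payload.Message)
	return nil
}
```
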
@ -0,0 +1,108 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/strfmt" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// UDRRedoReader is a Reader for the UDRRedo structure.
|
||||
type UDRRedoReader struct { |
||||
formats strfmt.Registry |
||||
} |
||||
|
||||
// ReadResponse reads a server response into the received o.
|
||||
func (o *UDRRedoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { |
||||
switch response.Code() { |
||||
case 200: |
||||
result := NewUDRRedoOK() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return result, nil |
||||
case 500: |
||||
result := NewUDRRedoInternalServerError() |
||||
if err := result.readResponse(response, consumer, o.formats); err != nil { |
||||
return nil, err |
||||
} |
||||
return nil, result |
||||
|
||||
default: |
||||
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) |
||||
} |
||||
} |
||||
|
||||
// NewUDRRedoOK creates a UDRRedoOK with default headers values
|
||||
func NewUDRRedoOK() *UDRRedoOK { |
||||
return &UDRRedoOK{} |
||||
} |
||||
|
||||
/*UDRRedoOK handles this case with default header values. |
||||
|
||||
Generation task executed successfully. |
||||
*/ |
||||
type UDRRedoOK struct { |
||||
Payload *models.ItemCreatedResponse |
||||
} |
||||
|
||||
func (o *UDRRedoOK) Error() string { |
||||
return fmt.Sprintf("[GET /trigger/udrsredo][%d] uDRRedoOK %+v", 200, o.Payload) |
||||
} |
||||
|
||||
func (o *UDRRedoOK) GetPayload() *models.ItemCreatedResponse { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *UDRRedoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.ItemCreatedResponse) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// NewUDRRedoInternalServerError creates a UDRRedoInternalServerError with default headers values
|
||||
func NewUDRRedoInternalServerError() *UDRRedoInternalServerError { |
||||
return &UDRRedoInternalServerError{} |
||||
} |
||||
|
||||
/*UDRRedoInternalServerError handles this case with default header values. |
||||
|
||||
Something unexpected happened; an error was raised |
||||
*/ |
||||
type UDRRedoInternalServerError struct { |
||||
Payload *models.ErrorResponse |
||||
} |
||||
|
||||
func (o *UDRRedoInternalServerError) Error() string { |
||||
return fmt.Sprintf("[GET /trigger/udrsredo][%d] uDRRedoInternalServerError %+v", 500, o.Payload) |
||||
} |
||||
|
||||
func (o *UDRRedoInternalServerError) GetPayload() *models.ErrorResponse { |
||||
return o.Payload |
||||
} |
||||
|
||||
func (o *UDRRedoInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { |
||||
|
||||
o.Payload = new(models.ErrorResponse) |
||||
|
||||
// response payload
|
||||
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,45 @@ |
||||
module github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis |
||||
|
||||
go 1.16 |
||||
|
||||
require ( |
||||
github.com/Nerzal/gocloak/v7 v7.11.0 |
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect |
||||
github.com/go-openapi/errors v0.20.1 |
||||
github.com/go-openapi/loads v0.21.0 |
||||
github.com/go-openapi/runtime v0.21.0 |
||||
github.com/go-openapi/spec v0.20.4 |
||||
github.com/go-openapi/strfmt v0.21.1 |
||||
github.com/go-openapi/swag v0.19.15 |
||||
github.com/go-openapi/validate v0.20.3 |
||||
github.com/go-stack/stack v1.8.1 // indirect |
||||
github.com/golang/snappy v0.0.4 // indirect |
||||
github.com/gophercloud/gophercloud v0.24.0 |
||||
github.com/jackc/pgx/v4 v4.14.1 // indirect |
||||
github.com/jinzhu/now v1.1.4 // indirect |
||||
github.com/klauspost/cpuid/v2 v2.0.9 // indirect |
||||
github.com/lib/pq v1.10.4 |
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect |
||||
github.com/prometheus/client_golang v1.11.0 |
||||
github.com/prometheus/common v0.32.1 // indirect |
||||
github.com/prometheus/procfs v0.7.3 // indirect |
||||
github.com/rs/cors v1.8.0 |
||||
github.com/segmentio/asm v1.1.3 // indirect |
||||
github.com/segmentio/encoding v0.3.2 |
||||
github.com/segmentio/kafka-go v0.4.25 |
||||
github.com/segmentio/ksuid v1.0.4 // indirect |
||||
github.com/spf13/viper v1.10.1 |
||||
gitlab.com/cyclops-utilities/datamodels v0.0.0-20191016132854-e9313e683e5b |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/cdr v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/credit-system v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/plan-manager v0.0.1 |
||||
github.com/Cyclops-Labs/cyclops-4-hpc.git/services/udr v0.0.1 |
||||
gitlab.com/cyclops-utilities/logging v0.0.0-20200914110347-ca1d02efd346 |
||||
go.mongodb.org/mongo-driver v1.8.1 // indirect |
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect |
||||
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect |
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect |
||||
gorm.io/driver/postgres v1.2.3 |
||||
gorm.io/gorm v1.22.4 |
||||
) |
@ -0,0 +1,64 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package models |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
"github.com/go-openapi/validate" |
||||
) |
||||
|
||||
// ErrorResponse error response
|
||||
//
|
||||
// swagger:model ErrorResponse
|
||||
type ErrorResponse struct { |
||||
|
||||
// error string
|
||||
// Required: true
|
||||
ErrorString *string `json:"errorString"` |
||||
} |
||||
|
||||
// Validate validates this error response
|
||||
func (m *ErrorResponse) Validate(formats strfmt.Registry) error { |
||||
var res []error |
||||
|
||||
if err := m.validateErrorString(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *ErrorResponse) validateErrorString(formats strfmt.Registry) error { |
||||
|
||||
if err := validate.Required("errorString", "body", m.ErrorString); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ErrorResponse) MarshalBinary() ([]byte, error) { |
||||
if m == nil { |
||||
return nil, nil |
||||
} |
||||
return swag.WriteJSON(m) |
||||
} |
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ErrorResponse) UnmarshalBinary(b []byte) error { |
||||
var res ErrorResponse |
||||
if err := swag.ReadJSON(b, &res); err != nil { |
||||
return err |
||||
} |
||||
*m = res |
||||
return nil |
||||
} |
@ -0,0 +1,253 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package models |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"encoding/json" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
"github.com/go-openapi/validate" |
||||
) |
||||
|
||||
// HPCResource h p c resource
|
||||
//
|
||||
// swagger:model HPCResource
|
||||
type HPCResource struct { |
||||
|
||||
// approval status
|
||||
// Enum: [ACCEPTED REJECTED PENDING]
|
||||
ApprovalStatus string `json:"ApprovalStatus,omitempty" gorm:"column:approvalstatus"` |
||||
|
||||
// associated h p c project
|
||||
AssociatedHPCProject string `json:"AssociatedHPCProject,omitempty" gorm:"column:associatedhpcproject"` |
||||
|
||||
// associated l e x i s project
|
||||
// Format: uuid
|
||||
AssociatedLEXISProject strfmt.UUID `json:"AssociatedLEXISProject,omitempty" gorm:"column:associatedlexisproject;type:uuid"` |
||||
|
||||
// cloud network name
|
||||
CloudNetworkName string `json:"CloudNetworkName,omitempty" gorm:"column:cloudnetworkname"` |
||||
|
||||
// h e app e endpoint
|
||||
HEAppEEndpoint string `json:"HEAppEEndpoint,omitempty" gorm:"column:heappeendpoint"` |
||||
|
||||
// h p c provider
|
||||
// Enum: [IT4I LRZ ICHEC]
|
||||
HPCProvider string `json:"HPCProvider,omitempty" gorm:"column:hpcprovider"` |
||||
|
||||
// h p c resource ID
|
||||
HPCResourceID string `json:"HPCResourceID,omitempty" gorm:"column:hpcresourceid;primary_key;unique;default:md5(random()::text || clock_timestamp()::text)::uuid"` |
||||
|
||||
// open stack endpoint
|
||||
OpenStackEndpoint string `json:"OpenStackEndpoint,omitempty" gorm:"column:openstackendpoint"` |
||||
|
||||
// open stack project ID
|
||||
OpenStackProjectID string `json:"OpenStackProjectID,omitempty" gorm:"column:openstackprojectid"` |
||||
|
||||
// project network name
|
||||
ProjectNetworkName string `json:"ProjectNetworkName,omitempty" gorm:"column:projectnetworkname"` |
||||
|
||||
// resource type
|
||||
// Enum: [CLOUD HPC]
|
||||
ResourceType string `json:"ResourceType,omitempty" gorm:"column:resourcetype"` |
||||
|
||||
// terms consent
|
||||
TermsConsent bool `json:"TermsConsent,omitempty" gorm:"column:termsconsent;type:bool"` |
||||
} |
||||
|
||||
// Validate validates this h p c resource
|
||||
func (m *HPCResource) Validate(formats strfmt.Registry) error { |
||||
var res []error |
||||
|
||||
if err := m.validateApprovalStatus(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateAssociatedLEXISProject(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateHPCProvider(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateResourceType(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
var hPCResourceTypeApprovalStatusPropEnum []interface{} |
||||
|
||||
func init() { |
||||
var res []string |
||||
if err := json.Unmarshal([]byte(`["ACCEPTED","REJECTED","PENDING"]`), &res); err != nil { |
||||
panic(err) |
||||
} |
||||
for _, v := range res { |
||||
hPCResourceTypeApprovalStatusPropEnum = append(hPCResourceTypeApprovalStatusPropEnum, v) |
||||
} |
||||
} |
||||
|
||||
const ( |
||||
|
||||
// HPCResourceApprovalStatusACCEPTED captures enum value "ACCEPTED"
|
||||
HPCResourceApprovalStatusACCEPTED string = "ACCEPTED" |
||||
|
||||
// HPCResourceApprovalStatusREJECTED captures enum value "REJECTED"
|
||||
HPCResourceApprovalStatusREJECTED string = "REJECTED" |
||||
|
||||
// HPCResourceApprovalStatusPENDING captures enum value "PENDING"
|
||||
HPCResourceApprovalStatusPENDING string = "PENDING" |
||||
) |
||||
|
||||
// prop value enum
|
||||
func (m *HPCResource) validateApprovalStatusEnum(path, location string, value string) error { |
||||
if err := validate.EnumCase(path, location, value, hPCResourceTypeApprovalStatusPropEnum, true); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *HPCResource) validateApprovalStatus(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ApprovalStatus) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
// value enum
|
||||
if err := m.validateApprovalStatusEnum("ApprovalStatus", "body", m.ApprovalStatus); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *HPCResource) validateAssociatedLEXISProject(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.AssociatedLEXISProject) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("AssociatedLEXISProject", "body", "uuid", m.AssociatedLEXISProject.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
var hPCResourceTypeHPCProviderPropEnum []interface{} |
||||
|
||||
func init() { |
||||
var res []string |
||||
if err := json.Unmarshal([]byte(`["IT4I","LRZ","ICHEC"]`), &res); err != nil { |
||||
panic(err) |
||||
} |
||||
for _, v := range res { |
||||
hPCResourceTypeHPCProviderPropEnum = append(hPCResourceTypeHPCProviderPropEnum, v) |
||||
} |
||||
} |
||||
|
||||
const ( |
||||
|
||||
// HPCResourceHPCProviderIT4I captures enum value "IT4I"
|
||||
HPCResourceHPCProviderIT4I string = "IT4I" |
||||
|
||||
// HPCResourceHPCProviderLRZ captures enum value "LRZ"
|
||||
HPCResourceHPCProviderLRZ string = "LRZ" |
||||
|
||||
// HPCResourceHPCProviderICHEC captures enum value "ICHEC"
|
||||
HPCResourceHPCProviderICHEC string = "ICHEC" |
||||
) |
||||
|
||||
// prop value enum
|
||||
func (m *HPCResource) validateHPCProviderEnum(path, location string, value string) error { |
||||
if err := validate.EnumCase(path, location, value, hPCResourceTypeHPCProviderPropEnum, true); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *HPCResource) validateHPCProvider(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.HPCProvider) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
// value enum
|
||||
if err := m.validateHPCProviderEnum("HPCProvider", "body", m.HPCProvider); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
var hPCResourceTypeResourceTypePropEnum []interface{} |
||||
|
||||
func init() { |
||||
var res []string |
||||
if err := json.Unmarshal([]byte(`["CLOUD","HPC"]`), &res); err != nil { |
||||
panic(err) |
||||
} |
||||
for _, v := range res { |
||||
hPCResourceTypeResourceTypePropEnum = append(hPCResourceTypeResourceTypePropEnum, v) |
||||
} |
||||
} |
||||
|
||||
const ( |
||||
|
||||
// HPCResourceResourceTypeCLOUD captures enum value "CLOUD"
|
||||
HPCResourceResourceTypeCLOUD string = "CLOUD" |
||||
|
||||
// HPCResourceResourceTypeHPC captures enum value "HPC"
|
||||
HPCResourceResourceTypeHPC string = "HPC" |
||||
) |
||||
|
||||
// prop value enum
|
||||
func (m *HPCResource) validateResourceTypeEnum(path, location string, value string) error { |
||||
if err := validate.EnumCase(path, location, value, hPCResourceTypeResourceTypePropEnum, true); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *HPCResource) validateResourceType(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ResourceType) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
// value enum
|
||||
if err := m.validateResourceTypeEnum("ResourceType", "body", m.ResourceType); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *HPCResource) MarshalBinary() ([]byte, error) { |
||||
if m == nil { |
||||
return nil, nil |
||||
} |
||||
return swag.WriteJSON(m) |
||||
} |
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *HPCResource) UnmarshalBinary(b []byte) error { |
||||
var res HPCResource |
||||
if err := swag.ReadJSON(b, &res); err != nil { |
||||
return err |
||||
} |
||||
*m = res |
||||
return nil |
||||
} |
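
The enum constants and the Validate method generated above can be used to sanity-check a record before it is stored. The sketch below is illustrative only; the field values are invented, and the models import path follows the one used elsewhere in this change.

```
package example

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models"
)

// newPendingCloudResource builds an HPCResource with the generated enum constants
// and validates it before returning it. Values are illustrative only.
func newPendingCloudResource() (*models.HPCResource, error) {
	res := &models.HPCResource{
		ApprovalStatus: models.HPCResourceApprovalStatusPENDING,
		HPCProvider:    models.HPCResourceHPCProviderLRZ,
		ResourceType:   models.HPCResourceResourceTypeCLOUD,
		TermsConsent:   true,
	}

	// Validate enforces the enum values and the uuid format of AssociatedLEXISProject.
	if err := res.Validate(strfmt.Default); err != nil {
		return nil, fmt.Errorf("invalid HPC resource: %w", err)
	}

	return res, nil
}
```
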
@ -0,0 +1,43 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package models |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
) |
||||
|
||||
// ItemCreatedResponse item created response
|
||||
//
|
||||
// swagger:model ItemCreatedResponse
|
||||
type ItemCreatedResponse struct { |
||||
|
||||
// message
|
||||
Message string `json:"Message,omitempty"` |
||||
} |
||||
|
||||
// Validate validates this item created response
|
||||
func (m *ItemCreatedResponse) Validate(formats strfmt.Registry) error { |
||||
return nil |
||||
} |
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ItemCreatedResponse) MarshalBinary() ([]byte, error) { |
||||
if m == nil { |
||||
return nil, nil |
||||
} |
||||
return swag.WriteJSON(m) |
||||
} |
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ItemCreatedResponse) UnmarshalBinary(b []byte) error { |
||||
var res ItemCreatedResponse |
||||
if err := swag.ReadJSON(b, &res); err != nil { |
||||
return err |
||||
} |
||||
*m = res |
||||
return nil |
||||
} |
@ -0,0 +1,214 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package models |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"encoding/json" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
"github.com/go-openapi/validate" |
||||
) |
||||
|
||||
// Organization organization
|
||||
//
|
||||
// swagger:model Organization
|
||||
type Organization struct { |
||||
|
||||
// created by
|
||||
// Format: uuid
|
||||
CreatedBy strfmt.UUID `json:"CreatedBy,omitempty" gorm:"column:createdby;type:uuid"` |
||||
|
||||
// creation date
|
||||
// Format: date-time
|
||||
CreationDate strfmt.DateTime `json:"CreationDate,omitempty" gorm:"column:registrationdatetime;type:timestamptz"` |
||||
|
||||
// formal name
|
||||
FormalName string `json:"FormalName,omitempty" gorm:"column:formalname"` |
||||
|
||||
// ID
|
||||
// Format: uuid
|
||||
ID strfmt.UUID `json:"ID,omitempty" gorm:"type:uuid;primary_key;unique;default:md5(random()::text || clock_timestamp()::text)::uuid"` |
||||
|
||||
// organization email address
|
||||
// Format: email
|
||||
OrganizationEmailAddress strfmt.Email `json:"OrganizationEmailAddress,omitempty" gorm:"column:organizationemailaddress"` |
||||
|
||||
// organization status
|
||||
// Enum: [PENDING_APPROVAL APPROVED DISABLED TERMINATED]
|
||||
OrganizationStatus string `json:"OrganizationStatus,omitempty" gorm:"column:organizationstatus"` |
||||
|
||||
// primary telephone number
|
||||
PrimaryTelephoneNumber string `json:"PrimaryTelephoneNumber,omitempty" gorm:"column:primarytelephonenumber"` |
||||
|
||||
// registered address1
|
||||
RegisteredAddress1 string `json:"RegisteredAddress1,omitempty" gorm:"column:registeredaddress1"` |
||||
|
||||
// registered address2
|
||||
RegisteredAddress2 string `json:"RegisteredAddress2,omitempty" gorm:"column:registeredaddress2"` |
||||
|
||||
// registered address3
|
||||
RegisteredAddress3 string `json:"RegisteredAddress3,omitempty" gorm:"column:registeredaddress3"` |
||||
|
||||
// registered country
|
||||
RegisteredCountry string `json:"RegisteredCountry,omitempty" gorm:"column:registeredcountry"` |
||||
|
||||
// v a t registration number
|
||||
VATRegistrationNumber string `json:"VATRegistrationNumber,omitempty" gorm:"column:vatregistrationnumber"` |
||||
|
||||
// website
|
||||
Website string `json:"Website,omitempty"` |
||||
} |
||||
|
||||
// Validate validates this organization
|
||||
func (m *Organization) Validate(formats strfmt.Registry) error { |
||||
var res []error |
||||
|
||||
if err := m.validateCreatedBy(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateCreationDate(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateID(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateOrganizationEmailAddress(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateOrganizationStatus(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Organization) validateCreatedBy(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.CreatedBy) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("CreatedBy", "body", "uuid", m.CreatedBy.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Organization) validateCreationDate(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.CreationDate) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("CreationDate", "body", "date-time", m.CreationDate.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Organization) validateID(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ID) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ID", "body", "uuid", m.ID.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Organization) validateOrganizationEmailAddress(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.OrganizationEmailAddress) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("OrganizationEmailAddress", "body", "email", m.OrganizationEmailAddress.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
var organizationTypeOrganizationStatusPropEnum []interface{} |
||||
|
||||
func init() { |
||||
var res []string |
||||
if err := json.Unmarshal([]byte(`["PENDING_APPROVAL","APPROVED","DISABLED","TERMINATED"]`), &res); err != nil { |
||||
panic(err) |
||||
} |
||||
for _, v := range res { |
||||
organizationTypeOrganizationStatusPropEnum = append(organizationTypeOrganizationStatusPropEnum, v) |
||||
} |
||||
} |
||||
|
||||
const ( |
||||
|
||||
// OrganizationOrganizationStatusPENDINGAPPROVAL captures enum value "PENDING_APPROVAL"
|
||||
OrganizationOrganizationStatusPENDINGAPPROVAL string = "PENDING_APPROVAL" |
||||
|
||||
// OrganizationOrganizationStatusAPPROVED captures enum value "APPROVED"
|
||||
OrganizationOrganizationStatusAPPROVED string = "APPROVED" |
||||
|
||||
// OrganizationOrganizationStatusDISABLED captures enum value "DISABLED"
|
||||
OrganizationOrganizationStatusDISABLED string = "DISABLED" |
||||
|
||||
// OrganizationOrganizationStatusTERMINATED captures enum value "TERMINATED"
|
||||
OrganizationOrganizationStatusTERMINATED string = "TERMINATED" |
||||
) |
||||
|
||||
// prop value enum
|
||||
func (m *Organization) validateOrganizationStatusEnum(path, location string, value string) error { |
||||
if err := validate.EnumCase(path, location, value, organizationTypeOrganizationStatusPropEnum, true); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Organization) validateOrganizationStatus(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.OrganizationStatus) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
// value enum
|
||||
if err := m.validateOrganizationStatusEnum("OrganizationStatus", "body", m.OrganizationStatus); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *Organization) MarshalBinary() ([]byte, error) { |
||||
if m == nil { |
||||
return nil, nil |
||||
} |
||||
return swag.WriteJSON(m) |
||||
} |
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *Organization) UnmarshalBinary(b []byte) error { |
||||
var res Organization |
||||
if err := swag.ReadJSON(b, &res); err != nil { |
||||
return err |
||||
} |
||||
*m = res |
||||
return nil |
||||
} |
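
For reference, a minimal sketch of how the generated validation above behaves, assuming the Organization model lives in the lexis extension's models package (the same package models.Status is imported from later in this diff): an out-of-enum OrganizationStatus is rejected, while zero values of the optional uuid/email/date-time fields are skipped.

```
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models"
)

func main() {
	// Zero values of the optional formatted fields are skipped via swag.IsZero,
	// so only the out-of-enum status triggers a validation error here.
	org := models.Organization{OrganizationStatus: "ARCHIVED"} // not a valid enum value

	if err := org.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err) // CompositeValidationError on OrganizationStatus
	}

	org.OrganizationStatus = models.OrganizationOrganizationStatusAPPROVED
	fmt.Println(org.Validate(strfmt.Default)) // <nil>
}
```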
@ -0,0 +1,296 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package models |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"encoding/json" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
"github.com/go-openapi/validate" |
||||
"github.com/lib/pq" |
||||
) |
||||
|
||||
// Project project
|
||||
//
|
||||
// swagger:model Project
|
||||
type Project struct { |
||||
|
||||
// allowed organizations
|
||||
AllowedOrganizations pq.StringArray `json:"AllowedOrganizations,omitempty" gorm:"column:allowedorganizations;type:text[]"` |
||||
|
||||
// linked organization
|
||||
// Format: uuid
|
||||
LinkedOrganization strfmt.UUID `json:"LinkedOrganization,omitempty" gorm:"column:linkedorganization;type:uuid"` |
||||
|
||||
// norm core hours
|
||||
NormCoreHours *int64 `json:"NormCoreHours,omitempty" gorm:"column:normcorehours;default:0"` |
||||
|
||||
// project contact email
|
||||
// Format: email
|
||||
ProjectContactEmail strfmt.Email `json:"ProjectContactEmail,omitempty" gorm:"column:projectcontactemail"` |
||||
|
||||
// project contact person
|
||||
// Format: uuid
|
||||
ProjectContactPerson strfmt.UUID `json:"ProjectContactPerson,omitempty" gorm:"column:projectcontactperson;type:uuid"` |
||||
|
||||
// project created by
|
||||
// Format: uuid
|
||||
ProjectCreatedBy strfmt.UUID `json:"ProjectCreatedBy,omitempty" gorm:"column:projectcreatedby;type:uuid"` |
||||
|
||||
// project creation time
|
||||
// Format: date-time
|
||||
ProjectCreationTime strfmt.DateTime `json:"ProjectCreationTime,omitempty" gorm:"column:projectcreationtime;type:timestamptz;default:now()"` |
||||
|
||||
// project description
|
||||
ProjectDescription string `json:"ProjectDescription,omitempty" gorm:"column:projectdescription"` |
||||
|
||||
// project domain
|
||||
ProjectDomain string `json:"ProjectDomain,omitempty" gorm:"column:projectdomain"` |
||||
|
||||
// project ID
|
||||
// Format: uuid
|
||||
ProjectID strfmt.UUID `json:"ProjectID,omitempty" gorm:"column:projectid;type:uuid;primary_key;unique;default:md5(random()::text || clock_timestamp()::text)::uuid"` |
||||
|
||||
// project max price
|
||||
ProjectMaxPrice *float64 `json:"ProjectMaxPrice,omitempty" gorm:"column:projectmaxprice;type:float8;default:0.0"` |
||||
|
||||
// project name
|
||||
ProjectName string `json:"ProjectName,omitempty" gorm:"column:projectname"` |
||||
|
||||
// project short name
|
||||
ProjectShortName string `json:"ProjectShortName,omitempty" gorm:"column:projectshortname;unique"` |
||||
|
||||
// project start date
|
||||
// Format: date-time
|
||||
ProjectStartDate strfmt.DateTime `json:"ProjectStartDate,omitempty" gorm:"column:projectstartdate;type:timestamptz"` |
||||
|
||||
// project status
|
||||
// Enum: [PENDING ACTIVE DISABLED TERMINATED]
|
||||
ProjectStatus string `json:"ProjectStatus,omitempty" gorm:"column:projectstatus"` |
||||
|
||||
// project termination date
|
||||
// Format: date-time
|
||||
ProjectTerminationDate strfmt.DateTime `json:"ProjectTerminationDate,omitempty" gorm:"column:projectterminationdate;type:timestamptz"` |
||||
} |
||||
|
||||
// Validate validates this project
|
||||
func (m *Project) Validate(formats strfmt.Registry) error { |
||||
var res []error |
||||
|
||||
if err := m.validateLinkedOrganization(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectContactEmail(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectContactPerson(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectCreatedBy(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectCreationTime(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectID(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectStartDate(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectStatus(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if err := m.validateProjectTerminationDate(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateLinkedOrganization(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.LinkedOrganization) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("LinkedOrganization", "body", "uuid", m.LinkedOrganization.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectContactEmail(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectContactEmail) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectContactEmail", "body", "email", m.ProjectContactEmail.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectContactPerson(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectContactPerson) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectContactPerson", "body", "uuid", m.ProjectContactPerson.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectCreatedBy(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectCreatedBy) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectCreatedBy", "body", "uuid", m.ProjectCreatedBy.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectCreationTime(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectCreationTime) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectCreationTime", "body", "date-time", m.ProjectCreationTime.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectID(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectID) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectID", "body", "uuid", m.ProjectID.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectStartDate(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectStartDate) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectStartDate", "body", "date-time", m.ProjectStartDate.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
var projectTypeProjectStatusPropEnum []interface{} |
||||
|
||||
func init() { |
||||
var res []string |
||||
if err := json.Unmarshal([]byte(`["PENDING","ACTIVE","DISABLED","TERMINATED"]`), &res); err != nil { |
||||
panic(err) |
||||
} |
||||
for _, v := range res { |
||||
projectTypeProjectStatusPropEnum = append(projectTypeProjectStatusPropEnum, v) |
||||
} |
||||
} |
||||
|
||||
const ( |
||||
|
||||
// ProjectProjectStatusPENDING captures enum value "PENDING"
|
||||
ProjectProjectStatusPENDING string = "PENDING" |
||||
|
||||
// ProjectProjectStatusACTIVE captures enum value "ACTIVE"
|
||||
ProjectProjectStatusACTIVE string = "ACTIVE" |
||||
|
||||
// ProjectProjectStatusDISABLED captures enum value "DISABLED"
|
||||
ProjectProjectStatusDISABLED string = "DISABLED" |
||||
|
||||
// ProjectProjectStatusTERMINATED captures enum value "TERMINATED"
|
||||
ProjectProjectStatusTERMINATED string = "TERMINATED" |
||||
) |
||||
|
||||
// prop value enum
|
||||
func (m *Project) validateProjectStatusEnum(path, location string, value string) error { |
||||
if err := validate.EnumCase(path, location, value, projectTypeProjectStatusPropEnum, true); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectStatus(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectStatus) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
// value enum
|
||||
if err := m.validateProjectStatusEnum("ProjectStatus", "body", m.ProjectStatus); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (m *Project) validateProjectTerminationDate(formats strfmt.Registry) error { |
||||
|
||||
if swag.IsZero(m.ProjectTerminationDate) { // not required
|
||||
return nil |
||||
} |
||||
|
||||
if err := validate.FormatOf("ProjectTerminationDate", "body", "date-time", m.ProjectTerminationDate.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *Project) MarshalBinary() ([]byte, error) { |
||||
if m == nil { |
||||
return nil, nil |
||||
} |
||||
return swag.WriteJSON(m) |
||||
} |
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *Project) UnmarshalBinary(b []byte) error { |
||||
var res Project |
||||
if err := swag.ReadJSON(b, &res); err != nil { |
||||
return err |
||||
} |
||||
*m = res |
||||
return nil |
||||
} |
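
As a quick illustration of the (Un)MarshalBinary pair generated above, a round trip through swag's JSON helpers could look like the following sketch; the sample field values are hypothetical and the models import path is assumed to be the same one used for models.Status later in this diff.

```
package main

import (
	"fmt"

	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models"
)

func main() {
	in := models.Project{
		ProjectName:   "LEXIS demo",                      // hypothetical sample data
		ProjectStatus: models.ProjectProjectStatusACTIVE, // one of the generated enum constants
	}

	raw, err := in.MarshalBinary() // swag.WriteJSON under the hood
	if err != nil {
		panic(err)
	}

	var out models.Project
	if err := out.UnmarshalBinary(raw); err != nil { // swag.ReadJSON under the hood
		panic(err)
	}

	fmt.Println(out.ProjectName, out.ProjectStatus) // LEXIS demo ACTIVE
}
```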
@ -0,0 +1,82 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package models |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
"github.com/go-openapi/validate" |
||||
) |
||||
|
||||
// Status status
|
||||
//
|
||||
// swagger:model Status
|
||||
type Status struct { |
||||
|
||||
// average response time
|
||||
AverageResponseTime float64 `json:"AverageResponseTime,omitempty"` |
||||
|
||||
// DB state
|
||||
DBState string `json:"DBState,omitempty"` |
||||
|
||||
// last request
|
||||
LastRequest string `json:"LastRequest,omitempty"` |
||||
|
||||
// requests BoT
|
||||
RequestsBoT int64 `json:"RequestsBoT,omitempty"` |
||||
|
||||
// requests last hour
|
||||
RequestsLastHour int64 `json:"RequestsLastHour,omitempty"` |
||||
|
||||
// requests today
|
||||
RequestsToday int64 `json:"RequestsToday,omitempty"` |
||||
|
||||
// system state
|
||||
// Required: true
|
||||
SystemState *string `json:"SystemState"` |
||||
} |
||||
|
||||
// Validate validates this status
|
||||
func (m *Status) Validate(formats strfmt.Registry) error { |
||||
var res []error |
||||
|
||||
if err := m.validateSystemState(formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Status) validateSystemState(formats strfmt.Registry) error { |
||||
|
||||
if err := validate.Required("SystemState", "body", m.SystemState); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *Status) MarshalBinary() ([]byte, error) { |
||||
if m == nil { |
||||
return nil, nil |
||||
} |
||||
return swag.WriteJSON(m) |
||||
} |
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *Status) UnmarshalBinary(b []byte) error { |
||||
var res Status |
||||
if err := swag.ReadJSON(b, &res); err != nil { |
||||
return err |
||||
} |
||||
*m = res |
||||
return nil |
||||
} |
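
Unlike the optional fields in the models above, SystemState is required, so Validate rejects a zero-value Status. A minimal sketch, under the same import-path assumption as before:

```
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models"
)

func main() {
	var s models.Status
	fmt.Println(s.Validate(strfmt.Default)) // error: SystemState is required

	s.SystemState = swag.String("OK")       // any non-nil value satisfies the Required check
	fmt.Println(s.Validate(strfmt.Default)) // <nil>
}
```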
@ -0,0 +1,64 @@ |
||||
#!/bin/bash |
||||
|
||||
SERVER="http://localhost" |
||||
API_KEY="1234567890abcdefghi" |
||||
API_VERSION="v1.0" |
||||
|
||||
# We load the plans |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/plan" -X POST -d '{ "id":"1", "name": "LEXIS_1", "offeredstartdate":"2019-01-01", "offeredenddate":"2040-12-31" }' |
||||
|
||||
# We load the skus |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "1", "name": "vcpu", "unit": "Core" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "2", "name": "ram", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "3", "name": "rootdisk", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "4", "name": "ephemeraldisk", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "5", "name": "floatingip", "unit": "IP" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "6", "name": "blockstorage", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "7", "name": "objectstorage", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "8", "name": "license", "unit": "License*Core" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "9", "name": "titanxp", "unit": "Core" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "10", "name": "t4", "unit": "Core" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "11", "name": "p100", "unit": "Core" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "12", "name": "rootdisk_ssd", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "13", "name": "ephemeraldisk_ssd", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "14", "name": "blockstorage_ssd", "unit": "GB" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "15", "name": "salomon", "unit": "core-hour" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "16", "name": "barbora", "unit": "core-hour" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "17", "name": "barbora-gpu", "unit": "core-hour" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku" -X POST -d '{ "id": "18", "name": "dgx", "unit": "core-hour" }' |
||||
|
||||
# We load the sku prices |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "1", "skuname": "vcpu", "unitprice": 0.000011574, "unit": "Core", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "2", "skuname": "ram", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "3", "skuname": "rootdisk", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "4", "skuname": "ephemeraldisk", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "5", "skuname": "floatingip", "unitprice": 0.000011574, "unit": "IP", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "6", "skuname": "blockstorage", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "7", "skuname": "objectstorage", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "8", "skuname": "license", "unitprice": 0.000011574, "unit": "License*Core", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "9", "skuname": "titanxp", "unitprice": 0.000011574, "unit": "Core", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "10", "skuname": "t4", "unitprice": 0.000011574, "unit": "Core", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "11", "skuname": "p100", "unitprice": 0.000011574, "unit": "Core", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "12", "skuname": "rootdisk_ssd", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "13", "skuname": "ephemeraldisk_ssd", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "14", "skuname": "blockstorage_ssd", "unitprice": 0.000011574, "unit": "GB", "planid": "1", "UnitCreditPrice": 0.000011574, "AccountingMode": "CASH" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "15", "skuname": "salomon", "unitprice": 0.000013889, "unit": "core-hour", "planid": "1", "UnitCreditPrice": 0.000013889, "AccountingMode": "CREDIT" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "16", "skuname": "barbora", "unitprice": 0.000020694, "unit": "core-hour", "planid": "1", "UnitCreditPrice": 0.000020694, "AccountingMode": "CREDIT" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "17", "skuname": "barbora-gpu", "unitprice": 0.000066111, "unit": "core-hour", "planid": "1", "UnitCreditPrice": 0.000066111, "AccountingMode": "CREDIT" }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/sku/price" -X POST -d '{ "skuid": "18", "skuname": "dgx", "unitprice": 0.000161667, "unit": "core-hour", "planid": "1", "UnitCreditPrice": 0.000161667, "AccountingMode": "CREDIT" }' |
||||
|
||||
|
||||
# We load the life cycles |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "active", "resourceType": "blockstorage", "skuList":{ "blockstorage": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "inactive", "resourceType": "blockstorage", "skuList":{ "blockstorage": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "active", "resourceType": "floatingip", "skuList":{ "floatingip": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "inactive", "resourceType": "floatingip", "skuList":{ "floatingip": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "used", "resourceType": "objectstorage", "skuList":{ "objectstorage": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "used", "resourceType": "salomon", "skuList":{ "salomon": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "used", "resourceType": "barbora", "skuList":{ "barbora": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "used", "resourceType": "barbora-gpu", "skuList":{ "barbora-gpu": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "used", "resourceType": "dgx", "skuList":{ "dgx": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "active", "resourceType": "server", "skuList":{ "vcpu": 1, "ram": 1, "titanxp": 1, "t4": 1, "p100": 1, "rootdisk": 1, "rootdisk_ssd": 1, "ephemeraldisk": 1, "ephemeraldisk_ssd": 1, "license": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "inactive", "resourceType": "server", "skuList":{ "titanxp": 1, "t4": 1, "p100": 1, "rootdisk": 1, "rootdisk_ssd": 1, "ephemeraldisk": 1, "ephemeraldisk_ssd": 1, "license": 1 } }' |
||||
curl --silent -H "X-API-Key: ${API_KEY}" -H "Content-Type: application/json" "${SERVER}:8600/api/${API_VERSION}/cycle" -X POST -d '{ "state": "suspended", "resourceType": "server", "skuList":{ "ram": 1, "titanxp": 1, "t4": 1, "p100": 1, "rootdisk": 1, "rootdisk_ssd": 1, "ephemeraldisk": 1, "ephemeraldisk_ssd": 1, "license": 1 } }' |
||||
|
@ -0,0 +1,188 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package restapi |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"fmt" |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/loads" |
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
"github.com/go-openapi/runtime/security" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/status_management" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/sync_management" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/trigger_management" |
||||
) |
||||
|
||||
type contextKey string |
||||
|
||||
const AuthKey contextKey = "Auth" |
||||
|
||||
//go:generate mockery -name StatusManagementAPI -inpkg
|
||||
|
||||
/* StatusManagementAPI */ |
||||
type StatusManagementAPI interface { |
||||
/* GetStatus Basic status of the system */ |
||||
GetStatus(ctx context.Context, params status_management.GetStatusParams) middleware.Responder |
||||
|
||||
/* ShowStatus Basic status of the system */ |
||||
ShowStatus(ctx context.Context, params status_management.ShowStatusParams) middleware.Responder |
||||
} |
||||
|
||||
//go:generate mockery -name SyncManagementAPI -inpkg
|
||||
|
||||
/* SyncManagementAPI */ |
||||
type SyncManagementAPI interface { |
||||
/* SyncFlavors Syncs OpenStack's flavor data into the system */ |
||||
SyncFlavors(ctx context.Context, params sync_management.SyncFlavorsParams) middleware.Responder |
||||
|
||||
/* SyncHierarchy syncs the full organizations, projects and resources hierarchy from LEXIS */ |
||||
SyncHierarchy(ctx context.Context, params sync_management.SyncHierarchyParams) middleware.Responder |
||||
} |
||||
|
||||
//go:generate mockery -name TriggerManagementAPI -inpkg
|
||||
|
||||
/* TriggerManagementAPI */ |
||||
type TriggerManagementAPI interface { |
||||
/* UDRRedo Redo of UDRs from the specified dates and with the specified interval */ |
||||
UDRRedo(ctx context.Context, params trigger_management.UDRRedoParams) middleware.Responder |
||||
} |
||||
|
||||
// Config is configuration for Handler
|
||||
type Config struct { |
||||
StatusManagementAPI |
||||
SyncManagementAPI |
||||
TriggerManagementAPI |
||||
Logger func(string, ...interface{}) |
||||
// InnerMiddleware is for the handler executors. These do not apply to the swagger.json document.
|
||||
// The middleware executes after routing but before authentication, binding and validation
|
||||
InnerMiddleware func(http.Handler) http.Handler |
||||
|
||||
// Authorizer is used to authorize a request after it was authenticated by one of the "Auth*" functions
|
||||
// and the principal was stored in the context in the "AuthKey" context value.
|
||||
Authorizer func(*http.Request) error |
||||
|
||||
// AuthAPIKeyHeader Applies when the "X-API-KEY" header is set
|
||||
AuthAPIKeyHeader func(token string) (interface{}, error) |
||||
|
||||
// AuthAPIKeyParam Applies when the "api_key" query is set
|
||||
AuthAPIKeyParam func(token string) (interface{}, error) |
||||
|
||||
// AuthKeycloak For OAuth2 authentication
|
||||
AuthKeycloak func(token string, scopes []string) (interface{}, error) |
||||
// Authenticator to use for all APIKey authentication
|
||||
APIKeyAuthenticator func(string, string, security.TokenAuthentication) runtime.Authenticator |
||||
// Authenticator to use for all Basic authentication
|
||||
BasicAuthenticator func(security.UserPassAuthentication) runtime.Authenticator |
||||
// Authenticator to use for all Bearer authentication
|
||||
BearerAuthenticator func(string, security.ScopedTokenAuthentication) runtime.Authenticator |
||||
} |
||||
|
||||
// Handler returns an http.Handler given the handler configuration
|
||||
// It mounts all the business logic implementers in the right routing.
|
||||
func Handler(c Config) (http.Handler, error) { |
||||
h, _, err := HandlerAPI(c) |
||||
return h, err |
||||
} |
||||
|
||||
// HandlerAPI returns an http.Handler given the handler configuration
|
||||
// and the corresponding *LEXISExtensionManagementAPI instance.
|
||||
// It mounts all the business logic implementers in the right routing.
|
||||
func HandlerAPI(c Config) (http.Handler, *operations.LEXISExtensionManagementAPIAPI, error) { |
||||
spec, err := loads.Analyzed(swaggerCopy(SwaggerJSON), "") |
||||
if err != nil { |
||||
return nil, nil, fmt.Errorf("analyze swagger: %v", err) |
||||
} |
||||
api := operations.NewLEXISExtensionManagementAPIAPI(spec) |
||||
api.ServeError = errors.ServeError |
||||
api.Logger = c.Logger |
||||
|
||||
if c.APIKeyAuthenticator != nil { |
||||
api.APIKeyAuthenticator = c.APIKeyAuthenticator |
||||
} |
||||
if c.BasicAuthenticator != nil { |
||||
api.BasicAuthenticator = c.BasicAuthenticator |
||||
} |
||||
if c.BearerAuthenticator != nil { |
||||
api.BearerAuthenticator = c.BearerAuthenticator |
||||
} |
||||
|
||||
api.JSONConsumer = runtime.JSONConsumer() |
||||
api.JSONProducer = runtime.JSONProducer() |
||||
api.APIKeyHeaderAuth = func(token string) (interface{}, error) { |
||||
if c.AuthAPIKeyHeader == nil { |
||||
return token, nil |
||||
} |
||||
return c.AuthAPIKeyHeader(token) |
||||
} |
||||
|
||||
api.APIKeyParamAuth = func(token string) (interface{}, error) { |
||||
if c.AuthAPIKeyParam == nil { |
||||
return token, nil |
||||
} |
||||
return c.AuthAPIKeyParam(token) |
||||
} |
||||
|
||||
api.KeycloakAuth = func(token string, scopes []string) (interface{}, error) { |
||||
if c.AuthKeycloak == nil { |
||||
return token, nil |
||||
} |
||||
return c.AuthKeycloak(token, scopes) |
||||
} |
||||
api.APIAuthorizer = authorizer(c.Authorizer) |
||||
api.TriggerManagementUDRRedoHandler = trigger_management.UDRRedoHandlerFunc(func(params trigger_management.UDRRedoParams, principal interface{}) middleware.Responder { |
||||
ctx := params.HTTPRequest.Context() |
||||
ctx = storeAuth(ctx, principal) |
||||
return c.TriggerManagementAPI.UDRRedo(ctx, params) |
||||
}) |
||||
api.StatusManagementGetStatusHandler = status_management.GetStatusHandlerFunc(func(params status_management.GetStatusParams, principal interface{}) middleware.Responder { |
||||
ctx := params.HTTPRequest.Context() |
||||
ctx = storeAuth(ctx, principal) |
||||
return c.StatusManagementAPI.GetStatus(ctx, params) |
||||
}) |
||||
api.StatusManagementShowStatusHandler = status_management.ShowStatusHandlerFunc(func(params status_management.ShowStatusParams, principal interface{}) middleware.Responder { |
||||
ctx := params.HTTPRequest.Context() |
||||
ctx = storeAuth(ctx, principal) |
||||
return c.StatusManagementAPI.ShowStatus(ctx, params) |
||||
}) |
||||
api.SyncManagementSyncFlavorsHandler = sync_management.SyncFlavorsHandlerFunc(func(params sync_management.SyncFlavorsParams, principal interface{}) middleware.Responder { |
||||
ctx := params.HTTPRequest.Context() |
||||
ctx = storeAuth(ctx, principal) |
||||
return c.SyncManagementAPI.SyncFlavors(ctx, params) |
||||
}) |
||||
api.SyncManagementSyncHierarchyHandler = sync_management.SyncHierarchyHandlerFunc(func(params sync_management.SyncHierarchyParams, principal interface{}) middleware.Responder { |
||||
ctx := params.HTTPRequest.Context() |
||||
ctx = storeAuth(ctx, principal) |
||||
return c.SyncManagementAPI.SyncHierarchy(ctx, params) |
||||
}) |
||||
api.ServerShutdown = func() {} |
||||
return api.Serve(c.InnerMiddleware), api, nil |
||||
} |
||||
|
||||
// swaggerCopy copies the swagger json to prevent data races in runtime
|
||||
func swaggerCopy(orig json.RawMessage) json.RawMessage { |
||||
c := make(json.RawMessage, len(orig)) |
||||
copy(c, orig) |
||||
return c |
||||
} |
||||
|
||||
// authorizer is a helper function type that implements the runtime.Authorizer interface.
|
||||
type authorizer func(*http.Request) error |
||||
|
||||
func (a authorizer) Authorize(req *http.Request, principal interface{}) error { |
||||
if a == nil { |
||||
return nil |
||||
} |
||||
ctx := storeAuth(req.Context(), principal) |
||||
return a(req.WithContext(ctx)) |
||||
} |
||||
|
||||
func storeAuth(ctx context.Context, principal interface{}) context.Context { |
||||
return context.WithValue(ctx, AuthKey, principal) |
||||
} |
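
To show how the pieces above fit together, a minimal wiring of Config into an HTTP server could look like the following sketch. A real service must supply implementations of the three *ManagementAPI interfaces; they are deliberately left unset here to keep the example short, so requests routed to those handlers would panic.

```
package main

import (
	"log"
	"net/http"

	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi"
)

func main() {
	h, err := restapi.Handler(restapi.Config{
		// StatusManagementAPI, SyncManagementAPI and TriggerManagementAPI must be
		// set to real implementations in an actual deployment; this is only a
		// compile-time sketch of the wiring.
		Logger: log.Printf,
		AuthAPIKeyHeader: func(token string) (interface{}, error) {
			// Assumption: accept any key; replace with a proper key lookup.
			return token, nil
		},
	})
	if err != nil {
		log.Fatalf("building handler: %v", err)
	}

	// The generated spec declares localhost:8000 as the default host.
	log.Fatal(http.ListenAndServe(":8000", h))
}
```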
@ -0,0 +1,22 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// Package restapi LEXIS Extension Management API
|
||||
//
|
||||
// An API which supports creation, deletion, listing etc of SERVICE
|
||||
// Schemes:
|
||||
// http
|
||||
// https
|
||||
// Host: localhost:8000
|
||||
// BasePath: /api/v0.1
|
||||
// Version: 0.1.0
|
||||
// License: Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
// Contact: <diego@cyclops-labs.io>
|
||||
//
|
||||
// Consumes:
|
||||
// - application/json
|
||||
//
|
||||
// Produces:
|
||||
// - application/json
|
||||
//
|
||||
// swagger:meta
|
||||
package restapi |
File diff suppressed because it is too large
@ -0,0 +1,405 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package operations |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"net/http" |
||||
"strings" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/loads" |
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
"github.com/go-openapi/runtime/security" |
||||
"github.com/go-openapi/spec" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/swag" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/status_management" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/sync_management" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/trigger_management" |
||||
) |
||||
|
||||
// NewLEXISExtensionManagementAPIAPI creates a new LEXISExtensionManagementAPI instance
|
||||
func NewLEXISExtensionManagementAPIAPI(spec *loads.Document) *LEXISExtensionManagementAPIAPI { |
||||
return &LEXISExtensionManagementAPIAPI{ |
||||
handlers: make(map[string]map[string]http.Handler), |
||||
formats: strfmt.Default, |
||||
defaultConsumes: "application/json", |
||||
defaultProduces: "application/json", |
||||
customConsumers: make(map[string]runtime.Consumer), |
||||
customProducers: make(map[string]runtime.Producer), |
||||
PreServerShutdown: func() {}, |
||||
ServerShutdown: func() {}, |
||||
spec: spec, |
||||
useSwaggerUI: false, |
||||
ServeError: errors.ServeError, |
||||
BasicAuthenticator: security.BasicAuth, |
||||
APIKeyAuthenticator: security.APIKeyAuth, |
||||
BearerAuthenticator: security.BearerAuth, |
||||
|
||||
JSONConsumer: runtime.JSONConsumer(), |
||||
|
||||
JSONProducer: runtime.JSONProducer(), |
||||
|
||||
TriggerManagementUDRRedoHandler: trigger_management.UDRRedoHandlerFunc(func(params trigger_management.UDRRedoParams, principal interface{}) middleware.Responder { |
||||
return middleware.NotImplemented("operation trigger_management.UDRRedo has not yet been implemented") |
||||
}), |
||||
StatusManagementGetStatusHandler: status_management.GetStatusHandlerFunc(func(params status_management.GetStatusParams, principal interface{}) middleware.Responder { |
||||
return middleware.NotImplemented("operation status_management.GetStatus has not yet been implemented") |
||||
}), |
||||
StatusManagementShowStatusHandler: status_management.ShowStatusHandlerFunc(func(params status_management.ShowStatusParams, principal interface{}) middleware.Responder { |
||||
return middleware.NotImplemented("operation status_management.ShowStatus has not yet been implemented") |
||||
}), |
||||
SyncManagementSyncFlavorsHandler: sync_management.SyncFlavorsHandlerFunc(func(params sync_management.SyncFlavorsParams, principal interface{}) middleware.Responder { |
||||
return middleware.NotImplemented("operation sync_management.SyncFlavors has not yet been implemented") |
||||
}), |
||||
SyncManagementSyncHierarchyHandler: sync_management.SyncHierarchyHandlerFunc(func(params sync_management.SyncHierarchyParams, principal interface{}) middleware.Responder { |
||||
return middleware.NotImplemented("operation sync_management.SyncHierarchy has not yet been implemented") |
||||
}), |
||||
|
||||
// Applies when the "X-API-KEY" header is set
|
||||
APIKeyHeaderAuth: func(token string) (interface{}, error) { |
||||
return nil, errors.NotImplemented("api key auth (APIKeyHeader) X-API-KEY from header param [X-API-KEY] has not yet been implemented") |
||||
}, |
||||
// Applies when the "api_key" query is set
|
||||
APIKeyParamAuth: func(token string) (interface{}, error) { |
||||
return nil, errors.NotImplemented("api key auth (APIKeyParam) api_key from query param [api_key] has not yet been implemented") |
||||
}, |
||||
KeycloakAuth: func(token string, scopes []string) (interface{}, error) { |
||||
return nil, errors.NotImplemented("oauth2 bearer auth (Keycloak) has not yet been implemented") |
||||
}, |
||||
// default authorizer is authorized meaning no requests are blocked
|
||||
APIAuthorizer: security.Authorized(), |
||||
} |
||||
} |
||||
|
||||
/*LEXISExtensionManagementAPIAPI An API which supports creation, deletion, listing etc of SERVICE */ |
||||
type LEXISExtensionManagementAPIAPI struct { |
||||
spec *loads.Document |
||||
context *middleware.Context |
||||
handlers map[string]map[string]http.Handler |
||||
formats strfmt.Registry |
||||
customConsumers map[string]runtime.Consumer |
||||
customProducers map[string]runtime.Producer |
||||
defaultConsumes string |
||||
defaultProduces string |
||||
Middleware func(middleware.Builder) http.Handler |
||||
useSwaggerUI bool |
||||
|
||||
// BasicAuthenticator generates a runtime.Authenticator from the supplied basic auth function.
|
||||
// It has a default implementation in the security package, however you can replace it for your particular usage.
|
||||
BasicAuthenticator func(security.UserPassAuthentication) runtime.Authenticator |
||||
// APIKeyAuthenticator generates a runtime.Authenticator from the supplied token auth function.
|
||||
// It has a default implementation in the security package, however you can replace it for your particular usage.
|
||||
APIKeyAuthenticator func(string, string, security.TokenAuthentication) runtime.Authenticator |
||||
// BearerAuthenticator generates a runtime.Authenticator from the supplied bearer token auth function.
|
||||
// It has a default implementation in the security package, however you can replace it for your particular usage.
|
||||
BearerAuthenticator func(string, security.ScopedTokenAuthentication) runtime.Authenticator |
||||
|
||||
// JSONConsumer registers a consumer for the following mime types:
|
||||
// - application/json
|
||||
JSONConsumer runtime.Consumer |
||||
|
||||
// JSONProducer registers a producer for the following mime types:
|
||||
// - application/json
|
||||
JSONProducer runtime.Producer |
||||
|
||||
// APIKeyHeaderAuth registers a function that takes a token and returns a principal
|
||||
// it performs authentication based on an api key X-API-KEY provided in the header
|
||||
APIKeyHeaderAuth func(string) (interface{}, error) |
||||
|
||||
// APIKeyParamAuth registers a function that takes a token and returns a principal
|
||||
// it performs authentication based on an api key api_key provided in the query
|
||||
APIKeyParamAuth func(string) (interface{}, error) |
||||
|
||||
// KeycloakAuth registers a function that takes an access token and a collection of required scopes and returns a principal
|
||||
// it performs authentication based on an oauth2 bearer token provided in the request
|
||||
KeycloakAuth func(string, []string) (interface{}, error) |
||||
|
||||
// APIAuthorizer provides access control (ACL/RBAC/ABAC) by providing access to the request and authenticated principal
|
||||
APIAuthorizer runtime.Authorizer |
||||
|
||||
// TriggerManagementUDRRedoHandler sets the operation handler for the UDR redo operation
|
||||
TriggerManagementUDRRedoHandler trigger_management.UDRRedoHandler |
||||
// StatusManagementGetStatusHandler sets the operation handler for the get status operation
|
||||
StatusManagementGetStatusHandler status_management.GetStatusHandler |
||||
// StatusManagementShowStatusHandler sets the operation handler for the show status operation
|
||||
StatusManagementShowStatusHandler status_management.ShowStatusHandler |
||||
// SyncManagementSyncFlavorsHandler sets the operation handler for the sync flavors operation
|
||||
SyncManagementSyncFlavorsHandler sync_management.SyncFlavorsHandler |
||||
// SyncManagementSyncHierarchyHandler sets the operation handler for the sync hierarchy operation
|
||||
SyncManagementSyncHierarchyHandler sync_management.SyncHierarchyHandler |
||||
// ServeError is called when an error is received, there is a default handler
|
||||
// but you can set your own with this
|
||||
ServeError func(http.ResponseWriter, *http.Request, error) |
||||
|
||||
// PreServerShutdown is called before the HTTP(S) server is shutdown
|
||||
// This allows for custom functions to get executed before the HTTP(S) server stops accepting traffic
|
||||
PreServerShutdown func() |
||||
|
||||
// ServerShutdown is called when the HTTP(S) server is shut down and done
|
||||
// handling all active connections and does not accept connections any more
|
||||
ServerShutdown func() |
||||
|
||||
// Custom command line argument groups with their descriptions
|
||||
CommandLineOptionsGroups []swag.CommandLineOptionsGroup |
||||
|
||||
// User defined logger function.
|
||||
Logger func(string, ...interface{}) |
||||
} |
||||
|
||||
// UseRedoc for documentation at /docs
|
||||
func (o *LEXISExtensionManagementAPIAPI) UseRedoc() { |
||||
o.useSwaggerUI = false |
||||
} |
||||
|
||||
// UseSwaggerUI for documentation at /docs
|
||||
func (o *LEXISExtensionManagementAPIAPI) UseSwaggerUI() { |
||||
o.useSwaggerUI = true |
||||
} |
||||
|
||||
// SetDefaultProduces sets the default produces media type
|
||||
func (o *LEXISExtensionManagementAPIAPI) SetDefaultProduces(mediaType string) { |
||||
o.defaultProduces = mediaType |
||||
} |
||||
|
||||
// SetDefaultConsumes sets the default consumes media type
|
||||
func (o *LEXISExtensionManagementAPIAPI) SetDefaultConsumes(mediaType string) { |
||||
o.defaultConsumes = mediaType |
||||
} |
||||
|
||||
// SetSpec sets a spec that will be served for the clients.
|
||||
func (o *LEXISExtensionManagementAPIAPI) SetSpec(spec *loads.Document) { |
||||
o.spec = spec |
||||
} |
||||
|
||||
// DefaultProduces returns the default produces media type
|
||||
func (o *LEXISExtensionManagementAPIAPI) DefaultProduces() string { |
||||
return o.defaultProduces |
||||
} |
||||
|
||||
// DefaultConsumes returns the default consumes media type
|
||||
func (o *LEXISExtensionManagementAPIAPI) DefaultConsumes() string { |
||||
return o.defaultConsumes |
||||
} |
||||
|
||||
// Formats returns the registered string formats
|
||||
func (o *LEXISExtensionManagementAPIAPI) Formats() strfmt.Registry { |
||||
return o.formats |
||||
} |
||||
|
||||
// RegisterFormat registers a custom format validator
|
||||
func (o *LEXISExtensionManagementAPIAPI) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { |
||||
o.formats.Add(name, format, validator) |
||||
} |
||||
|
||||
// Validate validates the registrations in the LEXISExtensionManagementAPIAPI
|
||||
func (o *LEXISExtensionManagementAPIAPI) Validate() error { |
||||
var unregistered []string |
||||
|
||||
if o.JSONConsumer == nil { |
||||
unregistered = append(unregistered, "JSONConsumer") |
||||
} |
||||
|
||||
if o.JSONProducer == nil { |
||||
unregistered = append(unregistered, "JSONProducer") |
||||
} |
||||
|
||||
if o.APIKeyHeaderAuth == nil { |
||||
unregistered = append(unregistered, "XAPIKEYAuth") |
||||
} |
||||
if o.APIKeyParamAuth == nil { |
||||
unregistered = append(unregistered, "APIKeyAuth") |
||||
} |
||||
if o.KeycloakAuth == nil { |
||||
unregistered = append(unregistered, "KeycloakAuth") |
||||
} |
||||
|
||||
if o.TriggerManagementUDRRedoHandler == nil { |
||||
unregistered = append(unregistered, "trigger_management.UDRRedoHandler") |
||||
} |
||||
if o.StatusManagementGetStatusHandler == nil { |
||||
unregistered = append(unregistered, "status_management.GetStatusHandler") |
||||
} |
||||
if o.StatusManagementShowStatusHandler == nil { |
||||
unregistered = append(unregistered, "status_management.ShowStatusHandler") |
||||
} |
||||
if o.SyncManagementSyncFlavorsHandler == nil { |
||||
unregistered = append(unregistered, "sync_management.SyncFlavorsHandler") |
||||
} |
||||
if o.SyncManagementSyncHierarchyHandler == nil { |
||||
unregistered = append(unregistered, "sync_management.SyncHierarchyHandler") |
||||
} |
||||
|
||||
if len(unregistered) > 0 { |
||||
return fmt.Errorf("missing registration: %s", strings.Join(unregistered, ", ")) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// ServeErrorFor gets a error handler for a given operation id
|
||||
func (o *LEXISExtensionManagementAPIAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { |
||||
return o.ServeError |
||||
} |
||||
|
||||
// AuthenticatorsFor gets the authenticators for the specified security schemes
|
||||
func (o *LEXISExtensionManagementAPIAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { |
||||
result := make(map[string]runtime.Authenticator) |
||||
for name := range schemes { |
||||
switch name { |
||||
case "APIKeyHeader": |
||||
scheme := schemes[name] |
||||
result[name] = o.APIKeyAuthenticator(scheme.Name, scheme.In, o.APIKeyHeaderAuth) |
||||
|
||||
case "APIKeyParam": |
||||
scheme := schemes[name] |
||||
result[name] = o.APIKeyAuthenticator(scheme.Name, scheme.In, o.APIKeyParamAuth) |
||||
|
||||
case "Keycloak": |
||||
result[name] = o.BearerAuthenticator(name, o.KeycloakAuth) |
||||
|
||||
} |
||||
} |
||||
return result |
||||
} |
||||
|
||||
// Authorizer returns the registered authorizer
|
||||
func (o *LEXISExtensionManagementAPIAPI) Authorizer() runtime.Authorizer { |
||||
return o.APIAuthorizer |
||||
} |
||||
|
||||
// ConsumersFor gets the consumers for the specified media types.
|
||||
// MIME type parameters are ignored here.
|
||||
func (o *LEXISExtensionManagementAPIAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { |
||||
result := make(map[string]runtime.Consumer, len(mediaTypes)) |
||||
for _, mt := range mediaTypes { |
||||
switch mt { |
||||
case "application/json": |
||||
result["application/json"] = o.JSONConsumer |
||||
} |
||||
|
||||
if c, ok := o.customConsumers[mt]; ok { |
||||
result[mt] = c |
||||
} |
||||
} |
||||
return result |
||||
} |
||||
|
||||
// ProducersFor gets the producers for the specified media types.
|
||||
// MIME type parameters are ignored here.
|
||||
func (o *LEXISExtensionManagementAPIAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { |
||||
result := make(map[string]runtime.Producer, len(mediaTypes)) |
||||
for _, mt := range mediaTypes { |
||||
switch mt { |
||||
case "application/json": |
||||
result["application/json"] = o.JSONProducer |
||||
} |
||||
|
||||
if p, ok := o.customProducers[mt]; ok { |
||||
result[mt] = p |
||||
} |
||||
} |
||||
return result |
||||
} |
||||
|
||||
// HandlerFor gets a http.Handler for the provided operation method and path
|
||||
func (o *LEXISExtensionManagementAPIAPI) HandlerFor(method, path string) (http.Handler, bool) { |
||||
if o.handlers == nil { |
||||
return nil, false |
||||
} |
||||
um := strings.ToUpper(method) |
||||
if _, ok := o.handlers[um]; !ok { |
||||
return nil, false |
||||
} |
||||
if path == "/" { |
||||
path = "" |
||||
} |
||||
h, ok := o.handlers[um][path] |
||||
return h, ok |
||||
} |
||||
|
||||
// Context returns the middleware context for the LEXIS extension management API
|
||||
func (o *LEXISExtensionManagementAPIAPI) Context() *middleware.Context { |
||||
if o.context == nil { |
||||
o.context = middleware.NewRoutableContext(o.spec, o, nil) |
||||
} |
||||
|
||||
return o.context |
||||
} |
||||
|
||||
func (o *LEXISExtensionManagementAPIAPI) initHandlerCache() { |
||||
o.Context() // don't care about the result, just that the initialization happened
|
||||
if o.handlers == nil { |
||||
o.handlers = make(map[string]map[string]http.Handler) |
||||
} |
||||
|
||||
if o.handlers["GET"] == nil { |
||||
o.handlers["GET"] = make(map[string]http.Handler) |
||||
} |
||||
o.handlers["GET"]["/trigger/udrsredo"] = trigger_management.NewUDRRedo(o.context, o.TriggerManagementUDRRedoHandler) |
||||
if o.handlers["GET"] == nil { |
||||
o.handlers["GET"] = make(map[string]http.Handler) |
||||
} |
||||
o.handlers["GET"]["/status/{id}"] = status_management.NewGetStatus(o.context, o.StatusManagementGetStatusHandler) |
||||
if o.handlers["GET"] == nil { |
||||
o.handlers["GET"] = make(map[string]http.Handler) |
||||
} |
||||
o.handlers["GET"]["/status"] = status_management.NewShowStatus(o.context, o.StatusManagementShowStatusHandler) |
||||
if o.handlers["GET"] == nil { |
||||
o.handlers["GET"] = make(map[string]http.Handler) |
||||
} |
||||
o.handlers["GET"]["/sync/flavors"] = sync_management.NewSyncFlavors(o.context, o.SyncManagementSyncFlavorsHandler) |
||||
if o.handlers["GET"] == nil { |
||||
o.handlers["GET"] = make(map[string]http.Handler) |
||||
} |
||||
o.handlers["GET"]["/sync/hierarchy"] = sync_management.NewSyncHierarchy(o.context, o.SyncManagementSyncHierarchyHandler) |
||||
} |
||||
|
||||
// Serve creates a http handler to serve the API over HTTP
|
||||
// can be used directly in http.ListenAndServe(":8000", api.Serve(nil))
|
||||
func (o *LEXISExtensionManagementAPIAPI) Serve(builder middleware.Builder) http.Handler { |
||||
o.Init() |
||||
|
||||
if o.Middleware != nil { |
||||
return o.Middleware(builder) |
||||
} |
||||
if o.useSwaggerUI { |
||||
return o.context.APIHandlerSwaggerUI(builder) |
||||
} |
||||
return o.context.APIHandler(builder) |
||||
} |
||||
|
||||
// Init allows you to just initialize the handler cache; you can then recompose the middleware as you see fit
|
||||
func (o *LEXISExtensionManagementAPIAPI) Init() { |
||||
if len(o.handlers) == 0 { |
||||
o.initHandlerCache() |
||||
} |
||||
} |
||||
|
||||
// RegisterConsumer allows you to add (or override) a consumer for a media type.
|
||||
func (o *LEXISExtensionManagementAPIAPI) RegisterConsumer(mediaType string, consumer runtime.Consumer) { |
||||
o.customConsumers[mediaType] = consumer |
||||
} |
||||
|
||||
// RegisterProducer allows you to add (or override) a producer for a media type.
|
||||
func (o *LEXISExtensionManagementAPIAPI) RegisterProducer(mediaType string, producer runtime.Producer) { |
||||
o.customProducers[mediaType] = producer |
||||
} |
||||
|
||||
// AddMiddlewareFor adds a http middleware to existing handler
|
||||
func (o *LEXISExtensionManagementAPIAPI) AddMiddlewareFor(method, path string, builder middleware.Builder) { |
||||
um := strings.ToUpper(method) |
||||
if path == "/" { |
||||
path = "" |
||||
} |
||||
o.Init() |
||||
if h, ok := o.handlers[um][path]; ok { |
||||
o.handlers[um][path] = builder(h) |
||||
} |
||||
} |
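
A short sketch of how the route table above can be customised through HandlerAPI and AddMiddlewareFor, for example to wrap only GET /status with a request-logging middleware. As in the earlier sketch, the three handler interfaces are left unset for brevity.

```
package main

import (
	"log"
	"net/http"

	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi"
)

func main() {
	h, api, err := restapi.HandlerAPI(restapi.Config{Logger: log.Printf})
	if err != nil {
		log.Fatal(err)
	}

	// Wrap only the GET /status route; the other routes keep their plain handlers.
	api.AddMiddlewareFor("GET", "/status", func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log.Printf("status requested from %s", r.RemoteAddr)
			next.ServeHTTP(w, r)
		})
	})

	log.Fatal(http.ListenAndServe(":8000", h))
}
```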
@ -0,0 +1,71 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// GetStatusHandlerFunc turns a function with the right signature into a get status handler
|
||||
type GetStatusHandlerFunc func(GetStatusParams, interface{}) middleware.Responder |
||||
|
||||
// Handle executing the request and returning a response
|
||||
func (fn GetStatusHandlerFunc) Handle(params GetStatusParams, principal interface{}) middleware.Responder { |
||||
return fn(params, principal) |
||||
} |
||||
|
||||
// GetStatusHandler interface that can handle valid get status params
|
||||
type GetStatusHandler interface { |
||||
Handle(GetStatusParams, interface{}) middleware.Responder |
||||
} |
||||
|
||||
// NewGetStatus creates a new http.Handler for the get status operation
|
||||
func NewGetStatus(ctx *middleware.Context, handler GetStatusHandler) *GetStatus { |
||||
return &GetStatus{Context: ctx, Handler: handler} |
||||
} |
||||
|
||||
/*GetStatus swagger:route GET /status/{id} statusManagement getStatus |
||||
|
||||
Basic status of the system |
||||
|
||||
*/ |
||||
type GetStatus struct { |
||||
Context *middleware.Context |
||||
Handler GetStatusHandler |
||||
} |
||||
|
||||
func (o *GetStatus) ServeHTTP(rw http.ResponseWriter, r *http.Request) { |
||||
route, rCtx, _ := o.Context.RouteInfo(r) |
||||
if rCtx != nil { |
||||
r = rCtx |
||||
} |
||||
var Params = NewGetStatusParams() |
||||
|
||||
uprinc, aCtx, err := o.Context.Authorize(r, route) |
||||
if err != nil { |
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
if aCtx != nil { |
||||
r = aCtx |
||||
} |
||||
var principal interface{} |
||||
if uprinc != nil { |
||||
principal = uprinc |
||||
} |
||||
|
||||
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
|
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
|
||||
res := o.Handler.Handle(Params, principal) // actually handle the request
|
||||
|
||||
o.Context.Respond(rw, r, route.Produces, route, res) |
||||
|
||||
} |
@ -0,0 +1,87 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/validate" |
||||
) |
||||
|
||||
// NewGetStatusParams creates a new GetStatusParams object
|
||||
// no default values defined in spec.
|
||||
func NewGetStatusParams() GetStatusParams { |
||||
|
||||
return GetStatusParams{} |
||||
} |
||||
|
||||
// GetStatusParams contains all the bound params for the get status operation
|
||||
// typically these are obtained from a http.Request
|
||||
//
|
||||
// swagger:parameters getStatus
|
||||
type GetStatusParams struct { |
||||
|
||||
// HTTP Request Object
|
||||
HTTPRequest *http.Request `json:"-"` |
||||
|
||||
/*Id of the endpoint to be checked |
||||
Required: true |
||||
In: path |
||||
*/ |
||||
ID string |
||||
} |
||||
|
||||
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
|
||||
// for simple values it will use straight method calls.
|
||||
//
|
||||
// To ensure default values, the struct must have been initialized with NewGetStatusParams() beforehand.
|
||||
func (o *GetStatusParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { |
||||
var res []error |
||||
|
||||
o.HTTPRequest = r |
||||
|
||||
rID, rhkID, _ := route.Params.GetOK("id") |
||||
if err := o.bindID(rID, rhkID, route.Formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// bindID binds and validates parameter ID from path.
|
||||
func (o *GetStatusParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { |
||||
var raw string |
||||
if len(rawData) > 0 { |
||||
raw = rawData[len(rawData)-1] |
||||
} |
||||
|
||||
// Required: true
|
||||
// Parameter is provided by construction from the route
|
||||
|
||||
o.ID = raw |
||||
|
||||
if err := o.validateID(formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// validateID carries on validations for parameter ID
|
||||
func (o *GetStatusParams) validateID(formats strfmt.Registry) error { |
||||
|
||||
if err := validate.EnumCase("id", "path", o.ID, []interface{}{"kafka-receiver", "kafka-sender", "status", "trigger", "metrics", "sync"}, true); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,102 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// GetStatusOKCode is the HTTP code returned for type GetStatusOK
|
||||
const GetStatusOKCode int = 200 |
||||
|
||||
/*GetStatusOK Status information of the system |
||||
|
||||
swagger:response getStatusOK |
||||
*/ |
||||
type GetStatusOK struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.Status `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewGetStatusOK creates GetStatusOK with default headers values
|
||||
func NewGetStatusOK() *GetStatusOK { |
||||
|
||||
return &GetStatusOK{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the get status o k response
|
||||
func (o *GetStatusOK) WithPayload(payload *models.Status) *GetStatusOK { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the get status o k response
|
||||
func (o *GetStatusOK) SetPayload(payload *models.Status) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *GetStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(200) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
||||
|
||||
// GetStatusNotFoundCode is the HTTP code returned for type GetStatusNotFound
|
||||
const GetStatusNotFoundCode int = 404 |
||||
|
||||
/*GetStatusNotFound The endpoint provided doesn't exist |
||||
|
||||
swagger:response getStatusNotFound |
||||
*/ |
||||
type GetStatusNotFound struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.ErrorResponse `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewGetStatusNotFound creates GetStatusNotFound with default headers values
|
||||
func NewGetStatusNotFound() *GetStatusNotFound { |
||||
|
||||
return &GetStatusNotFound{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the get status not found response
|
||||
func (o *GetStatusNotFound) WithPayload(payload *models.ErrorResponse) *GetStatusNotFound { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the get status not found response
|
||||
func (o *GetStatusNotFound) SetPayload(payload *models.ErrorResponse) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *GetStatusNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(404) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
@ -0,0 +1,99 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"errors" |
||||
"net/url" |
||||
golangswaggerpaths "path" |
||||
"strings" |
||||
) |
||||
|
||||
// GetStatusURL generates an URL for the get status operation
|
||||
type GetStatusURL struct { |
||||
ID string |
||||
|
||||
_basePath string |
||||
// avoid unkeyed usage
|
||||
_ struct{} |
||||
} |
||||
|
||||
// WithBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *GetStatusURL) WithBasePath(bp string) *GetStatusURL { |
||||
o.SetBasePath(bp) |
||||
return o |
||||
} |
||||
|
||||
// SetBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *GetStatusURL) SetBasePath(bp string) { |
||||
o._basePath = bp |
||||
} |
||||
|
||||
// Build a url path and query string
|
||||
func (o *GetStatusURL) Build() (*url.URL, error) { |
||||
var _result url.URL |
||||
|
||||
var _path = "/status/{id}" |
||||
|
||||
id := o.ID |
||||
if id != "" { |
||||
_path = strings.Replace(_path, "{id}", id, -1) |
||||
} else { |
||||
return nil, errors.New("id is required on GetStatusURL") |
||||
} |
||||
|
||||
_basePath := o._basePath |
||||
if _basePath == "" { |
||||
_basePath = "/api/v0.1" |
||||
} |
||||
_result.Path = golangswaggerpaths.Join(_basePath, _path) |
||||
|
||||
return &_result, nil |
||||
} |
||||
|
||||
// Must is a helper function to panic when the url builder returns an error
|
||||
func (o *GetStatusURL) Must(u *url.URL, err error) *url.URL { |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if u == nil { |
||||
panic("url can't be nil") |
||||
} |
||||
return u |
||||
} |
||||
|
||||
// String returns the string representation of the path with query string
|
||||
func (o *GetStatusURL) String() string { |
||||
return o.Must(o.Build()).String() |
||||
} |
||||
|
||||
// BuildFull builds a full url with scheme, host, path and query string
|
||||
func (o *GetStatusURL) BuildFull(scheme, host string) (*url.URL, error) { |
||||
if scheme == "" { |
||||
return nil, errors.New("scheme is required for a full url on GetStatusURL") |
||||
} |
||||
if host == "" { |
||||
return nil, errors.New("host is required for a full url on GetStatusURL") |
||||
} |
||||
|
||||
base, err := o.Build() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
base.Scheme = scheme |
||||
base.Host = host |
||||
return base, nil |
||||
} |
||||
|
||||
// StringFull returns the string representation of a complete url
|
||||
func (o *GetStatusURL) StringFull(scheme, host string) string { |
||||
return o.Must(o.BuildFull(scheme, host)).String() |
||||
} |
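A small usage sketch for this URL builder (the import path is assumed): with no base path set, Build falls back to the spec default /api/v0.1, and BuildFull adds scheme and host.

```
package main

import (
	"fmt"

	// Hypothetical import path for the generated package shown above.
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/status_management"
)

func main() {
	// With no explicit base path, Build falls back to the spec default "/api/v0.1".
	u := status_management.GetStatusURL{ID: "kafka-receiver"}
	fmt.Println(u.String()) // /api/v0.1/status/kafka-receiver

	// BuildFull adds scheme and host for an absolute URL.
	full, err := u.BuildFull("https", "cyclops.example.org")
	if err != nil {
		panic(err)
	}
	fmt.Println(full.String()) // https://cyclops.example.org/api/v0.1/status/kafka-receiver
}
```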
@ -0,0 +1,71 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// ShowStatusHandlerFunc turns a function with the right signature into a show status handler
|
||||
type ShowStatusHandlerFunc func(ShowStatusParams, interface{}) middleware.Responder |
||||
|
||||
// Handle executing the request and returning a response
|
||||
func (fn ShowStatusHandlerFunc) Handle(params ShowStatusParams, principal interface{}) middleware.Responder { |
||||
return fn(params, principal) |
||||
} |
||||
|
||||
// ShowStatusHandler interface that can handle valid show status params
|
||||
type ShowStatusHandler interface { |
||||
Handle(ShowStatusParams, interface{}) middleware.Responder |
||||
} |
||||
|
||||
// NewShowStatus creates a new http.Handler for the show status operation
|
||||
func NewShowStatus(ctx *middleware.Context, handler ShowStatusHandler) *ShowStatus { |
||||
return &ShowStatus{Context: ctx, Handler: handler} |
||||
} |
||||
|
||||
/*ShowStatus swagger:route GET /status statusManagement showStatus |
||||
|
||||
Basic status of the system |
||||
|
||||
*/ |
||||
type ShowStatus struct { |
||||
Context *middleware.Context |
||||
Handler ShowStatusHandler |
||||
} |
||||
|
||||
func (o *ShowStatus) ServeHTTP(rw http.ResponseWriter, r *http.Request) { |
||||
route, rCtx, _ := o.Context.RouteInfo(r) |
||||
if rCtx != nil { |
||||
r = rCtx |
||||
} |
||||
var Params = NewShowStatusParams() |
||||
|
||||
uprinc, aCtx, err := o.Context.Authorize(r, route) |
||||
if err != nil { |
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
if aCtx != nil { |
||||
r = aCtx |
||||
} |
||||
var principal interface{} |
||||
if uprinc != nil { |
||||
principal = uprinc |
||||
} |
||||
|
||||
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
|
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
|
||||
res := o.Handler.Handle(Params, principal) // actually handle the request
|
||||
|
||||
o.Context.Respond(rw, r, route.Produces, route, res) |
||||
|
||||
} |
@ -0,0 +1,45 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// NewShowStatusParams creates a new ShowStatusParams object
|
||||
// no default values defined in spec.
|
||||
func NewShowStatusParams() ShowStatusParams { |
||||
|
||||
return ShowStatusParams{} |
||||
} |
||||
|
||||
// ShowStatusParams contains all the bound params for the show status operation
|
||||
// typically these are obtained from a http.Request
|
||||
//
|
||||
// swagger:parameters showStatus
|
||||
type ShowStatusParams struct { |
||||
|
||||
// HTTP Request Object
|
||||
HTTPRequest *http.Request `json:"-"` |
||||
} |
||||
|
||||
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
|
||||
// for simple values it will use straight method calls.
|
||||
//
|
||||
// To ensure default values, the struct must have been initialized with NewShowStatusParams() beforehand.
|
||||
func (o *ShowStatusParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { |
||||
var res []error |
||||
|
||||
o.HTTPRequest = r |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,58 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// ShowStatusOKCode is the HTTP code returned for type ShowStatusOK
|
||||
const ShowStatusOKCode int = 200 |
||||
|
||||
/*ShowStatusOK Status information of the system |
||||
|
||||
swagger:response showStatusOK |
||||
*/ |
||||
type ShowStatusOK struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.Status `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewShowStatusOK creates ShowStatusOK with default headers values
|
||||
func NewShowStatusOK() *ShowStatusOK { |
||||
|
||||
return &ShowStatusOK{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the show status o k response
|
||||
func (o *ShowStatusOK) WithPayload(payload *models.Status) *ShowStatusOK { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the show status o k response
|
||||
func (o *ShowStatusOK) SetPayload(payload *models.Status) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *ShowStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(200) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
@ -0,0 +1,87 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package status_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"errors" |
||||
"net/url" |
||||
golangswaggerpaths "path" |
||||
) |
||||
|
||||
// ShowStatusURL generates an URL for the show status operation
|
||||
type ShowStatusURL struct { |
||||
_basePath string |
||||
} |
||||
|
||||
// WithBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *ShowStatusURL) WithBasePath(bp string) *ShowStatusURL { |
||||
o.SetBasePath(bp) |
||||
return o |
||||
} |
||||
|
||||
// SetBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *ShowStatusURL) SetBasePath(bp string) { |
||||
o._basePath = bp |
||||
} |
||||
|
||||
// Build a url path and query string
|
||||
func (o *ShowStatusURL) Build() (*url.URL, error) { |
||||
var _result url.URL |
||||
|
||||
var _path = "/status" |
||||
|
||||
_basePath := o._basePath |
||||
if _basePath == "" { |
||||
_basePath = "/api/v0.1" |
||||
} |
||||
_result.Path = golangswaggerpaths.Join(_basePath, _path) |
||||
|
||||
return &_result, nil |
||||
} |
||||
|
||||
// Must is a helper function to panic when the url builder returns an error
|
||||
func (o *ShowStatusURL) Must(u *url.URL, err error) *url.URL { |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if u == nil { |
||||
panic("url can't be nil") |
||||
} |
||||
return u |
||||
} |
||||
|
||||
// String returns the string representation of the path with query string
|
||||
func (o *ShowStatusURL) String() string { |
||||
return o.Must(o.Build()).String() |
||||
} |
||||
|
||||
// BuildFull builds a full url with scheme, host, path and query string
|
||||
func (o *ShowStatusURL) BuildFull(scheme, host string) (*url.URL, error) { |
||||
if scheme == "" { |
||||
return nil, errors.New("scheme is required for a full url on ShowStatusURL") |
||||
} |
||||
if host == "" { |
||||
return nil, errors.New("host is required for a full url on ShowStatusURL") |
||||
} |
||||
|
||||
base, err := o.Build() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
base.Scheme = scheme |
||||
base.Host = host |
||||
return base, nil |
||||
} |
||||
|
||||
// StringFull returns the string representation of a complete url
|
||||
func (o *ShowStatusURL) StringFull(scheme, host string) string { |
||||
return o.Must(o.BuildFull(scheme, host)).String() |
||||
} |
@ -0,0 +1,71 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// SyncFlavorsHandlerFunc turns a function with the right signature into a sync flavors handler
|
||||
type SyncFlavorsHandlerFunc func(SyncFlavorsParams, interface{}) middleware.Responder |
||||
|
||||
// Handle executing the request and returning a response
|
||||
func (fn SyncFlavorsHandlerFunc) Handle(params SyncFlavorsParams, principal interface{}) middleware.Responder { |
||||
return fn(params, principal) |
||||
} |
||||
|
||||
// SyncFlavorsHandler interface that can handle valid sync flavors params
|
||||
type SyncFlavorsHandler interface { |
||||
Handle(SyncFlavorsParams, interface{}) middleware.Responder |
||||
} |
||||
|
||||
// NewSyncFlavors creates a new http.Handler for the sync flavors operation
|
||||
func NewSyncFlavors(ctx *middleware.Context, handler SyncFlavorsHandler) *SyncFlavors { |
||||
return &SyncFlavors{Context: ctx, Handler: handler} |
||||
} |
||||
|
||||
/*SyncFlavors swagger:route GET /sync/flavors syncManagement syncFlavors |
||||
|
||||
Syncs the OpenStack flavors data into the system |
||||
|
||||
*/ |
||||
type SyncFlavors struct { |
||||
Context *middleware.Context |
||||
Handler SyncFlavorsHandler |
||||
} |
||||
|
||||
func (o *SyncFlavors) ServeHTTP(rw http.ResponseWriter, r *http.Request) { |
||||
route, rCtx, _ := o.Context.RouteInfo(r) |
||||
if rCtx != nil { |
||||
r = rCtx |
||||
} |
||||
var Params = NewSyncFlavorsParams() |
||||
|
||||
uprinc, aCtx, err := o.Context.Authorize(r, route) |
||||
if err != nil { |
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
if aCtx != nil { |
||||
r = aCtx |
||||
} |
||||
var principal interface{} |
||||
if uprinc != nil { |
||||
principal = uprinc |
||||
} |
||||
|
||||
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
|
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
|
||||
res := o.Handler.Handle(Params, principal) // actually handle the request
|
||||
|
||||
o.Context.Respond(rw, r, route.Produces, route, res) |
||||
|
||||
} |
@ -0,0 +1,45 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// NewSyncFlavorsParams creates a new SyncFlavorsParams object
|
||||
// no default values defined in spec.
|
||||
func NewSyncFlavorsParams() SyncFlavorsParams { |
||||
|
||||
return SyncFlavorsParams{} |
||||
} |
||||
|
||||
// SyncFlavorsParams contains all the bound params for the sync flavors operation
|
||||
// typically these are obtained from a http.Request
|
||||
//
|
||||
// swagger:parameters syncFlavors
|
||||
type SyncFlavorsParams struct { |
||||
|
||||
// HTTP Request Object
|
||||
HTTPRequest *http.Request `json:"-"` |
||||
} |
||||
|
||||
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
|
||||
// for simple values it will use straight method calls.
|
||||
//
|
||||
// To ensure default values, the struct must have been initialized with NewSyncFlavorsParams() beforehand.
|
||||
func (o *SyncFlavorsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { |
||||
var res []error |
||||
|
||||
o.HTTPRequest = r |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,106 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// SyncFlavorsOKCode is the HTTP code returned for type SyncFlavorsOK
|
||||
const SyncFlavorsOKCode int = 200 |
||||
|
||||
/*SyncFlavorsOK The load of data was completed successfully |
||||
|
||||
swagger:response syncFlavorsOK |
||||
*/ |
||||
type SyncFlavorsOK struct { |
||||
} |
||||
|
||||
// NewSyncFlavorsOK creates SyncFlavorsOK with default headers values
|
||||
func NewSyncFlavorsOK() *SyncFlavorsOK { |
||||
|
||||
return &SyncFlavorsOK{} |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *SyncFlavorsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
|
||||
|
||||
rw.WriteHeader(200) |
||||
} |
||||
|
||||
// SyncFlavorsAcceptedCode is the HTTP code returned for type SyncFlavorsAccepted
|
||||
const SyncFlavorsAcceptedCode int = 202 |
||||
|
||||
/*SyncFlavorsAccepted Operation done, but some of the data might have failed to be added |
||||
|
||||
swagger:response syncFlavorsAccepted |
||||
*/ |
||||
type SyncFlavorsAccepted struct { |
||||
} |
||||
|
||||
// NewSyncFlavorsAccepted creates SyncFlavorsAccepted with default headers values
|
||||
func NewSyncFlavorsAccepted() *SyncFlavorsAccepted { |
||||
|
||||
return &SyncFlavorsAccepted{} |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *SyncFlavorsAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
|
||||
|
||||
rw.WriteHeader(202) |
||||
} |
||||
|
||||
// SyncFlavorsInternalServerErrorCode is the HTTP code returned for type SyncFlavorsInternalServerError
|
||||
const SyncFlavorsInternalServerErrorCode int = 500 |
||||
|
||||
/*SyncFlavorsInternalServerError Something unexpected happened, error raised |
||||
|
||||
swagger:response syncFlavorsInternalServerError |
||||
*/ |
||||
type SyncFlavorsInternalServerError struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.ErrorResponse `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewSyncFlavorsInternalServerError creates SyncFlavorsInternalServerError with default headers values
|
||||
func NewSyncFlavorsInternalServerError() *SyncFlavorsInternalServerError { |
||||
|
||||
return &SyncFlavorsInternalServerError{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the sync flavors internal server error response
|
||||
func (o *SyncFlavorsInternalServerError) WithPayload(payload *models.ErrorResponse) *SyncFlavorsInternalServerError { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the sync flavors internal server error response
|
||||
func (o *SyncFlavorsInternalServerError) SetPayload(payload *models.ErrorResponse) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *SyncFlavorsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(500) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
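As an illustration of how the three typed responders above could be used, here is a hypothetical handler body for the sync flavors operation; the import paths, the helper function and the payload contents are assumptions, not code from this diff. Returning dedicated responder types keeps the mapping between handler outcomes and the documented status codes explicit and type-checked.

```
package apisketch

import (
	"github.com/go-openapi/runtime/middleware"

	// Hypothetical import paths; the real module layout is not shown in this diff.
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models"
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/sync_management"
)

// syncFlavors is a hypothetical handler body showing how the generated typed
// responders map onto the three documented outcomes of the operation.
func syncFlavors(params sync_management.SyncFlavorsParams, principal interface{}) middleware.Responder {
	failures, err := loadFlavorsFromOpenStack() // stand-in for the real sync logic
	switch {
	case err != nil:
		// 500: something unexpected happened.
		return sync_management.NewSyncFlavorsInternalServerError().WithPayload(&models.ErrorResponse{})
	case failures > 0:
		// 202: operation done, but part of the data failed to be added.
		return sync_management.NewSyncFlavorsAccepted()
	default:
		// 200: the data was loaded completely and successfully.
		return sync_management.NewSyncFlavorsOK()
	}
}

// loadFlavorsFromOpenStack is a placeholder; the real collector logic lives elsewhere.
func loadFlavorsFromOpenStack() (failures int, err error) { return 0, nil }
```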
@ -0,0 +1,87 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"errors" |
||||
"net/url" |
||||
golangswaggerpaths "path" |
||||
) |
||||
|
||||
// SyncFlavorsURL generates an URL for the sync flavors operation
|
||||
type SyncFlavorsURL struct { |
||||
_basePath string |
||||
} |
||||
|
||||
// WithBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *SyncFlavorsURL) WithBasePath(bp string) *SyncFlavorsURL { |
||||
o.SetBasePath(bp) |
||||
return o |
||||
} |
||||
|
||||
// SetBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *SyncFlavorsURL) SetBasePath(bp string) { |
||||
o._basePath = bp |
||||
} |
||||
|
||||
// Build a url path and query string
|
||||
func (o *SyncFlavorsURL) Build() (*url.URL, error) { |
||||
var _result url.URL |
||||
|
||||
var _path = "/sync/flavors" |
||||
|
||||
_basePath := o._basePath |
||||
if _basePath == "" { |
||||
_basePath = "/api/v0.1" |
||||
} |
||||
_result.Path = golangswaggerpaths.Join(_basePath, _path) |
||||
|
||||
return &_result, nil |
||||
} |
||||
|
||||
// Must is a helper function to panic when the url builder returns an error
|
||||
func (o *SyncFlavorsURL) Must(u *url.URL, err error) *url.URL { |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if u == nil { |
||||
panic("url can't be nil") |
||||
} |
||||
return u |
||||
} |
||||
|
||||
// String returns the string representation of the path with query string
|
||||
func (o *SyncFlavorsURL) String() string { |
||||
return o.Must(o.Build()).String() |
||||
} |
||||
|
||||
// BuildFull builds a full url with scheme, host, path and query string
|
||||
func (o *SyncFlavorsURL) BuildFull(scheme, host string) (*url.URL, error) { |
||||
if scheme == "" { |
||||
return nil, errors.New("scheme is required for a full url on SyncFlavorsURL") |
||||
} |
||||
if host == "" { |
||||
return nil, errors.New("host is required for a full url on SyncFlavorsURL") |
||||
} |
||||
|
||||
base, err := o.Build() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
base.Scheme = scheme |
||||
base.Host = host |
||||
return base, nil |
||||
} |
||||
|
||||
// StringFull returns the string representation of a complete url
|
||||
func (o *SyncFlavorsURL) StringFull(scheme, host string) string { |
||||
return o.Must(o.BuildFull(scheme, host)).String() |
||||
} |
@ -0,0 +1,71 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// SyncHierarchyHandlerFunc turns a function with the right signature into a sync hierarchy handler
|
||||
type SyncHierarchyHandlerFunc func(SyncHierarchyParams, interface{}) middleware.Responder |
||||
|
||||
// Handle executing the request and returning a response
|
||||
func (fn SyncHierarchyHandlerFunc) Handle(params SyncHierarchyParams, principal interface{}) middleware.Responder { |
||||
return fn(params, principal) |
||||
} |
||||
|
||||
// SyncHierarchyHandler interface that can handle valid sync hierarchy params
|
||||
type SyncHierarchyHandler interface { |
||||
Handle(SyncHierarchyParams, interface{}) middleware.Responder |
||||
} |
||||
|
||||
// NewSyncHierarchy creates a new http.Handler for the sync hierarchy operation
|
||||
func NewSyncHierarchy(ctx *middleware.Context, handler SyncHierarchyHandler) *SyncHierarchy { |
||||
return &SyncHierarchy{Context: ctx, Handler: handler} |
||||
} |
||||
|
||||
/*SyncHierarchy swagger:route GET /sync/hierarchy syncManagement syncHierarchy |
||||
|
||||
Syncs the whole hierarchy of organizations, projects and resources from LEXIS |
||||
|
||||
*/ |
||||
type SyncHierarchy struct { |
||||
Context *middleware.Context |
||||
Handler SyncHierarchyHandler |
||||
} |
||||
|
||||
func (o *SyncHierarchy) ServeHTTP(rw http.ResponseWriter, r *http.Request) { |
||||
route, rCtx, _ := o.Context.RouteInfo(r) |
||||
if rCtx != nil { |
||||
r = rCtx |
||||
} |
||||
var Params = NewSyncHierarchyParams() |
||||
|
||||
uprinc, aCtx, err := o.Context.Authorize(r, route) |
||||
if err != nil { |
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
if aCtx != nil { |
||||
r = aCtx |
||||
} |
||||
var principal interface{} |
||||
if uprinc != nil { |
||||
principal = uprinc |
||||
} |
||||
|
||||
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
|
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
|
||||
res := o.Handler.Handle(Params, principal) // actually handle the request
|
||||
|
||||
o.Context.Respond(rw, r, route.Produces, route, res) |
||||
|
||||
} |
@ -0,0 +1,45 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// NewSyncHierarchyParams creates a new SyncHierarchyParams object
|
||||
// no default values defined in spec.
|
||||
func NewSyncHierarchyParams() SyncHierarchyParams { |
||||
|
||||
return SyncHierarchyParams{} |
||||
} |
||||
|
||||
// SyncHierarchyParams contains all the bound params for the sync hierarchy operation
|
||||
// typically these are obtained from a http.Request
|
||||
//
|
||||
// swagger:parameters syncHierarchy
|
||||
type SyncHierarchyParams struct { |
||||
|
||||
// HTTP Request Object
|
||||
HTTPRequest *http.Request `json:"-"` |
||||
} |
||||
|
||||
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
|
||||
// for simple values it will use straight method calls.
|
||||
//
|
||||
// To ensure default values, the struct must have been initialized with NewSyncHierarchyParams() beforehand.
|
||||
func (o *SyncHierarchyParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { |
||||
var res []error |
||||
|
||||
o.HTTPRequest = r |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,106 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// SyncHierarchyOKCode is the HTTP code returned for type SyncHierarchyOK
|
||||
const SyncHierarchyOKCode int = 200 |
||||
|
||||
/*SyncHierarchyOK The load of data was completed successfully |
||||
|
||||
swagger:response syncHierarchyOK |
||||
*/ |
||||
type SyncHierarchyOK struct { |
||||
} |
||||
|
||||
// NewSyncHierarchyOK creates SyncHierarchyOK with default headers values
|
||||
func NewSyncHierarchyOK() *SyncHierarchyOK { |
||||
|
||||
return &SyncHierarchyOK{} |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *SyncHierarchyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
|
||||
|
||||
rw.WriteHeader(200) |
||||
} |
||||
|
||||
// SyncHierarchyAcceptedCode is the HTTP code returned for type SyncHierarchyAccepted
|
||||
const SyncHierarchyAcceptedCode int = 202 |
||||
|
||||
/*SyncHierarchyAccepted Operation done, but some of the data might have failed to be added |
||||
|
||||
swagger:response syncHierarchyAccepted |
||||
*/ |
||||
type SyncHierarchyAccepted struct { |
||||
} |
||||
|
||||
// NewSyncHierarchyAccepted creates SyncHierarchyAccepted with default headers values
|
||||
func NewSyncHierarchyAccepted() *SyncHierarchyAccepted { |
||||
|
||||
return &SyncHierarchyAccepted{} |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *SyncHierarchyAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
|
||||
|
||||
rw.WriteHeader(202) |
||||
} |
||||
|
||||
// SyncHierarchyInternalServerErrorCode is the HTTP code returned for type SyncHierarchyInternalServerError
|
||||
const SyncHierarchyInternalServerErrorCode int = 500 |
||||
|
||||
/*SyncHierarchyInternalServerError Something unexpected happened, error raised |
||||
|
||||
swagger:response syncHierarchyInternalServerError |
||||
*/ |
||||
type SyncHierarchyInternalServerError struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.ErrorResponse `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewSyncHierarchyInternalServerError creates SyncHierarchyInternalServerError with default headers values
|
||||
func NewSyncHierarchyInternalServerError() *SyncHierarchyInternalServerError { |
||||
|
||||
return &SyncHierarchyInternalServerError{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the sync hierarchy internal server error response
|
||||
func (o *SyncHierarchyInternalServerError) WithPayload(payload *models.ErrorResponse) *SyncHierarchyInternalServerError { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the sync hierarchy internal server error response
|
||||
func (o *SyncHierarchyInternalServerError) SetPayload(payload *models.ErrorResponse) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *SyncHierarchyInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(500) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
@ -0,0 +1,87 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package sync_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"errors" |
||||
"net/url" |
||||
golangswaggerpaths "path" |
||||
) |
||||
|
||||
// SyncHierarchyURL generates an URL for the sync hierarchy operation
|
||||
type SyncHierarchyURL struct { |
||||
_basePath string |
||||
} |
||||
|
||||
// WithBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *SyncHierarchyURL) WithBasePath(bp string) *SyncHierarchyURL { |
||||
o.SetBasePath(bp) |
||||
return o |
||||
} |
||||
|
||||
// SetBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *SyncHierarchyURL) SetBasePath(bp string) { |
||||
o._basePath = bp |
||||
} |
||||
|
||||
// Build a url path and query string
|
||||
func (o *SyncHierarchyURL) Build() (*url.URL, error) { |
||||
var _result url.URL |
||||
|
||||
var _path = "/sync/hierarchy" |
||||
|
||||
_basePath := o._basePath |
||||
if _basePath == "" { |
||||
_basePath = "/api/v0.1" |
||||
} |
||||
_result.Path = golangswaggerpaths.Join(_basePath, _path) |
||||
|
||||
return &_result, nil |
||||
} |
||||
|
||||
// Must is a helper function to panic when the url builder returns an error
|
||||
func (o *SyncHierarchyURL) Must(u *url.URL, err error) *url.URL { |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if u == nil { |
||||
panic("url can't be nil") |
||||
} |
||||
return u |
||||
} |
||||
|
||||
// String returns the string representation of the path with query string
|
||||
func (o *SyncHierarchyURL) String() string { |
||||
return o.Must(o.Build()).String() |
||||
} |
||||
|
||||
// BuildFull builds a full url with scheme, host, path and query string
|
||||
func (o *SyncHierarchyURL) BuildFull(scheme, host string) (*url.URL, error) { |
||||
if scheme == "" { |
||||
return nil, errors.New("scheme is required for a full url on SyncHierarchyURL") |
||||
} |
||||
if host == "" { |
||||
return nil, errors.New("host is required for a full url on SyncHierarchyURL") |
||||
} |
||||
|
||||
base, err := o.Build() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
base.Scheme = scheme |
||||
base.Host = host |
||||
return base, nil |
||||
} |
||||
|
||||
// StringFull returns the string representation of a complete url
|
||||
func (o *SyncHierarchyURL) StringFull(scheme, host string) string { |
||||
return o.Must(o.BuildFull(scheme, host)).String() |
||||
} |
@ -0,0 +1,71 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime/middleware" |
||||
) |
||||
|
||||
// UDRRedoHandlerFunc turns a function with the right signature into a u d r redo handler
|
||||
type UDRRedoHandlerFunc func(UDRRedoParams, interface{}) middleware.Responder |
||||
|
||||
// Handle executing the request and returning a response
|
||||
func (fn UDRRedoHandlerFunc) Handle(params UDRRedoParams, principal interface{}) middleware.Responder { |
||||
return fn(params, principal) |
||||
} |
||||
|
||||
// UDRRedoHandler interface that can handle valid u d r redo params
|
||||
type UDRRedoHandler interface { |
||||
Handle(UDRRedoParams, interface{}) middleware.Responder |
||||
} |
||||
|
||||
// NewUDRRedo creates a new http.Handler for the u d r redo operation
|
||||
func NewUDRRedo(ctx *middleware.Context, handler UDRRedoHandler) *UDRRedo { |
||||
return &UDRRedo{Context: ctx, Handler: handler} |
||||
} |
||||
|
||||
/*UDRRedo swagger:route GET /trigger/udrsredo triggerManagement uDRRedo |
||||
|
||||
Redo of UDRs for the specified dates and with the specified interval |
||||
|
||||
*/ |
||||
type UDRRedo struct { |
||||
Context *middleware.Context |
||||
Handler UDRRedoHandler |
||||
} |
||||
|
||||
func (o *UDRRedo) ServeHTTP(rw http.ResponseWriter, r *http.Request) { |
||||
route, rCtx, _ := o.Context.RouteInfo(r) |
||||
if rCtx != nil { |
||||
r = rCtx |
||||
} |
||||
var Params = NewUDRRedoParams() |
||||
|
||||
uprinc, aCtx, err := o.Context.Authorize(r, route) |
||||
if err != nil { |
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
if aCtx != nil { |
||||
r = aCtx |
||||
} |
||||
var principal interface{} |
||||
if uprinc != nil { |
||||
principal = uprinc |
||||
} |
||||
|
||||
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
|
||||
o.Context.Respond(rw, r, route.Produces, route, err) |
||||
return |
||||
} |
||||
|
||||
res := o.Handler.Handle(Params, principal) // actually handle the request
|
||||
|
||||
o.Context.Respond(rw, r, route.Produces, route, res) |
||||
|
||||
} |
@ -0,0 +1,168 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/errors" |
||||
"github.com/go-openapi/runtime" |
||||
"github.com/go-openapi/runtime/middleware" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/go-openapi/validate" |
||||
) |
||||
|
||||
// NewUDRRedoParams creates a new UDRRedoParams object
|
||||
// no default values defined in spec.
|
||||
func NewUDRRedoParams() UDRRedoParams { |
||||
|
||||
return UDRRedoParams{} |
||||
} |
||||
|
||||
// UDRRedoParams contains all the bound params for the u d r redo operation
|
||||
// typically these are obtained from a http.Request
|
||||
//
|
||||
// swagger:parameters UDRRedo
|
||||
type UDRRedoParams struct { |
||||
|
||||
// HTTP Request Object
|
||||
HTTPRequest *http.Request `json:"-"` |
||||
|
||||
/*Datetime from which to regenerate the udrs |
||||
In: query |
||||
*/ |
||||
From *strfmt.DateTime |
||||
/*Interval to do increments |
||||
In: query |
||||
*/ |
||||
Interval *string |
||||
/*Datetime until which to regenerate the udrs |
||||
In: query |
||||
*/ |
||||
To *strfmt.DateTime |
||||
} |
||||
|
||||
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
|
||||
// for simple values it will use straight method calls.
|
||||
//
|
||||
// To ensure default values, the struct must have been initialized with NewUDRRedoParams() beforehand.
|
||||
func (o *UDRRedoParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { |
||||
var res []error |
||||
|
||||
o.HTTPRequest = r |
||||
|
||||
qs := runtime.Values(r.URL.Query()) |
||||
|
||||
qFrom, qhkFrom, _ := qs.GetOK("from") |
||||
if err := o.bindFrom(qFrom, qhkFrom, route.Formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
qInterval, qhkInterval, _ := qs.GetOK("interval") |
||||
if err := o.bindInterval(qInterval, qhkInterval, route.Formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
qTo, qhkTo, _ := qs.GetOK("to") |
||||
if err := o.bindTo(qTo, qhkTo, route.Formats); err != nil { |
||||
res = append(res, err) |
||||
} |
||||
|
||||
if len(res) > 0 { |
||||
return errors.CompositeValidationError(res...) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// bindFrom binds and validates parameter From from query.
|
||||
func (o *UDRRedoParams) bindFrom(rawData []string, hasKey bool, formats strfmt.Registry) error { |
||||
var raw string |
||||
if len(rawData) > 0 { |
||||
raw = rawData[len(rawData)-1] |
||||
} |
||||
|
||||
// Required: false
|
||||
// AllowEmptyValue: false
|
||||
if raw == "" { // empty values pass all other validations
|
||||
return nil |
||||
} |
||||
|
||||
// Format: datetime
|
||||
value, err := formats.Parse("datetime", raw) |
||||
if err != nil { |
||||
return errors.InvalidType("from", "query", "strfmt.DateTime", raw) |
||||
} |
||||
o.From = (value.(*strfmt.DateTime)) |
||||
|
||||
if err := o.validateFrom(formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// validateFrom carries on validations for parameter From
|
||||
func (o *UDRRedoParams) validateFrom(formats strfmt.Registry) error { |
||||
|
||||
if err := validate.FormatOf("from", "query", "datetime", o.From.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// bindInterval binds and validates parameter Interval from query.
|
||||
func (o *UDRRedoParams) bindInterval(rawData []string, hasKey bool, formats strfmt.Registry) error { |
||||
var raw string |
||||
if len(rawData) > 0 { |
||||
raw = rawData[len(rawData)-1] |
||||
} |
||||
|
||||
// Required: false
|
||||
// AllowEmptyValue: false
|
||||
if raw == "" { // empty values pass all other validations
|
||||
return nil |
||||
} |
||||
|
||||
o.Interval = &raw |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// bindTo binds and validates parameter To from query.
|
||||
func (o *UDRRedoParams) bindTo(rawData []string, hasKey bool, formats strfmt.Registry) error { |
||||
var raw string |
||||
if len(rawData) > 0 { |
||||
raw = rawData[len(rawData)-1] |
||||
} |
||||
|
||||
// Required: false
|
||||
// AllowEmptyValue: false
|
||||
if raw == "" { // empty values pass all other validations
|
||||
return nil |
||||
} |
||||
|
||||
// Format: datetime
|
||||
value, err := formats.Parse("datetime", raw) |
||||
if err != nil { |
||||
return errors.InvalidType("to", "query", "strfmt.DateTime", raw) |
||||
} |
||||
o.To = (value.(*strfmt.DateTime)) |
||||
|
||||
if err := o.validateTo(formats); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// validateTo carries on validations for parameter To
|
||||
func (o *UDRRedoParams) validateTo(formats strfmt.Registry) error { |
||||
|
||||
if err := validate.FormatOf("to", "query", "datetime", o.To.String(), formats); err != nil { |
||||
return err |
||||
} |
||||
return nil |
||||
} |
@ -0,0 +1,102 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/go-openapi/runtime" |
||||
|
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/models" |
||||
) |
||||
|
||||
// UDRRedoOKCode is the HTTP code returned for type UDRRedoOK
|
||||
const UDRRedoOKCode int = 200 |
||||
|
||||
/*UDRRedoOK Generation task executed successfully. |
||||
|
||||
swagger:response uDRRedoOK |
||||
*/ |
||||
type UDRRedoOK struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.ItemCreatedResponse `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewUDRRedoOK creates UDRRedoOK with default headers values
|
||||
func NewUDRRedoOK() *UDRRedoOK { |
||||
|
||||
return &UDRRedoOK{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the u d r redo o k response
|
||||
func (o *UDRRedoOK) WithPayload(payload *models.ItemCreatedResponse) *UDRRedoOK { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the u d r redo o k response
|
||||
func (o *UDRRedoOK) SetPayload(payload *models.ItemCreatedResponse) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *UDRRedoOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(200) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
||||
|
||||
// UDRRedoInternalServerErrorCode is the HTTP code returned for type UDRRedoInternalServerError
|
||||
const UDRRedoInternalServerErrorCode int = 500 |
||||
|
||||
/*UDRRedoInternalServerError Something unexpected happened, error raised |
||||
|
||||
swagger:response uDRRedoInternalServerError |
||||
*/ |
||||
type UDRRedoInternalServerError struct { |
||||
|
||||
/* |
||||
In: Body |
||||
*/ |
||||
Payload *models.ErrorResponse `json:"body,omitempty"` |
||||
} |
||||
|
||||
// NewUDRRedoInternalServerError creates UDRRedoInternalServerError with default headers values
|
||||
func NewUDRRedoInternalServerError() *UDRRedoInternalServerError { |
||||
|
||||
return &UDRRedoInternalServerError{} |
||||
} |
||||
|
||||
// WithPayload adds the payload to the u d r redo internal server error response
|
||||
func (o *UDRRedoInternalServerError) WithPayload(payload *models.ErrorResponse) *UDRRedoInternalServerError { |
||||
o.Payload = payload |
||||
return o |
||||
} |
||||
|
||||
// SetPayload sets the payload to the u d r redo internal server error response
|
||||
func (o *UDRRedoInternalServerError) SetPayload(payload *models.ErrorResponse) { |
||||
o.Payload = payload |
||||
} |
||||
|
||||
// WriteResponse to the client
|
||||
func (o *UDRRedoInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { |
||||
|
||||
rw.WriteHeader(500) |
||||
if o.Payload != nil { |
||||
payload := o.Payload |
||||
if err := producer.Produce(rw, payload); err != nil { |
||||
panic(err) // let the recovery middleware deal with this
|
||||
} |
||||
} |
||||
} |
@ -0,0 +1,123 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package trigger_management |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the generate command
|
||||
|
||||
import ( |
||||
"errors" |
||||
"net/url" |
||||
golangswaggerpaths "path" |
||||
|
||||
"github.com/go-openapi/strfmt" |
||||
) |
||||
|
||||
// UDRRedoURL generates an URL for the u d r redo operation
|
||||
type UDRRedoURL struct { |
||||
From *strfmt.DateTime |
||||
Interval *string |
||||
To *strfmt.DateTime |
||||
|
||||
_basePath string |
||||
// avoid unkeyed usage
|
||||
_ struct{} |
||||
} |
||||
|
||||
// WithBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *UDRRedoURL) WithBasePath(bp string) *UDRRedoURL { |
||||
o.SetBasePath(bp) |
||||
return o |
||||
} |
||||
|
||||
// SetBasePath sets the base path for this url builder, only required when it's different from the
|
||||
// base path specified in the swagger spec.
|
||||
// When the value of the base path is an empty string, the default base path from the spec is used.
|
||||
func (o *UDRRedoURL) SetBasePath(bp string) { |
||||
o._basePath = bp |
||||
} |
||||
|
||||
// Build a url path and query string
|
||||
func (o *UDRRedoURL) Build() (*url.URL, error) { |
||||
var _result url.URL |
||||
|
||||
var _path = "/trigger/udrsredo" |
||||
|
||||
_basePath := o._basePath |
||||
if _basePath == "" { |
||||
_basePath = "/api/v0.1" |
||||
} |
||||
_result.Path = golangswaggerpaths.Join(_basePath, _path) |
||||
|
||||
qs := make(url.Values) |
||||
|
||||
var fromQ string |
||||
if o.From != nil { |
||||
fromQ = o.From.String() |
||||
} |
||||
if fromQ != "" { |
||||
qs.Set("from", fromQ) |
||||
} |
||||
|
||||
var intervalQ string |
||||
if o.Interval != nil { |
||||
intervalQ = *o.Interval |
||||
} |
||||
if intervalQ != "" { |
||||
qs.Set("interval", intervalQ) |
||||
} |
||||
|
||||
var toQ string |
||||
if o.To != nil { |
||||
toQ = o.To.String() |
||||
} |
||||
if toQ != "" { |
||||
qs.Set("to", toQ) |
||||
} |
||||
|
||||
_result.RawQuery = qs.Encode() |
||||
|
||||
return &_result, nil |
||||
} |
||||
|
||||
// Must is a helper function to panic when the url builder returns an error
|
||||
func (o *UDRRedoURL) Must(u *url.URL, err error) *url.URL { |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if u == nil { |
||||
panic("url can't be nil") |
||||
} |
||||
return u |
||||
} |
||||
|
||||
// String returns the string representation of the path with query string
|
||||
func (o *UDRRedoURL) String() string { |
||||
return o.Must(o.Build()).String() |
||||
} |
||||
|
||||
// BuildFull builds a full url with scheme, host, path and query string
|
||||
func (o *UDRRedoURL) BuildFull(scheme, host string) (*url.URL, error) { |
||||
if scheme == "" { |
||||
return nil, errors.New("scheme is required for a full url on UDRRedoURL") |
||||
} |
||||
if host == "" { |
||||
return nil, errors.New("host is required for a full url on UDRRedoURL") |
||||
} |
||||
|
||||
base, err := o.Build() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
base.Scheme = scheme |
||||
base.Host = host |
||||
return base, nil |
||||
} |
||||
|
||||
// StringFull returns the string representation of a complete url
|
||||
func (o *UDRRedoURL) StringFull(scheme, host string) string { |
||||
return o.Must(o.BuildFull(scheme, host)).String() |
||||
} |
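A usage sketch for this builder (the import path is assumed, and "8h" is only an example interval value): the optional from, to and interval parameters are pointers, and only non-nil, non-empty values end up in the query string.

```
package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"

	// Hypothetical import path for the generated package shown above.
	"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/restapi/operations/trigger_management"
)

func main() {
	from := strfmt.DateTime(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC))
	to := strfmt.DateTime(time.Date(2021, 2, 1, 0, 0, 0, 0, time.UTC))
	interval := "8h" // example value; the accepted interval strings are defined by the service

	// Nil pointers are simply left out of the generated query string.
	u := trigger_management.UDRRedoURL{From: &from, To: &to, Interval: &interval}
	fmt.Println(u.String())
	// Prints something like:
	// /api/v0.1/trigger/udrsredo?from=2021-01-01T00%3A00%3A00.000Z&interval=8h&to=2021-02-01T00%3A00%3A00.000Z
}
```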
@ -0,0 +1,5 @@ |
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
package restapi |
||||
|
||||
// this file is intentionally empty. Otherwise go-swagger will generate a server which we don't want
|
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,86 @@ |
||||
# Welcome to the configuration file for this |
||||
# |
||||
# ██████╗██╗ ██╗ ██████╗██╗ ██████╗ ██████╗ ███████╗ |
||||
# ██╔════╝╚██╗ ██╔╝██╔════╝██║ ██╔═══██╗██╔══██╗██╔════╝ |
||||
# ██║ ╚████╔╝ ██║ ██║ ██║ ██║██████╔╝███████╗ |
||||
# ██║ ╚██╔╝ ██║ ██║ ██║ ██║██╔═══╝ ╚════██║ |
||||
# ╚██████╗ ██║ ╚██████╗███████╗╚██████╔╝██║ ███████║ |
||||
# ╚═════╝ ╚═╝ ╚═════╝╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ |
||||
# |
||||
# ██╗ █████╗ ██████╗ ███████╗ |
||||
# ██║ ██╔══██╗██╔══██╗██╔════╝ |
||||
# ██║ ███████║██████╔╝███████╗ |
||||
# ██║ ██╔══██║██╔══██╗╚════██║ |
||||
# ███████╗██║ ██║██████╔╝███████║ |
||||
# ╚══════╝╚═╝ ╚═╝╚═════╝ ╚══════╝ |
||||
# |
||||
# uService! |
||||
|
||||
[APIKEY] |
||||
Enabled = true |
||||
Key = "X-API-KEY" |
||||
Place = "header" |
||||
Token = "1234567890abcdefghi" |
||||
|
||||
[DATABASE] |
||||
# Duration style: Xh, Xm, Xs... |
||||
CacheRetention = "24h" |
||||
|
||||
[GENERAL] |
||||
CertificateFile = "./cert.crt" |
||||
CertificateKey = "./key.key" |
||||
CORSEnabled = false |
||||
CORSHeaders = [ "*" ] |
||||
CORSMethods = [ "GET", "POST" ] |
||||
CORSOrigins = [ "" ] |
||||
HttpsEnabled = false |
||||
InsecureSkipVerify = false |
||||
LogFile = "./SERVICE.log" |
||||
# LogLevel = TRACE | DEBUG | INFO | WARNING | ERROR |
||||
LogLevel = "TRACE" |
||||
LogToConsole = true |
||||
ServerPort = 8000 |
||||
|
||||
[GENERAL.SERVICES] |
||||
Billing = "billing:8001" |
||||
CDR = "cdr:8002" |
||||
CreditSystem = "creditsystem:8003" |
||||
CustomerDB = "customerdb:8004" |
||||
EventsEngine = "eventsengine:8005" |
||||
PlanManager = "planmanager:8006" |
||||
UDR = "udr:8007" |
||||
|
||||
[KEYCLOAK] |
||||
ClientID = "CyclopsDeploy" |
||||
ClientSecret = "093d537e-205e-4a4c-94c6-3d9b744e2675" |
||||
Enabled = true |
||||
Host = "keycloak" |
||||
Port = 8000 |
||||
Realm = "Development" |
||||
RedirectURL = "" |
||||
UseHttp = true |
||||
|
||||
[OPENSTACK] |
||||
Domain = "" |
||||
Filters = [ "filter1", "filter2", "filter3" ] |
||||
Keystone = "" |
||||
Password = "" |
||||
Project = "" |
||||
Regions = [ "Region1", "Region2" ] |
||||
User = "" |
||||
|
||||
[SKUS] |
||||
SKUs = [ "vcpu", "ram", "rootdisk", "ephemeraldisk", "floatingip", "blockstorage", "objectstorage", "license", "titanxp", "t4", "p100", "rootdisk_ssd", "ephemeraldisk_ssd", "blockstorage_ssd" ] |
||||
|
||||
[PRICES] |
||||
BLOCKSTORAGE = 0 |
||||
BLOCKSTORAGE_SSD = 0 |
||||
EPHEMERALDISK = 0 |
||||
EPHEMERALDISK_SSD = 0 |
||||
FLOATINGIP = 0 |
||||
LICENSE = 0 |
||||
OBJECTSTORAGE = 0 |
||||
RAM = 0 |
||||
ROOTDISK = 0 |
||||
ROOTDISK_SSD = 0 |
||||
VCPU = 0 |
@ -0,0 +1,43 @@ |
||||
version: '3' |
||||
|
||||
services: |
||||
|
||||
db: |
||||
image: postgres:latest |
||||
networks: |
||||
- cyclopsnet |
||||
environment: |
||||
POSTGRES_PASSWORD: pass1234 |
||||
POSTGRES_DB: cyclops |
||||
POSTGRES_USER: cyclops |
||||
ports: |
||||
- 5432:5432 |
||||
volumes: |
||||
- postgresql:/var/lib/postgresql |
||||
# This needs explicit mapping due to https://github.com/docker-library/postgres/blob/4e48e3228a30763913ece952c611e5e9b95c8759/Dockerfile.template#L52 |
||||
- postgresql_data:/var/lib/postgresql/data |
||||
|
||||
|
||||
service: |
||||
image: lexis-extension:latest |
||||
restart: always |
||||
environment: |
||||
WAIT_HOSTS: db:5432 |
||||
networks: |
||||
- cyclopsnet |
||||
depends_on: |
||||
- "db" |
||||
ports: |
||||
- 8000:8000 |
||||
volumes: |
||||
- ${PWD}/config.toml:/config.toml |
||||
- ${PWD}/cert.crt:/cert.crt |
||||
- ${PWD}/key.key:/key.key |
||||
|
||||
networks: |
||||
cyclopsnet: |
||||
driver: bridge |
||||
|
||||
volumes: |
||||
postgresql: |
||||
postgresql_data: |
@ -0,0 +1 @@ |
||||
DUMMY FILE! |
@ -0,0 +1,553 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"net/url" |
||||
"strings" |
||||
"time" |
||||
|
||||
httptransport "github.com/go-openapi/runtime/client" |
||||
"github.com/go-openapi/strfmt" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/Cyclops-Labs/cyclops-4-hpc.git/extensions/lexis/server/cacheManager" |
||||
cdrClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/cdr/client" |
||||
cdrUsage "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/cdr/client/usage_management" |
||||
cusClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client" |
||||
cusCustomer "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client/customer_management" |
||||
cusProduct "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client/product_management" |
||||
cusReseller "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/customerdb/client/reseller_management" |
||||
pmClient "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/plan-manager/client" |
||||
pmBundle "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/plan-manager/client/bundle_management" |
||||
pmCycle "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/plan-manager/client/cycle_management" |
||||
pmPlan "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/plan-manager/client/plan_management" |
||||
pmSku "github.com/Cyclops-Labs/cyclops-4-hpc.git/services/plan-manager/client/sku_management" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// cacheStart handles the initialization of the cache mechanism service.
|
||||
// Returns:
|
||||
// - A cacheManager reference struct already initialized and ready to be used.
|
||||
func cacheStart(metrics *prometheus.GaugeVec) *cacheManager.CacheManager { |
||||
|
||||
l.Trace.Printf("[CACHE][INIT] Intializing cache mechanism.\n") |
||||
|
||||
cacheDuration, _ := time.ParseDuration(cfg.DB.CacheRetention) |
||||
|
||||
c := cacheManager.New(metrics, cacheDuration, cfg.APIKey.Token) |
||||
|
||||
resellerFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := cusClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
i := id.(string) |
||||
|
||||
if i != "ALL" { |
||||
|
||||
params := cusReseller.NewGetResellerParams().WithID(i) |
||||
|
||||
r, e := client.ResellerManagement.GetReseller(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CUSDB-FUNCTION] There was a problem while retrieving the reseller with id [ %v ]. Error: %v", i, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return *r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := cusReseller.NewListResellersParams() |
||||
|
||||
r, e := client.ResellerManagement.ListResellers(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CUSDB-FUNCTION] There was a problem while retrieving the list of resellers. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
customerFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := cusClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
i := id.(string) |
||||
|
||||
if i != "ALL" { |
||||
|
||||
params := cusCustomer.NewGetCustomerParams().WithID(i) |
||||
|
||||
r, e := client.CustomerManagement.GetCustomer(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CUSDB-FUNCTION] There was a problem while retrieving the customer with id [ %v ]. Error: %v", i, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return *r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := cusCustomer.NewListCustomersParams() |
||||
|
||||
r, e := client.CustomerManagement.ListCustomers(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CUSDB-FUNCTION] There was a problem while retrieving the list of customers. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
productFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := cusClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["customerdb"], |
||||
Path: cusClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := cusClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
i := id.(string) |
||||
|
||||
if i != "ALL" { |
||||
|
||||
params := cusProduct.NewGetProductParams().WithID(i) |
||||
|
||||
r, e := client.ProductManagement.GetProduct(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CUSDB-FUNCTION] There was a problem while retrieving the product with id [ %v ]. Error: %v", i, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return *r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := cusProduct.NewListProductsParams() |
||||
|
||||
r, e := client.ProductManagement.ListProducts(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CUSDB-FUNCTION] There was a problem while retrieving the list of products. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
// id == id0[,id1,...,idN]?from?to
|
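// Illustrative only (the values below are hypothetical, not from the original
// code): a composite id handed to this fetcher could look like
//
//     "projA,projB?2021-01-01T00:00:00Z?2021-01-31T23:59:59Z"
//
// i.e. a comma-separated id list followed by the RFC3339 "from" and "to" bounds
// that are parsed right after the split.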
||||
cdrFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := cdrClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["cdr"], |
||||
Path: cdrClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
idSplit := strings.SplitN(id.(string), "?", 3) |
||||
|
||||
i := idSplit[0] |
||||
|
||||
from, e := time.Parse(time.RFC3339Nano, idSplit[1]) |
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CDR-FUNCTION] There was a problem while parsing the datetime [ %v ]. Error: %v", idSplit[1], e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
f := (strfmt.DateTime)(from) |
||||
|
||||
to, e := time.Parse(time.RFC3339Nano, idSplit[2]) |
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CDR-FUNCTION] There was a problem while parsing the datetime [ %v ]. Error: %v", idSplit[2], e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
t := (strfmt.DateTime)(to) |
||||
|
||||
client := cdrClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
if strings.Contains(i, ",") { |
||||
|
||||
params := cdrUsage.NewGetSystemUsageParams().WithIdlist(&i).WithFrom(&f).WithTo(&t) |
||||
|
||||
r, e := client.UsageManagement.GetSystemUsage(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CDR-FUNCTION] There was a problem while retrieving all the CDRs from the system. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
if i != "ALL" { |
||||
|
||||
params := cdrUsage.NewGetUsageParams().WithID(i).WithFrom(&f).WithTo(&t) |
||||
|
||||
r, e := client.UsageManagement.GetUsage(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CDR-FUNCTION] There was a problem while retrieving the CDRs under the id [ %v ]. Error: %v", id, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := cdrUsage.NewGetSystemUsageParams().WithFrom(&f).WithTo(&t) |
||||
|
||||
r, e := client.UsageManagement.GetSystemUsage(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CDR-FUNCTION] There was a problem while retrieving all the CDRs from the system. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
skuFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := pmClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["planmanager"], |
||||
Path: pmClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := pmClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
if id.(string) != "ALL" { |
||||
|
||||
params := pmSku.NewGetSkuParams().WithID(id.(string)) |
||||
|
||||
r, e := client.SkuManagement.GetSku(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][SKU-FUNCTION] There was a problem while retrieving the sku [ %v ]. Error: %v", id, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := pmSku.NewListSkusParams() |
||||
|
||||
r, e := client.SkuManagement.ListSkus(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][SKU-FUNCTION] There was a problem while retrieving the skus list. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
planFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := pmClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["planmanager"], |
||||
Path: pmClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := pmClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
if id.(string) != "ALL" { |
||||
|
||||
var planID string |
||||
|
||||
if id.(string) == "DEFAULT" { |
||||
|
||||
planID = cfg.Plans.Default |
||||
|
||||
} else { |
||||
|
||||
planID = id.(string) |
||||
|
||||
} |
||||
|
||||
params := pmPlan.NewGetCompletePlanParams().WithID(planID) |
||||
|
||||
r, e := client.PlanManagement.GetCompletePlan(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][PLAN-FUNCTION] There was a problem while retrieving the plan [ %v ]. Error: %v", id, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return *r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := pmPlan.NewListCompletePlansParams() |
||||
|
||||
r, e := client.PlanManagement.ListCompletePlans(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][PLAN-FUNCTION] There was a problem while retrieving the plan list. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
bundleFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := pmClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["planmanager"], |
||||
Path: pmClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := pmClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
if id.(string) != "ALL" { |
||||
|
||||
params := pmBundle.NewGetSkuBundleParams().WithID(id.(string)) |
||||
|
||||
r, e := client.BundleManagement.GetSkuBundle(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][BUNDLE-FUNCTION] There was a problem while retrieving the skubundle [ %v ]. Error: %v", id, e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return *r.Payload, nil |
||||
|
||||
} |
||||
|
||||
params := pmBundle.NewListSkuBundlesParams() |
||||
|
||||
r, e := client.BundleManagement.ListSkuBundles(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][BUNDLE-FUNCTION] There was a problem while retrieving the skubundle list. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
cycleFunction := func(id interface{}, token string) (interface{}, error) { |
||||
|
||||
config := pmClient.Config{ |
||||
URL: &url.URL{ |
||||
Host: cfg.General.Services["planmanager"], |
||||
Path: pmClient.DefaultBasePath, |
||||
Scheme: "http", |
||||
}, |
||||
AuthInfo: httptransport.APIKeyAuth(cfg.APIKey.Key, cfg.APIKey.Place, cfg.APIKey.Token), |
||||
} |
||||
|
||||
if token != "" { |
||||
|
||||
config.AuthInfo = httptransport.BearerToken(token) |
||||
|
||||
} |
||||
|
||||
client := pmClient.New(config) |
||||
ctx := context.Background() |
||||
|
||||
var params *pmCycle.ListCyclesParams |
||||
|
||||
if id.(string) == "ALL" { |
||||
|
||||
params = pmCycle.NewListCyclesParams() |
||||
|
||||
} else { |
||||
|
||||
ty := id.(string) |
||||
|
||||
params = pmCycle.NewListCyclesParams().WithType(&ty) |
||||
|
||||
} |
||||
|
||||
r, e := client.CycleManagement.ListCycles(ctx, params) |
||||
|
||||
if e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE][CYCLE-FUNCTION] There was a problem while retrieving the cycle list. Error: %v", e) |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
return r.Payload, nil |
||||
|
||||
} |
||||
|
||||
c.Add("reseller", resellerFunction) |
||||
l.Trace.Printf("[CACHE][INIT] Reseller fetcher added to the cache.\n") |
||||
|
||||
c.Add("customer", customerFunction) |
||||
l.Trace.Printf("[CACHE][INIT] Customer fetcher added to the cache.\n") |
||||
|
||||
c.Add("product", productFunction) |
||||
l.Trace.Printf("[CACHE][INIT] Product fetcher added to the cache.\n") |
||||
|
||||
c.Add("cdr", cdrFunction) |
||||
l.Trace.Printf("[CACHE][INIT] CDR usage fetcher added to the cache.\n") |
||||
|
||||
c.Add("sku", skuFunction) |
||||
l.Trace.Printf("[CACHE][INIT] SKU fetcher added to the cache.\n") |
||||
|
||||
c.Add("plan", planFunction) |
||||
l.Trace.Printf("[CACHE][INIT] Plan fetcher added to the cache.\n") |
||||
|
||||
c.Add("bundle", bundleFunction) |
||||
l.Trace.Printf("[CACHE][INIT] SkuBundle fetcher added to the cache.\n") |
||||
|
||||
c.Add("cycle", cycleFunction) |
||||
l.Trace.Printf("[CACHE][INIT] Life Cycle fetcher added to the cache.\n") |
||||
|
||||
return c |
||||
|
||||
} |
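// Illustrative call site for cacheStart (not part of the original file; the
// metric name, help text, and the ids used in Get are assumptions). The label
// names must match the ones the cacheManager package uses ("state", "resource"):
//
//     cacheMetrics := prometheus.NewGaugeVec(
//         prometheus.GaugeOpts{
//             Name: "lexis_cache_objects",
//             Help: "Counters for the state of the cached objects.",
//         },
//         []string{"state", "resource"},
//     )
//
//     cache := cacheStart(cacheMetrics)
//     reseller, err := cache.Get("someResellerID", "reseller", "")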
@ -0,0 +1,281 @@ |
||||
package cacheManager |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// cacheFetchFunction needs the id to retrieve; as an option, a Bearer
|
||||
// Keycloak token can be provided when called.
|
||||
// It shall return the object and errors in case of problems.
|
||||
type cacheFetchFunction func(interface{}, string) (interface{}, error) |
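// For illustration only (not in the original source), a minimal function
// matching this signature, ignoring the token, could be:
//
//     dummy := func(id interface{}, token string) (interface{}, error) {
//         return fmt.Sprintf("object-%v", id), nil
//     }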
||||
|
||||
// cache struct is the main "object" which handles the cache and its items.
|
||||
// It also contains a map with the conversions of interfaces to strings.
|
||||
type cache struct { |
||||
////conversionDictionary map[string]string
|
||||
data map[string]*cacheItem |
||||
fetchers map[string]cacheFetchFunction |
||||
mutex sync.RWMutex |
||||
} |
||||
|
||||
// cacheItem struct refers to the data of each element saved in the cache.
|
||||
type cacheItem struct { |
||||
fetcher cacheFetchFunction |
||||
lastUpdate time.Time |
||||
value interface{} |
||||
} |
||||
|
||||
// CacheManager is the struct defined to group and contain all the methods
|
||||
// that interact with the caching mechanism.
|
||||
type CacheManager struct { |
||||
APIKey string |
||||
duration time.Duration |
||||
metrics *prometheus.GaugeVec |
||||
store cache |
||||
} |
||||
|
||||
// New is the function to create the struct CacheManager.
|
||||
// Parameters:
|
||||
// - t: a time.Duration with the max duration alive of the cache elements.
|
||||
// - k: string containing the APIKey/token in case of need.
|
||||
// Returns:
|
||||
// - CacheManager: struct to interact with CacheManager subsystem functionalities.
|
||||
func New(metrics *prometheus.GaugeVec, t time.Duration, k string) *CacheManager { |
||||
|
||||
l.Trace.Printf("[CACHE] Initializing the cache service.\n") |
||||
|
||||
c := CacheManager{ |
||||
APIKey: k, |
||||
duration: t, |
||||
metrics: metrics, |
||||
store: cache{ |
||||
data: make(map[string]*cacheItem), |
||||
fetchers: make(map[string]cacheFetchFunction), |
||||
}, |
||||
} |
||||
|
||||
return &c |
||||
|
||||
} |
||||
|
||||
// Add function job is to insert a new model in the cache.
|
||||
// What it does is link the model with a fetching function and, if wanted, with
|
||||
// a plain-text name, so that later things can be retrieved from the cache by
|
||||
// referencing either the struct model or the plain-text name.
|
||||
// Parameters:
|
||||
// - plainName: a case insensitive name/alias to retrieve the data.
|
||||
// - fetcher: the cacheFetchFunction used to retrieve the data.
|
||||
func (c *CacheManager) Add(plainName string, fetcher cacheFetchFunction) { |
||||
|
||||
l.Trace.Printf("[CACHE] Adding a new object fetcher in the cache.\n") |
||||
|
||||
key := strings.ToUpper(plainName) |
||||
|
||||
c.store.mutex.Lock() |
||||
|
||||
c.store.fetchers[key] = fetcher |
||||
|
||||
c.store.mutex.Unlock() |
||||
|
||||
c.metrics.With(prometheus.Labels{"state": "OK", "resource": "Models in Cache"}).Inc() |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// fetch function job is to retrieve a new and updated copy of the remote object.
|
||||
// Parameters:
|
||||
// - item: a string used as key-value in the cache storage to identify the item
|
||||
// that is going to be updated.
|
||||
// - token: Keycloak Bearer token, completely optional.
|
||||
// Returns:
|
||||
// - e: error in case something went wrong while setting up the new association.
|
||||
func (c *CacheManager) fetch(item string, token string) (e error) { |
||||
|
||||
l.Trace.Printf("[CACHE] Fetching the item [ %v ] from the remote location.\n", item) |
||||
|
||||
c.store.mutex.RLock() |
||||
|
||||
object := c.store.data[item] |
||||
|
||||
c.store.mutex.RUnlock() |
||||
|
||||
id := strings.SplitN(item, "-", 2)[1] |
||||
|
||||
uValue, e := object.fetcher(id, token) |
||||
|
||||
if e == nil { |
||||
|
||||
l.Trace.Printf("[CACHE] Item [ %v ] retrieved from the remote location and saved in the cache.\n", item) |
||||
|
||||
object.value = uValue |
||||
object.lastUpdate = time.Now() |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[CACHE] Something went wrong while retrieving the item. Error: %v\n", e) |
||||
|
||||
} |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// init function job is to create the new item in the cache and retrieve a new
|
||||
// and updated initial copy of the remote object to be saved in the cache.
|
||||
// Parameters:
|
||||
// - item: a string used as key-value in the cache storage to identify the item
|
||||
// that is going to be updated.
|
||||
// - token: Keycloak Bearer token, completely optional.
|
||||
// Returns:
|
||||
// - e: error in case something went wrong while setting up the new association.
|
||||
func (c *CacheManager) init(item string, token string) (e error) { |
||||
|
||||
l.Trace.Printf("[CACHE] Fetching the item [ %v ] from the remote location.\n", item) |
||||
|
||||
key := strings.Split(item, "-")[0] |
||||
id := strings.SplitN(item, "-", 2)[1] |
||||
|
||||
uValue, e := c.store.fetchers[key](id, token) |
||||
|
||||
if e == nil { |
||||
|
||||
l.Trace.Printf("[CACHE] Item [ %v ] retrieved from the remote location and saved in the cache.\n", item) |
||||
|
||||
i := cacheItem{ |
||||
fetcher: c.store.fetchers[key], |
||||
lastUpdate: time.Now(), |
||||
value: uValue, |
||||
} |
||||
|
||||
c.store.mutex.Lock() |
||||
|
||||
c.store.data[item] = &i |
||||
|
||||
c.store.mutex.Unlock() |
||||
|
||||
} else { |
||||
|
||||
l.Warning.Printf("[CACHE] Something went wrong while retrieving the item. Error: %v\n", e) |
||||
|
||||
} |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
// key is a function to ensure that the creation of the item key for the
|
||||
// cache is consistent across all the functions.
|
||||
// Parameters:
|
||||
// - id: the reference id of the object to be retrieved
|
||||
// - model: the alias text used to identify the source of objects.
|
||||
// Returns:
|
||||
// - s: the key string
|
||||
func (c *CacheManager) key(id interface{}, model string) (s string) { |
||||
|
||||
s = fmt.Sprintf("%v-%v", strings.ToUpper(model), id) |
||||
|
||||
return |
||||
|
||||
} |
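// Worked example (illustrative values): c.key("42", "customer") yields
// "CUSTOMER-42", which fetch and init later split back into the model alias
// ("CUSTOMER") and the id ("42").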
||||
|
||||
// Get function job is to retrieve an object from the cache or fetch it from the
|
||||
// source and upgrade the copy in the cache in case the expiration time has been
|
||||
// exceeded.
|
||||
// Parameters:
|
||||
// - id: the reference id of the object.
|
||||
// - model: the text alias set to reference the model.
|
||||
// - token: Keycloak Bearer token, completely optional.
|
||||
// Returns:
|
||||
// - The object associated with the request
|
||||
// - An error raised in case something went wrong while retrieving the object.
|
||||
func (c *CacheManager) Get(id interface{}, model string, token string) (interface{}, error) { |
||||
|
||||
l.Trace.Printf("[CACHE] Retrieving object [ %v, %v ] from the cache.\n", id, model) |
||||
|
||||
item := c.key(id, model) |
||||
|
||||
c.store.mutex.RLock() |
||||
|
||||
object, exists := c.store.data[item] |
||||
|
||||
c.store.mutex.RUnlock() |
||||
|
||||
if !exists { |
||||
|
||||
l.Trace.Printf("[CACHE] Object [ %v ] first time requested, including in the cache.\n", item) |
||||
|
||||
if e := c.init(item, token); e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE] Something went wrong while adding the new item [ %v ] to the cache. Error: %v\n", item, e) |
||||
|
||||
c.metrics.With(prometheus.Labels{"state": "FAIL", "resource": "Total objects cached"}).Inc() |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Object [ %v ] retrieved from the cache.\n", item) |
||||
|
||||
c.store.mutex.RLock() |
||||
|
||||
o := c.store.data[item].value |
||||
|
||||
c.store.mutex.RUnlock() |
||||
|
||||
c.metrics.With(prometheus.Labels{"state": "OK", "resource": "Total objects cached"}).Inc() |
||||
|
||||
return o, nil |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Object [ %v ] exists in the cache.\n", item) |
||||
|
||||
c.store.mutex.RLock() |
||||
|
||||
diff := (time.Now()).Sub(c.store.data[item].lastUpdate) |
||||
|
||||
c.store.mutex.RUnlock() |
||||
|
||||
if diff <= c.duration { |
||||
|
||||
l.Trace.Printf("[CACHE] Object [ %v ] cache hasn't expired yet.\n", item) |
||||
|
||||
c.metrics.With(prometheus.Labels{"state": "OK", "resource": "Total objects retrieved from cache"}).Inc() |
||||
|
||||
return object.value, nil |
||||
|
||||
} |
||||
|
||||
l.Warning.Printf("[CACHE] Object [ %v ] cache has expired. Starting the upgrade.\n", item) |
||||
|
||||
if e := c.fetch(item, token); e != nil { |
||||
|
||||
l.Warning.Printf("[CACHE] Something went wrong while fetching the updated data for the object [ %v ] to the cache. Error: %v\n", item, e) |
||||
|
||||
c.metrics.With(prometheus.Labels{"state": "FAIL", "resource": "Total objects refreshed"}).Inc() |
||||
|
||||
return nil, e |
||||
|
||||
} |
||||
|
||||
l.Trace.Printf("[CACHE] Object [ %v ] updated retrieved from the cache.\n", item) |
||||
|
||||
c.store.mutex.RLock() |
||||
|
||||
o := c.store.data[item].value |
||||
|
||||
c.store.mutex.RUnlock() |
||||
|
||||
c.metrics.With(prometheus.Labels{"state": "OK", "resource": "Total objects refreshed"}).Inc() |
||||
c.metrics.With(prometheus.Labels{"state": "OK", "resource": "Total objects retrieved from cache"}).Inc() |
||||
|
||||
return o, nil |
||||
|
||||
} |
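// Illustrative usage of Get (not part of the original file; the variable names
// and id are assumptions). An empty token means each fetcher keeps the API-key
// authentication it was configured with:
//
//     customer, err := c.Get("cust-001", "customer", "")
//     if err != nil {
//         // the remote fetch failed and nothing was cached for this key
//     }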
@ -0,0 +1,258 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"strings" |
||||
|
||||
"github.com/segmentio/encoding/json" |
||||
|
||||
"github.com/spf13/viper" |
||||
l "gitlab.com/cyclops-utilities/logging" |
||||
) |
||||
|
||||
// The following structs (apiKey, creditConfig, dbConfig, eventsConfig, generalConfig,
|
||||
// kafkaConfig, keycloakConfig, openStackConfig, planConfig, and prometheusConfig) are
|
||||
// part of the configuration struct, which acts as the main reference for configuration parameters in the system.
|
||||
type apiKey struct { |
||||
Enabled bool `json:"enabled"` |
||||
Key string |
||||
Place string |
||||
Token string `json:"token"` |
||||
} |
||||
|
||||
type configuration struct { |
||||
APIKey apiKey |
||||
Credit creditConfig |
||||
DB dbConfig |
||||
Events eventsConfig |
||||
General generalConfig |
||||
Kafka kafkaConfig |
||||
Keycloak keycloakConfig `json:"keycloak"` |
||||
OpenStack openStackConfig |
||||
Plans planConfig |
||||
Prometheus prometheusConfig |
||||
} |
||||
|
||||
type creditConfig struct { |
||||
Usage bool |
||||
} |
||||
|
||||
type dbConfig struct { |
||||
CacheRetention string |
||||
DbName string |
||||
Host string |
||||
Password string |
||||
Port int |
||||
SSLMode string |
||||
Username string |
||||
} |
||||
|
||||
type eventsConfig struct { |
||||
Filters []string |
||||
} |
||||
|
||||
type generalConfig struct { |
||||
CertificateFile string `json:"certificate_file"` |
||||
CertificateKey string `json:"certificate_key"` |
||||
CORSEnabled bool |
||||
CORSHeaders []string |
||||
CORSMethods []string |
||||
CORSOrigins []string |
||||
HTTPSEnabled bool |
||||
InsecureSkipVerify bool |
||||
LogFile string |
||||
LogLevel string |
||||
LogToConsole bool |
||||
ServerPort int |
||||
Services map[string]string |
||||
} |
||||
|
||||
type kafkaConfig struct { |
||||
Brokers []string |
||||
MaxBytes int |
||||
MinBytes int |
||||
Offset int64 |
||||
Partition int |
||||
TopicsIn []string |
||||
TopicsOut []string |
||||
TLSEnabled bool |
||||
} |
||||
|
||||
type keycloakConfig struct { |
||||
ClientID string `json:"client_id"` |
||||
ClientSecret string `json:"client_secret"` |
||||
Enabled bool `json:"enabled"` |
||||
Host string `json:"host"` |
||||
Port int `json:"port"` |
||||
Realm string `json:"realm"` |
||||
RedirectURL string `json:"redirect_url"` |
||||
UseHTTP bool `json:"use_http"` |
||||
} |
||||
|
||||
type openStackConfig struct { |
||||
Domain string |
||||
Filters []string |
||||
Keystone string |
||||
Password string |
||||
Project string |
||||
Regions []string |
||||
User string |
||||
} |
||||
|
||||
type planConfig struct { |
||||
Default string |
||||
} |
||||
|
||||
type prometheusConfig struct { |
||||
Host string |
||||
MetricsExport bool |
||||
MetricsPort string |
||||
MetricsRoute string |
||||
} |
||||
|
||||
// dumpConfig's job is to dump the configuration in JSON format to the log
|
||||
// system. It makes use of the masking function to keep secrets out of the log.
|
||||
// Parameters:
|
||||
// - c: configuration type containing the config present in the system.
|
||||
func dumpConfig(c configuration) { |
||||
cfgCopy := c |
||||
|
||||
// deal with configuration params that should be masked
|
||||
cfgCopy.APIKey.Token = masked(c.APIKey.Token, 4) |
||||
cfgCopy.DB.Password = masked(c.DB.Password, 4) |
||||
cfgCopy.Keycloak.ClientSecret = masked(c.Keycloak.ClientSecret, 4) |
||||
cfgCopy.OpenStack.Password = masked(c.OpenStack.Password, 4) |
||||
|
||||
// MarshalIndent creates a string containing newlines; each line starts with
|
||||
// two spaces, and two more spaces are added for each indentation level.
|
||||
configJSON, _ := json.MarshalIndent(cfgCopy, " ", " ") |
||||
|
||||
l.Info.Printf("[CONFIG] Configuration settings:\n") |
||||
|
||||
l.Info.Printf("%v\n", string(configJSON)) |
||||
|
||||
} |
||||
|
||||
// masked's job is to return asterisks in place of the characters in a
|
||||
// string, except for the indicated number of trailing characters.
|
||||
// Parameters:
|
||||
// - s: string to be masked
|
||||
// - unmaskedChars: int with the amount (counting from the end of the string) of
|
||||
// characters to keep unmasked.
|
||||
// Returns:
|
||||
// - returnString: the s string passed as parameter masked.
|
||||
func masked(s string, unmaskedChars int) (returnString string) { |
||||
|
||||
if len(s) <= unmaskedChars { |
||||
|
||||
returnString = s |
||||
|
||||
return |
||||
|
||||
} |
||||
|
||||
asteriskString := strings.Repeat("*", (len(s) - unmaskedChars)) |
||||
|
||||
returnString = asteriskString + string(s[len(s)-unmaskedChars:]) |
||||
|
||||
return |
||||
|
||||
} |
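// Worked example (illustrative): masked("1234567890abcdefghi", 4) keeps the last
// four characters and returns "***************fghi" (15 asterisks).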
||||
|
||||
// parseConfig handles the filling of the config struct with the data Viper gets
|
||||
// from the configuration file.
|
||||
// Returns:
|
||||
// - c: the configuration struct filled with the relevant parsed configuration.
|
||||
func parseConfig() (c configuration) { |
||||
|
||||
l.Trace.Printf("[CONFIG] Retrieving configuration.\n") |
||||
|
||||
c = configuration{ |
||||
|
||||
APIKey: apiKey{ |
||||
Enabled: viper.GetBool("apikey.enabled"), |
||||
Key: viper.GetString("apikey.key"), |
||||
Place: viper.GetString("apikey.place"), |
||||
Token: viper.GetString("apikey.token"), |
||||
}, |
||||
|
||||
Credit: creditConfig{ |
||||
Usage: viper.GetBool("credit.usageinsteadofcost"), |
||||
}, |
||||
|
||||
DB: dbConfig{ |
||||
CacheRetention: viper.GetString("portaldb.cacheretention"), |
||||
DbName: viper.GetString("portaldb.dbname"), |
||||
Host: viper.GetString("portaldb.host"), |
||||
Password: viper.GetString("portaldb.password"), |
||||
Port: viper.GetInt("portaldb.port"), |
||||
SSLMode: viper.GetString("portaldb.sslmode"), |
||||
Username: viper.GetString("portaldb.username"), |
||||
}, |
||||
|
||||
Events: eventsConfig{ |
||||
Filters: viper.GetStringSlice("events.filters"), |
||||
}, |
||||
|
||||
General: generalConfig{ |
||||
CertificateFile: viper.GetString("general.certificatefile"), |
||||
CertificateKey: viper.GetString("general.certificatekey"), |
||||
CORSEnabled: viper.GetBool("general.corsenabled"), |
||||
CORSHeaders: viper.GetStringSlice("general.corsheaders"), |
||||
CORSMethods: viper.GetStringSlice("general.corsmethods"), |
||||
CORSOrigins: viper.GetStringSlice("general.corsorigins"), |
||||
HTTPSEnabled: viper.GetBool("general.httpsenabled"), |
||||
InsecureSkipVerify: viper.GetBool("general.insecureskipverify"), |
||||
LogFile: viper.GetString("general.logfile"), |
||||
LogLevel: viper.GetString("general.loglevel"), |
||||
LogToConsole: viper.GetBool("general.logtoconsole"), |
||||
ServerPort: viper.GetInt("general.serverport"), |
||||
Services: viper.GetStringMapString("general.services"), |
||||
}, |
||||
|
||||
Kafka: kafkaConfig{ |
||||
Brokers: viper.GetStringSlice("kafka.brokers"), |
||||
MaxBytes: viper.GetInt("kafka.sizemax"), |
||||
MinBytes: viper.GetInt("kafka.sizemin"), |
||||
Offset: viper.GetInt64("kafka.offset"), |
||||
Partition: viper.GetInt("kafka.partition"), |
||||
TopicsIn: viper.GetStringSlice("kafka." + service + "in"), |
||||
TopicsOut: viper.GetStringSlice("kafka." + service + "out"), |
||||
TLSEnabled: viper.GetBool("kafka.tlsenabled"), |
||||
}, |
||||
|
||||
Keycloak: keycloakConfig{ |
||||
ClientID: viper.GetString("keycloak.clientid"), |
||||
ClientSecret: viper.GetString("keycloak.clientsecret"), |
||||
Enabled: viper.GetBool("keycloak.enabled"), |
||||
Host: viper.GetString("keycloak.host"), |
||||
Port: viper.GetInt("keycloak.port"), |
||||
Realm: viper.GetString("keycloak.realm"), |
||||
RedirectURL: viper.GetString("keycloak.redirecturl"), |
||||
UseHTTP: viper.GetBool("keycloak.usehttp"), |
||||
}, |
||||
|
||||
OpenStack: openStackConfig{ |
||||
Domain: viper.GetString("openstack.domain"), |
||||
Filters: viper.GetStringSlice("openstack.filters"), |
||||
Keystone: viper.GetString("openstack.keystone"), |
||||
Password: viper.GetString("openstack.password"), |
||||
Project: viper.GetString("openstack.project"), |
||||
Regions: viper.GetStringSlice("openstack.region"), |
||||
User: viper.GetString("openstack.user"), |
||||
}, |
||||
|
||||
Plans: planConfig{ |
||||
Default: viper.GetString("Plans.default"), |
||||
}, |
||||
|
||||
Prometheus: prometheusConfig{ |
||||
Host: viper.GetString("prometheus.host"), |
||||
MetricsExport: viper.GetBool("prometheus.metricsexport"), |
||||
MetricsPort: viper.GetString("prometheus.metricsport"), |
||||
MetricsRoute: viper.GetString("prometheus.metricsroute"), |
||||
}, |
||||
} |
||||
|
||||
return |
||||
|
||||
} |