diff --git a/cmd/woodpecker-autoscaler/main.go b/cmd/woodpecker-autoscaler/main.go index feaa7ca..2675f70 100644 --- a/cmd/woodpecker-autoscaler/main.go +++ b/cmd/woodpecker-autoscaler/main.go @@ -14,6 +14,7 @@ import ( "go.woodpecker-ci.org/autoscaler/config" "go.woodpecker-ci.org/autoscaler/engine" "go.woodpecker-ci.org/autoscaler/providers/hetznercloud" + "go.woodpecker-ci.org/autoscaler/providers/scaleway" "go.woodpecker-ci.org/autoscaler/server" ) @@ -25,6 +26,13 @@ func setupProvider(ctx *cli.Context, config *config.Config) (engine.Provider, er // Enable it again when the issue is fixed. // case "linode": // return linode.New(ctx, config) + case "scaleway": + scwCfg, err := scaleway.FromCLI(ctx) + if err != nil { + return nil, err + } + + return scaleway.New(scwCfg, config) case "": return nil, fmt.Errorf("please select a provider") } @@ -77,6 +85,9 @@ func run(ctx *cli.Context) error { return fmt.Errorf("can't parse reconciliation-interval: %w", err) } + // Run a reconcile loop at start-up to avoid waiting 1m or more + autoscaler.Reconcile(ctx.Context) + for { select { case <-ctx.Done(): @@ -115,6 +126,7 @@ func main() { // Register hetznercloud flags app.Flags = append(app.Flags, hetznercloud.DriverFlags...) + app.Flags = append(app.Flags, scaleway.ProviderFlags...) // Register linode flags // TODO: Temp disabled due to the security issue https://github.com/woodpecker-ci/autoscaler/issues/91 // Enable it again when the issue is fixed. 
diff --git a/go.mod b/go.mod index 01d00a4..916a6f4 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,13 @@ module go.woodpecker-ci.org/autoscaler go 1.21 require ( + github.com/cenkalti/backoff/v4 v4.2.1 github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf github.com/hetznercloud/hcloud-go/v2 v2.7.0 github.com/joho/godotenv v1.5.1 github.com/linode/linodego v1.32.0 github.com/rs/zerolog v1.32.0 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.27.1 go.woodpecker-ci.org/woodpecker/v2 v2.4.1 @@ -35,5 +37,6 @@ require ( golang.org/x/text v0.14.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 636c396..fb10f80 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -7,6 +9,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0q github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod 
h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf h1:NrF81UtW8gG2LBGkXFQFqlfNnvMt9WdB46sfdJY4oqc= github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA= @@ -50,6 +54,8 @@ github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= @@ -122,5 +128,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/providers/scaleway/config.go b/providers/scaleway/config.go new file mode 100644 index 0000000..1bc4a52 --- /dev/null +++ 
b/providers/scaleway/config.go @@ -0,0 +1,91 @@ +package scaleway + +import ( + "errors" + + "github.com/cenkalti/backoff/v4" + "github.com/scaleway/scaleway-sdk-go/api/instance/v1" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +// Config is the Scaleway Provider specific configuration +// +// This is decoupled from the CLI interface for future-proofing reasons. +// Please, see ProviderFlags for information on how to configure the provider from the +// CLI or environment variables. +type Config struct { + // SecretKey and AccessKey form a Scaleway IAM API token + // + // Creating a standalone IAM Application is recommended to segregate + // permissions. + SecretKey string `json:"secret_key"` + AccessKey string `json:"access_key"` + DefaultProjectID string `json:"default_project_id"` + ClientRetry backoff.BackOff + InstancePool map[string]InstancePool `json:"instance_pool"` +} + +// Locality defines a geographical area +// +// Scaleway Cloud has multiple Regions that are made of several Zones. +// Exactly one of Zones or Region SHOULD be set; +// if both are set, use Region and ignore Zones +type Locality struct { + Zones []scw.Zone `json:"zones,omitempty"` + Region *scw.Region `json:"region,omitempty"` +} + +// InstancePool is used as a template to spawn your instances +type InstancePool struct { + // Locality where your instances should live + // The Provider will try to spread your + // instances evenly among Locality.Zones if possible + Locality Locality `json:"locality"` + // ProjectID where resources should be applied + ProjectID *string `json:"project_id,omitempty"` + // Prefix is added before each instance name + Prefix string `json:"prefix"` + // Tags added to the placement group and its instances + Tags []string `json:"tags"` + // DynamicIPRequired: define if a dynamic IPv4 is required for the Instance. + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + // RoutedIPEnabled: if true, configure the Instance, so it uses the new routed IP mode.
+ RoutedIPEnabled *bool `json:"routed_ip_enabled,omitempty"` + // CommercialType: define the Instance commercial type (i.e. GP1-S). + CommercialType string `json:"commercial_type,omitempty"` + // Image: instance image ID or label. + Image string `json:"image,omitempty"` + // EnableIPv6: true if IPv6 is enabled on the server. + EnableIPv6 bool `json:"enable_ipv6,omitempty"` + // PublicIPs to attach to your instance indexed per instance.IPType + PublicIPs map[instance.IPType]int `json:"public_ips,omitempty"` + // SecurityGroups to use per zone + SecurityGroups map[scw.Zone]string `json:"security_groups,omitempty"` + // Storage of the block storage associated with your Instances + // It should be a multiple of 512 bytes, in future version we could give + // more customisation over the volumes used by the agents + Storage scw.Size `json:"storage"` +} + +func (l Locality) ResolveZones() ([]scw.Zone, error) { + if l.Region != nil { + if !l.Region.Exists() { + return nil, errors.New("you specified an invalid region: " + l.Region.String()) + } + + return l.Region.GetZones(), nil + } + + zones := l.Zones + if len(zones) == 0 { + return nil, errors.New("you need to specify a valid locality") + } + + for _, zone := range zones { + if !zone.Exists() { + return nil, errors.New("you specified a non-existing zone: " + zone.String()) + } + } + + return zones, nil +} diff --git a/providers/scaleway/doc.go b/providers/scaleway/doc.go new file mode 100644 index 0000000..aa702ab --- /dev/null +++ b/providers/scaleway/doc.go @@ -0,0 +1,14 @@ +// Package scaleway implements a way to use the Scaleway Cloud Provider for your +// Woodpecker CIs. +// +// This package contains subpackage per Scaleway Instance API version. +// +// # Limitations +// +// - As of now, we can only deploy instances in single-zones. 
+// +// Authors: +// - Enzo "raskyld" Nocera [@raskyld@social.vivaldi.net] +// +// [@raskyld@social.vivaldi.net]: https://social.vivaldi.net/@raskyld +package scaleway diff --git a/providers/scaleway/errors.go b/providers/scaleway/errors.go new file mode 100644 index 0000000..5615d8a --- /dev/null +++ b/providers/scaleway/errors.go @@ -0,0 +1,41 @@ +package scaleway + +import ( + "log/slog" + + "github.com/scaleway/scaleway-sdk-go/api/instance/v1" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +type InstanceAlreadyExistsError struct { + inst *instance.Server +} + +type InstanceDoesNotExists struct { + InstanceName string + Project string + Zones []scw.Zone +} + +func (i InstanceAlreadyExistsError) Error() string { + return "instance already exists" +} + +func (i InstanceAlreadyExistsError) LogValue() slog.Value { + return slog.GroupValue(slog.String("err", i.Error()), + slog.Group("instance", slog.String("id", i.inst.ID), slog.String("name", i.inst.Name), + slog.String("zone", i.inst.Zone.String()), slog.String("project", i.inst.Project))) +} + +func (i InstanceDoesNotExists) Error() string { + return "instance does not exist" +} + +func (i InstanceDoesNotExists) LogValue() slog.Value { + zones := make([]string, 0, len(i.Zones)) + for _, zone := range i.Zones { + zones = append(zones, zone.String()) + } + + return slog.GroupValue(slog.String("name", i.InstanceName), slog.String("project", i.Project), slog.Any("zones", zones)) +} diff --git a/providers/scaleway/flags.go b/providers/scaleway/flags.go new file mode 100644 index 0000000..01e2c50 --- /dev/null +++ b/providers/scaleway/flags.go @@ -0,0 +1,147 @@ +package scaleway + +import ( + "errors" + "os" + + "github.com/scaleway/scaleway-sdk-go/scw" + "github.com/urfave/cli/v2" +) + +const ( + DefaultPool = "default" + DefaultAgentStorageGB = 25 + + category = "Scaleway" + flagPrefix = "scw" + envPrefix = "WOODPECKER_SCW" +) + +var ProviderFlags = []cli.Flag{ + &cli.StringFlag{ + Name: flagPrefix + "-access-key", 
+ Usage: "Scaleway IAM API Token Access Key", + EnvVars: []string{envPrefix + "_ACCESS_KEY"}, + // NB(raskyld): We should recommend the usage of file-system to users + // Most container runtimes support mounting secrets into the fs + // natively. + FilePath: os.Getenv(envPrefix + "_ACCESS_KEY_FILE"), + Category: category, + }, + &cli.StringFlag{ + Name: flagPrefix + "-secret-key", + Usage: "Scaleway IAM API Token Secret Key", + EnvVars: []string{envPrefix + "_SECRET_KEY"}, + // NB(raskyld): We should recommend the usage of file-system to users + // Most container runtimes support mounting secrets into the fs + // natively. + FilePath: os.Getenv(envPrefix + "_SECRET_KEY_FILE"), + Category: category, + }, + // TODO(raskyld): implement multi-AZ + &cli.StringFlag{ + Name: flagPrefix + "-zone", + Usage: "Scaleway Zone where to spawn instances", + EnvVars: []string{envPrefix + "_ZONE"}, + Category: category, + Value: scw.ZoneFrPar2.String(), + }, + &cli.StringFlag{ + Name: flagPrefix + "-instance-type", + Usage: "Scaleway Instance type to spawn", + EnvVars: []string{envPrefix + "_INSTANCE_TYPE"}, + Category: category, + }, + &cli.StringSliceFlag{ + Name: flagPrefix + "-tags", + Usage: "Comma separated list of tags to uniquely identify the instances spawned", + EnvVars: []string{envPrefix + "_TAGS"}, + Category: category, + }, + &cli.StringFlag{ + Name: flagPrefix + "-project", + Usage: "Scaleway Project ID in which to spawn the instances", + EnvVars: []string{envPrefix + "_PROJECT"}, + Category: category, + }, + &cli.StringFlag{ + Name: flagPrefix + "-prefix", + Usage: "Prefix prepended before any Scaleway resource name", + EnvVars: []string{envPrefix + "_PREFIX"}, + Category: category, + Value: "wip-woodpecker-ci-autoscaler", + }, + &cli.BoolFlag{ + Name: flagPrefix + "-enable-ipv6", + Usage: "Enable IPv6 for the instances", + EnvVars: []string{envPrefix + "_ENABLE_IPV6"}, + Category: category, + }, + &cli.StringFlag{ + Name: flagPrefix + "-image", + Usage: "The base 
image for your instance", + EnvVars: []string{envPrefix + "_IMAGE"}, + Category: category, + Value: "ubuntu_jammy", + }, + &cli.Uint64Flag{ + Name: flagPrefix + "-storage-size", + Usage: "How much storage to provision for your agents in GB", + EnvVars: []string{envPrefix + "_STORAGE_SIZE"}, + Category: category, + Value: DefaultAgentStorageGB, + }, +} + +func FromCLI(c *cli.Context) (Config, error) { + if !c.IsSet(flagPrefix + "-instance-type") { + return Config{}, errors.New("you must specify an instance type") + } + + if !c.IsSet(flagPrefix + "-tags") { + return Config{}, errors.New("you must specify tags to apply to your resources") + } + + if !c.IsSet(flagPrefix + "-project") { + return Config{}, errors.New("you must specify in which project resources should be spawned") + } + + if !c.IsSet(flagPrefix + "-secret-key") { + return Config{}, errors.New("you must specify a secret key") + } + + if !c.IsSet(flagPrefix + "-access-key") { + return Config{}, errors.New("you must specify an access key") + } + + zone := scw.Zone(c.String(flagPrefix + "-zone")) + if !zone.Exists() { + return Config{}, errors.New(zone.String() + " is not a valid zone") + } + + cfg := Config{ + SecretKey: c.String(flagPrefix + "-secret-key"), + AccessKey: c.String(flagPrefix + "-access-key"), + DefaultProjectID: c.String(flagPrefix + "-project"), + } + + cfg.InstancePool = map[string]InstancePool{ + DefaultPool: { + Locality: Locality{ + Zones: []scw.Zone{zone}, + }, + ProjectID: scw.StringPtr(c.String(flagPrefix + "-project")), + Prefix: c.String(flagPrefix + "-prefix"), + Tags: c.StringSlice(flagPrefix + "-tags"), + // We do not need stable IPs for our JIT runners + DynamicIPRequired: scw.BoolPtr(true), + CommercialType: c.String(flagPrefix + "-instance-type"), + Image: c.String(flagPrefix + "-image"), + EnableIPv6: c.Bool(flagPrefix + "-enable-ipv6"), + //nolint:gomnd + Storage: scw.Size(c.Uint64(flagPrefix+"-storage-size") * 1e9), + }, + } + + return cfg, nil +} diff --git
a/providers/scaleway/provider.go b/providers/scaleway/provider.go new file mode 100644 index 0000000..3263baf --- /dev/null +++ b/providers/scaleway/provider.go @@ -0,0 +1,256 @@ +package scaleway + +import ( + "bytes" + "context" + "errors" + "math/rand" + "text/template" + "time" + + "github.com/scaleway/scaleway-sdk-go/api/instance/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + + "go.woodpecker-ci.org/autoscaler/config" + "go.woodpecker-ci.org/autoscaler/engine" + "go.woodpecker-ci.org/woodpecker/v2/woodpecker-go/woodpecker" +) + +type Provider struct { + scwCfg Config + engineCfg *config.Config + client *scw.Client +} + +func New(scwCfg Config, engineCfg *config.Config) (engine.Provider, error) { + client, err := scw.NewClient(scw.WithDefaultProjectID(scwCfg.DefaultProjectID), scw.WithAuth(scwCfg.AccessKey, scwCfg.SecretKey)) + if err != nil { + return nil, err + } + + return &Provider{ + scwCfg: scwCfg, + client: client, + engineCfg: engineCfg, + }, nil +} + +func (p *Provider) DeployAgent(ctx context.Context, agent *woodpecker.Agent) error { + _, err := p.getInstance(ctx, agent.Name) + if err != nil { + var doesNotExists *InstanceDoesNotExists + if !errors.As(err, &doesNotExists) { + return err + } + } + + inst, err := p.createInstance(ctx, agent) + if err != nil { + return err + } + + err = p.setCloudInit(ctx, agent, inst) + if err != nil { + return err + } + + // NB(raskyld): use the value for logging purpose once we implement slog + _, err = p.bootInstance(ctx, inst) + return err +} + +func (p *Provider) RemoveAgent(ctx context.Context, agent *woodpecker.Agent) error { + inst, err := p.getInstance(ctx, agent.Name) + if err != nil { + return err + } + + return p.deleteInstance(ctx, inst) +} + +func (p *Provider) ListDeployedAgentNames(ctx context.Context) ([]string, error) { + instances, err := p.getAllInstances(ctx) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(instances)) + for _, inst := range instances { + names = 
append(names, inst.Name) + } + + return names, nil +} + +func (p *Provider) getInstance(ctx context.Context, name string) (*instance.Server, error) { + pool := p.scwCfg.InstancePool[DefaultPool] + zones, err := pool.Locality.ResolveZones() + if err != nil { + return nil, err + } + + api := instance.NewAPI(p.client) + project := pool.ProjectID + + if project == nil { + project = &p.scwCfg.DefaultProjectID + } + + for _, zone := range zones { + req := instance.ListServersRequest{ + Zone: zone, + Project: project, + Name: scw.StringPtr(name), + Tags: pool.Tags, + } + + resp, err := api.ListServers(&req, scw.WithContext(ctx)) + if err != nil { + return nil, err + } + + if resp.TotalCount > 0 { + // TODO(raskyld): add a warning if there are more than 1 found, it means there are orphan resources + return resp.Servers[0], nil + } + } + + return nil, &InstanceDoesNotExists{ + InstanceName: name, + Project: *project, + Zones: zones, + } +} + +func (p *Provider) getAllInstances(ctx context.Context) ([]*instance.Server, error) { + pool := p.scwCfg.InstancePool[DefaultPool] + zones, err := pool.Locality.ResolveZones() + if err != nil { + return nil, err + } + + api := instance.NewAPI(p.client) + instances := make([]*instance.Server, 0, 150) + + for _, zone := range zones { + // TODO(raskyld): handle pagination for cases with more than 50 agents running per region + req := instance.ListServersRequest{ + Zone: zone, + Project: pool.ProjectID, + Tags: pool.Tags, + } + + resp, err := api.ListServers(&req, scw.WithContext(ctx)) + if err != nil { + return nil, err + } + + if resp.TotalCount > 0 { + instances = append(instances, resp.Servers...) 
+ } + } + + return instances, nil +} + +func (p *Provider) createInstance(ctx context.Context, agent *woodpecker.Agent) (*instance.Server, error) { + pool := p.scwCfg.InstancePool[DefaultPool] + zones, err := pool.Locality.ResolveZones() + if err != nil { + return nil, err + } + + // TODO(raskyld): Implement a well-balanced zone anti-affinity to spread instance + // evenly among zones for greater resilience. + random := rand.New(rand.NewSource(time.Now().Unix())) + zone := zones[random.Intn(len(zones))] + + api := instance.NewAPI(p.client) + + req := instance.CreateServerRequest{ + Zone: zone, + Name: agent.Name, + DynamicIPRequired: scw.BoolPtr(true), + CommercialType: pool.CommercialType, + Image: pool.Image, + Volumes: map[string]*instance.VolumeServerTemplate{ + "0": { + Boot: scw.BoolPtr(true), + Size: scw.SizePtr(pool.Storage), + VolumeType: instance.VolumeVolumeTypeBSSD, + }, + }, + EnableIPv6: pool.EnableIPv6, + Project: pool.ProjectID, + Tags: pool.Tags, + } + + res, err := api.CreateServer(&req, scw.WithContext(ctx)) + if err != nil { + return nil, err + } + + return res.Server, nil +} + +func (p *Provider) setCloudInit(ctx context.Context, agent *woodpecker.Agent, inst *instance.Server) error { + tpl, err := template.New("user-data").Parse(engine.CloudInitUserDataUbuntuDefault) + if err != nil { + return err + } + + ud, err := engine.RenderUserDataTemplate(p.engineCfg, agent, tpl) + if err != nil { + return err + } + + api := instance.NewAPI(p.client) + + req := instance.SetServerUserDataRequest{ + Zone: inst.Zone, + ServerID: inst.ID, + Key: "cloud-init", + Content: bytes.NewBufferString(ud), + } + + err = api.SetServerUserData(&req, scw.WithContext(ctx)) + if err != nil { + return err + } + + return nil +} + +func (p *Provider) deleteInstance(ctx context.Context, inst *instance.Server) error { + err := p.haltInstance(ctx, inst) + if err != nil { + return err + } + + api := instance.NewAPI(p.client) + + return 
api.DeleteServer(&instance.DeleteServerRequest{ + Zone: inst.Zone, + ServerID: inst.ID, + }, scw.WithContext(ctx)) +} + +func (p *Provider) bootInstance(ctx context.Context, inst *instance.Server) (*instance.ServerActionResponse, error) { + api := instance.NewAPI(p.client) + + return api.ServerAction(&instance.ServerActionRequest{ + Zone: inst.Zone, + ServerID: inst.ID, + Action: instance.ServerActionPoweron, + }, scw.WithContext(ctx)) +} + +func (p *Provider) haltInstance(ctx context.Context, inst *instance.Server) error { + api := instance.NewAPI(p.client) + + return api.ServerActionAndWait(&instance.ServerActionAndWaitRequest{ + Zone: inst.Zone, + ServerID: inst.ID, + Action: instance.ServerActionPoweroff, + }, scw.WithContext(ctx)) +}