From ef873e188c16e15fc54af57ff75ed48b7f7414ec Mon Sep 17 00:00:00 2001
From: sp-yduck <sudo20.t.ab@gmail.com>
Date: Fri, 3 Nov 2023 19:04:10 +0900
Subject: [PATCH] support resource pool
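
Add an optional ResourcePool field to the ProxmoxCluster and
ProxmoxMachine specs. The cluster controller now reconciles a Proxmox
resource pool (named after the cluster by default), adds the cloud-init
snippet storage to it, and the instance service joins each VM to the
pool. On cluster deletion the pool is removed only when it has no
remaining members.

Illustrative usage (the resource names below are examples only):

    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: ProxmoxCluster
    metadata:
      name: sample-cluster
    spec:
      resourcePool: sample-pool # optional; defaults to the cluster name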

---
 api/v1beta1/proxmoxcluster_types.go           |  3 +
 api/v1beta1/proxmoxmachine_types.go           |  4 +
 cloud/interfaces.go                           |  2 +
 cloud/scope/cluster.go                        | 21 ++--
 cloud/scope/machine.go                        | 97 ++++++++++---------
 cloud/services/compute/instance/pool.go       | 14 +++
 cloud/services/compute/instance/reconcile.go  |  8 +-
 .../compute/resourcepool/reconcile.go         | 82 ++++++++++++++++
 .../compute/resourcepool/reconcile_test.go    | 61 ++++++++++++
 .../services/compute/resourcepool/service.go  | 24 +++++
 .../compute/resourcepool/suite_test.go        | 47 +++++++++
 cloud/services/compute/storage/reconcile.go   |  2 +-
 .../compute/storage/reconcile_test.go         |  7 ++
 ...ture.cluster.x-k8s.io_proxmoxclusters.yaml |  3 +
 ...ture.cluster.x-k8s.io_proxmoxmachines.yaml |  4 +
 ...ster.x-k8s.io_proxmoxmachinetemplates.yaml |  4 +
 controllers/proxmoxcluster_controller.go      |  3 +
 go.mod                                        |  2 +-
 go.sum                                        |  4 +-
 internal/fake/cluster_scope.go                |  8 ++
 20 files changed, 343 insertions(+), 57 deletions(-)
 create mode 100644 cloud/services/compute/instance/pool.go
 create mode 100644 cloud/services/compute/resourcepool/reconcile.go
 create mode 100644 cloud/services/compute/resourcepool/reconcile_test.go
 create mode 100644 cloud/services/compute/resourcepool/service.go
 create mode 100644 cloud/services/compute/resourcepool/suite_test.go

diff --git a/api/v1beta1/proxmoxcluster_types.go b/api/v1beta1/proxmoxcluster_types.go
index 8d53e39..ca4cfb0 100644
--- a/api/v1beta1/proxmoxcluster_types.go
+++ b/api/v1beta1/proxmoxcluster_types.go
@@ -37,6 +37,9 @@ type ProxmoxClusterSpec struct {
 
 	// storage is used for storing cloud init snippet
 	Storage Storage `json:"storage,omitempty"`
+
+	// resource pool that cappx managed vms join by default; defaults to the cluster name if empty
+	ResourcePool string `json:"resourcePool,omitempty"`
 }
 
 // ProxmoxClusterStatus defines the observed state of ProxmoxCluster
diff --git a/api/v1beta1/proxmoxmachine_types.go b/api/v1beta1/proxmoxmachine_types.go
index c83693d..6cf9db7 100644
--- a/api/v1beta1/proxmoxmachine_types.go
+++ b/api/v1beta1/proxmoxmachine_types.go
@@ -41,6 +41,10 @@ type ProxmoxMachineSpec struct {
 	// cappx will use random storage if empty
 	Storage string `json:"storage,omitempty"`
 
+	// Resource pool that the VM will be added to.
+	// Uses ProxmoxCluster.spec.resourcePool if empty.
+	ResourcePool string `json:"resourcePool,omitempty"`
+
 	// +kubebuilder:validation:Minimum:=0
 	// VMID is proxmox qemu's id
 	VMID *int `json:"vmID,omitempty"`
diff --git a/cloud/interfaces.go b/cloud/interfaces.go
index d6548e7..eba0745 100644
--- a/cloud/interfaces.go
+++ b/cloud/interfaces.go
@@ -36,6 +36,7 @@ type ClusterGetter interface {
 	// FailureDomains() clusterv1.FailureDomains
 	ControlPlaneEndpoint() clusterv1.APIEndpoint
 	Storage() infrav1.Storage
+	ResourcePool() string
 }
 
 type ClusterSettter interface {
@@ -62,6 +63,7 @@ type MachineGetter interface {
 	GetInstanceStatus() *infrav1.InstanceStatus
 	GetClusterStorage() infrav1.Storage
 	GetStorage() string
+	GetResourcePool() string
 	GetCloudInit() infrav1.CloudInit
 	GetNetwork() infrav1.Network
 	GetHardware() infrav1.Hardware
diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go
index b4ab8b6..1653f08 100644
--- a/cloud/scope/cluster.go
+++ b/cloud/scope/cluster.go
@@ -90,6 +90,10 @@ func (s *ClusterScope) Namespace() string {
 	return s.Cluster.Namespace
 }
 
+func (s *ClusterScope) CloudClient() *proxmox.Service {
+	return s.ProxmoxServices.Compute
+}
+
 func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint {
 	return s.ProxmoxCluster.Spec.ControlPlaneEndpoint
 }
@@ -105,12 +109,13 @@ func (s *ClusterScope) Storage() infrav1.Storage {
 	return s.ProxmoxCluster.Spec.Storage
 }
 
-func (s *ClusterScope) CloudClient() *proxmox.Service {
-	return s.ProxmoxServices.Compute
-}
-
-func (s *ClusterScope) Close() error {
-	return s.PatchObject()
+// ResourcePool returns the resource pool name; if unspecified, it sets and returns the cluster name as the default.
+func (s *ClusterScope) ResourcePool() string {
+	if s.ProxmoxCluster.Spec.ResourcePool == "" {
+		// use cluster name as default value
+		s.ProxmoxCluster.Spec.ResourcePool = s.Name()
+	}
+	return s.ProxmoxCluster.Spec.ResourcePool
 }
 
 func (s *ClusterScope) SetReady() {
@@ -125,6 +130,10 @@ func (s *ClusterScope) SetStorage(storage infrav1.Storage) {
 	s.ProxmoxCluster.Spec.Storage = storage
 }
 
+func (s *ClusterScope) Close() error {
+	return s.PatchObject()
+}
+
 // PatchObject persists the cluster configuration and status.
 func (s *ClusterScope) PatchObject() error {
 	return s.patchHelper.Patch(context.TODO(), s.ProxmoxCluster)
diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go
index 647fc39..0f78c05 100644
--- a/cloud/scope/machine.go
+++ b/cloud/scope/machine.go
@@ -86,6 +86,18 @@ type MachineScope struct {
 	SchedulerManager *scheduler.Manager
 }
 
+func (m *MachineScope) Name() string {
+	return m.ProxmoxMachine.Name
+}
+
+func (m *MachineScope) Namespace() string {
+	return m.ProxmoxMachine.Namespace
+}
+
+func (m *MachineScope) Annotations() map[string]string {
+	return m.ProxmoxMachine.Annotations
+}
+
 func (m *MachineScope) CloudClient() *proxmox.Service {
 	return m.ClusterGetter.CloudClient()
 }
@@ -96,41 +108,50 @@ func (m *MachineScope) GetScheduler(client *proxmox.Service) *scheduler.Schedule
 	return sched
 }
 
+// GetClusterStorage returns ProxmoxCluster.spec.storage.
 func (m *MachineScope) GetClusterStorage() infrav1.Storage {
 	return m.ClusterGetter.Storage()
 }
 
-func (m *MachineScope) GetStorage() string {
-	return m.ProxmoxMachine.Spec.Storage
+func (m *MachineScope) GetVMID() *int {
+	return m.ProxmoxMachine.Spec.VMID
 }
 
-func (m *MachineScope) Name() string {
-	return m.ProxmoxMachine.Name
+func (m *MachineScope) NodeName() string {
+	return m.ProxmoxMachine.Spec.Node
 }
 
-func (m *MachineScope) Namespace() string {
-	return m.ProxmoxMachine.Namespace
+func (m *MachineScope) GetImage() infrav1.Image {
+	return m.ProxmoxMachine.Spec.Image
 }
 
-func (m *MachineScope) Annotations() map[string]string {
-	return m.ProxmoxMachine.Annotations
+func (m *MachineScope) GetStorage() string {
+	return m.ProxmoxMachine.Spec.Storage
 }
 
-func (m *MachineScope) NodeName() string {
-	return m.ProxmoxMachine.Spec.Node
+// GetResourcePool returns the machine's resource pool; if unset, it falls back to (and persists) ProxmoxCluster.spec.resourcePool.
+func (m *MachineScope) GetResourcePool() string {
+	if m.ProxmoxMachine.Spec.ResourcePool == "" {
+		m.ProxmoxMachine.Spec.ResourcePool = m.ClusterGetter.ResourcePool()
+	}
+	return m.ProxmoxMachine.Spec.ResourcePool
 }
 
-func (m *MachineScope) SetNodeName(name string) {
-	m.ProxmoxMachine.Spec.Node = name
+func (m *MachineScope) GetCloudInit() infrav1.CloudInit {
+	return m.ProxmoxMachine.Spec.CloudInit
 }
 
-func (m *MachineScope) SetStorage(name string) {
-	m.ProxmoxMachine.Spec.Storage = name
+func (m *MachineScope) GetNetwork() infrav1.Network {
+	return m.ProxmoxMachine.Spec.Network
 }
 
-// func (m *MachineScope) Client() Compute {
-// 	return m.ClusterGetter.Client()
-// }
+func (m *MachineScope) GetHardware() infrav1.Hardware {
+	return m.ProxmoxMachine.Spec.Hardware
+}
+
+func (m *MachineScope) GetOptions() infrav1.Options {
+	return m.ProxmoxMachine.Spec.Options
+}
 
 func (m *MachineScope) GetBootstrapData() (string, error) {
 	if m.Machine.Spec.Bootstrap.DataSecretName == nil {
@@ -151,19 +172,10 @@ func (m *MachineScope) GetBootstrapData() (string, error) {
 	return string(value), nil
 }
 
-func (m *MachineScope) Close() error {
-	return m.PatchObject()
-}
-
 func (m *MachineScope) GetInstanceStatus() *infrav1.InstanceStatus {
 	return m.ProxmoxMachine.Status.InstanceStatus
 }
 
-// SetInstanceStatus sets the ProxmoxMachine instance status.
-func (m *MachineScope) SetInstanceStatus(v infrav1.InstanceStatus) {
-	m.ProxmoxMachine.Status.InstanceStatus = &v
-}
-
 func (m *MachineScope) GetBiosUUID() *string {
 	parsed, err := noderefutil.NewProviderID(m.GetProviderID()) //nolint: staticcheck
 	if err != nil {
@@ -179,28 +191,12 @@ func (m *MachineScope) GetProviderID() string {
 	return ""
 }
 
-func (m *MachineScope) GetVMID() *int {
-	return m.ProxmoxMachine.Spec.VMID
-}
-
-func (m *MachineScope) GetImage() infrav1.Image {
-	return m.ProxmoxMachine.Spec.Image
-}
-
-func (m *MachineScope) GetCloudInit() infrav1.CloudInit {
-	return m.ProxmoxMachine.Spec.CloudInit
-}
-
-func (m *MachineScope) GetNetwork() infrav1.Network {
-	return m.ProxmoxMachine.Spec.Network
-}
-
-func (m *MachineScope) GetHardware() infrav1.Hardware {
-	return m.ProxmoxMachine.Spec.Hardware
+func (m *MachineScope) SetNodeName(name string) {
+	m.ProxmoxMachine.Spec.Node = name
 }
 
-func (m *MachineScope) GetOptions() infrav1.Options {
-	return m.ProxmoxMachine.Spec.Options
+func (m *MachineScope) SetStorage(name string) {
+	m.ProxmoxMachine.Spec.Storage = name
 }
 
 // SetProviderID sets the ProxmoxMachine providerID in spec.
@@ -221,6 +217,11 @@ func (m *MachineScope) SetConfigStatus(config api.VirtualMachineConfig) {
 	m.ProxmoxMachine.Status.Config = config
 }
 
+// SetInstanceStatus sets the ProxmoxMachine instance status.
+func (m *MachineScope) SetInstanceStatus(v infrav1.InstanceStatus) {
+	m.ProxmoxMachine.Status.InstanceStatus = &v
+}
+
 func (m *MachineScope) SetReady() {
 	m.ProxmoxMachine.Status.Ready = true
 }
@@ -233,6 +234,10 @@ func (m *MachineScope) SetFailureReason(v capierrors.MachineStatusError) {
 	m.ProxmoxMachine.Status.FailureReason = &v
 }
 
+func (m *MachineScope) Close() error {
+	return m.PatchObject()
+}
+
 // PatchObject persists the cluster configuration and status.
 func (s *MachineScope) PatchObject() error {
 	return s.patchHelper.Patch(context.TODO(), s.ProxmoxMachine)
diff --git a/cloud/services/compute/instance/pool.go b/cloud/services/compute/instance/pool.go
new file mode 100644
index 0000000..fd879f2
--- /dev/null
+++ b/cloud/services/compute/instance/pool.go
@@ -0,0 +1,14 @@
+package instance
+
+import "context"
+
+// reconcileResourcePool adds the VM to the resource pool configured on the scope.
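+// The pool itself is created by the cluster-level resourcepool service,
+// so it is expected to exist by the time a machine is reconciled.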
+func (s *Service) reconcileResourcePool(ctx context.Context) error {
+	pool, err := s.client.Pool(ctx, s.scope.GetResourcePool())
+	if err != nil {
+		return err
+	}
+	return pool.AddVMs(ctx, []int{*s.scope.GetVMID()})
+}
diff --git a/cloud/services/compute/instance/reconcile.go b/cloud/services/compute/instance/reconcile.go
index 5fd5d25..26a3b22 100644
--- a/cloud/services/compute/instance/reconcile.go
+++ b/cloud/services/compute/instance/reconcile.go
@@ -33,7 +33,6 @@ func (s *Service) Reconcile(ctx context.Context) error {
 		return err
 	}
 
-	log.Info(fmt.Sprintf("Reconciled instance: bios-uuid=%s", *uuid))
 	if err := s.scope.SetProviderID(*uuid); err != nil {
 		return err
 	}
@@ -46,6 +45,13 @@ func (s *Service) Reconcile(ctx context.Context) error {
 		return err
 	}
 	s.scope.SetConfigStatus(*config)
+
+	// add vm to resource pool
+	if err := s.reconcileResourcePool(ctx); err != nil {
+		return err
+	}
+
+	log.Info("Reconciled instance")
 	return nil
 }
 
diff --git a/cloud/services/compute/resourcepool/reconcile.go b/cloud/services/compute/resourcepool/reconcile.go
new file mode 100644
index 0000000..c7bf77e
--- /dev/null
+++ b/cloud/services/compute/resourcepool/reconcile.go
@@ -0,0 +1,82 @@
+package resourcepool
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/sp-yduck/proxmox-go/api"
+	"github.com/sp-yduck/proxmox-go/proxmox"
+	"github.com/sp-yduck/proxmox-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+func (s *Service) Reconcile(ctx context.Context) error {
+	log := log.FromContext(ctx)
+	log.Info("Reconciling resource pool")
+
+	pool, err := s.createOrGetResourcePool(ctx)
+	if err != nil {
+		return err
+	}
+
+	if err := pool.AddStorages(ctx, []string{s.scope.Storage().Name}); err != nil {
+		log.Error(err, "failed to add sotrage to pool")
+		return err
+	}
+
+	log.Info("Reconciled resource pool")
+	return nil
+}
+
+func (s *Service) Delete(ctx context.Context) error {
+	log := log.FromContext(ctx)
+	log.Info("Deleteing resource pool")
+	return s.deleteResourcePool(ctx)
+}
+
+func (s *Service) createOrGetResourcePool(ctx context.Context) (*proxmox.Pool, error) {
+	log := log.FromContext(ctx)
+
+	pool, err := s.client.Pool(ctx, s.scope.ResourcePool())
+	if err != nil {
+		if rest.IsNotFound(err) {
+			log.Info("resource pool not found. it will be created")
+			return s.createResourcePool(ctx)
+		}
+		log.Error(err, "failed to get resource pool")
+		return nil, err
+	}
+	return pool, nil
+}
+
+func (s *Service) createResourcePool(ctx context.Context) (*proxmox.Pool, error) {
+	pool := api.ResourcePool{
+		PoolID:  s.scope.ResourcePool(),
+		Comment: fmt.Sprintf("Default Resource Pool used for %s cluster", s.scope.Name()),
+	}
+	return s.client.CreatePool(ctx, pool)
+}
+
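+// deleteResourcePool removes the pool if it exists and has no members;
+// a pool that still has members is left in place.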
+func (s *Service) deleteResourcePool(ctx context.Context) error {
+	log := log.FromContext(ctx)
+	poolid := s.scope.ResourcePool()
+	pool, err := s.client.Pool(ctx, poolid)
+	if err != nil {
+		if rest.IsNotFound(err) {
+			log.Info("resource pool not found or already deleted")
+			return nil
+		}
+		return err
+	}
+	members, err := pool.GetMembers(ctx)
+	if err != nil {
+		return err
+	}
+	if len(members) != 0 {
+		log.Info("resource pool not empty, skipping deletion")
+		return nil
+	}
+	return s.client.DeletePool(ctx, poolid)
+}
diff --git a/cloud/services/compute/resourcepool/reconcile_test.go b/cloud/services/compute/resourcepool/reconcile_test.go
new file mode 100644
index 0000000..78c29f7
--- /dev/null
+++ b/cloud/services/compute/resourcepool/reconcile_test.go
@@ -0,0 +1,61 @@
+package resourcepool_test
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/services/compute/resourcepool"
+	"github.com/sp-yduck/cluster-api-provider-proxmox/internal/fake"
+)
+
+var _ = Describe("Delete", Label("integration", "resourcepool"), func() {
+	var service *resourcepool.Service
+
+	BeforeEach(func() {
+		scope := fake.NewClusterScope(proxmoxSvc)
+		service = resourcepool.NewService(scope)
+		err := service.Reconcile(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+	})
+	AfterEach(func() {})
+
+	Context("Reconcile on Delete resource pool", func() {
+		It("should not error", func() {
+
+			// the pool should exist at this point
+			err := service.Delete(context.Background())
+			Expect(err).NotTo(HaveOccurred())
+
+			// the pool is already gone; Delete should still succeed
+			err = service.Delete(context.Background())
+			Expect(err).NotTo(HaveOccurred())
+		})
+	})
+})
+
+var _ = Describe("Reconcile", Label("integration", "resourcepool"), func() {
+	var service *resourcepool.Service
+
+	BeforeEach(func() {
+		scope := fake.NewClusterScope(proxmoxSvc)
+		service = resourcepool.NewService(scope)
+	})
+	AfterEach(func() {
+		err := service.Delete(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	Context("Reconcile resourcepool", func() {
+		It("shold not error", func() {
+			// there should be no resourcepool yet
+			err := service.Reconcile(context.Background())
+			Expect(err).NotTo(HaveOccurred())
+
+			// the resource pool already exists; Reconcile should be idempotent
+			err = service.Reconcile(context.Background())
+			Expect(err).NotTo(HaveOccurred())
+		})
+	})
+})
diff --git a/cloud/services/compute/resourcepool/service.go b/cloud/services/compute/resourcepool/service.go
new file mode 100644
index 0000000..758ad52
--- /dev/null
+++ b/cloud/services/compute/resourcepool/service.go
@@ -0,0 +1,24 @@
+package resourcepool
+
+import (
+	"github.com/sp-yduck/proxmox-go/proxmox"
+
+	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud"
+)
+
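+// Scope is the cluster-scope interface required by the resource pool service.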
+type Scope interface {
+	cloud.Cluster
+}
+
+type Service struct {
+	scope  Scope
+	client proxmox.Service
+}
+
+func NewService(s Scope) *Service {
+	return &Service{
+		scope:  s,
+		client: *s.CloudClient(),
+	}
+}
diff --git a/cloud/services/compute/resourcepool/suite_test.go b/cloud/services/compute/resourcepool/suite_test.go
new file mode 100644
index 0000000..795416f
--- /dev/null
+++ b/cloud/services/compute/resourcepool/suite_test.go
@@ -0,0 +1,47 @@
+package resourcepool_test
+
+import (
+	"os"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"github.com/sp-yduck/proxmox-go/proxmox"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var (
+	proxmoxSvc *proxmox.Service
+)
+
+func TestResourcePool(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "ResourcePool Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
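+	// Integration specs talk to a real Proxmox endpoint configured via
+	// PROXMOX_* environment variables; unit runs skip this client setup.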
+	if GinkgoLabelFilter() != "unit" {
+		By("setup proxmox client to do integration test")
+		url := os.Getenv("PROXMOX_URL")
+		user := os.Getenv("PROXMOX_USER")
+		password := os.Getenv("PROXMOX_PASSWORD")
+		tokenid := os.Getenv("PROXMOX_TOKENID")
+		secret := os.Getenv("PROXMOX_SECRET")
+
+		authConfig := proxmox.AuthConfig{
+			Username: user,
+			Password: password,
+			TokenID:  tokenid,
+			Secret:   secret,
+		}
+		param := proxmox.NewParams(url, authConfig, proxmox.ClientConfig{InsecureSkipVerify: true})
+		var err error
+		proxmoxSvc, err = proxmox.GetOrCreateService(param)
+		Expect(err).NotTo(HaveOccurred())
+	}
+})
diff --git a/cloud/services/compute/storage/reconcile.go b/cloud/services/compute/storage/reconcile.go
index 2364f4d..93e7960 100644
--- a/cloud/services/compute/storage/reconcile.go
+++ b/cloud/services/compute/storage/reconcile.go
@@ -39,7 +39,7 @@ func (s *Service) createOrGetStorage(ctx context.Context) error {
 	opts := generateVMStorageOptions(s.scope)
 	if err := s.getStorage(ctx, opts.Storage); err != nil {
 		if rest.IsNotFound(err) {
-			log.Info("storage %s not found. it will be created")
+			log.Info("storage not found. it will be created")
 			return s.createStorage(ctx, opts)
 		}
 		return err
diff --git a/cloud/services/compute/storage/reconcile_test.go b/cloud/services/compute/storage/reconcile_test.go
index f309df6..bceda52 100644
--- a/cloud/services/compute/storage/reconcile_test.go
+++ b/cloud/services/compute/storage/reconcile_test.go
@@ -18,13 +18,20 @@ var _ = Describe("Delete", Label("integration", "storage"), func() {
 	BeforeEach(func() {
 		scope := fake.NewClusterScope(proxmoxSvc)
 		service = storage.NewService(scope)
+		err := service.Reconcile(context.Background())
+		Expect(err).NotTo(HaveOccurred())
 	})
 	AfterEach(func() {})
 
 	Context("Reconcile on Delete Storage", func() {
 		It("should not error", func() {
+			// the storage should exist at this point
 			err := service.Delete(context.Background())
 			Expect(err).NotTo(HaveOccurred())
+
+			// the storage is already gone; Delete should still succeed
+			err = service.Delete(context.Background())
+			Expect(err).NotTo(HaveOccurred())
 		})
 	})
 })
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml
index 97c36bb..c0eed80 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml
@@ -67,6 +67,10 @@
                 - host
                 - port
                 type: object
+              resourcePool:
+                description: resource pool that cappx managed vms join by default;
+                  defaults to the cluster name if empty
+                type: string
               serverRef:
                 description: ServerRef is used for configuring Proxmox client
                 properties:
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml
index 64d73e4..dedca63 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml
@@ -506,6 +506,10 @@ spec:
               providerID:
                 description: ProviderID
                 type: string
+              resourcePool:
+                description: Resource pool that the VM will be added to. Uses ProxmoxCluster.spec.resourcePool
+                  if empty.
+                type: string
               storage:
                 description: Storage is name of proxmox storage used by this node.
                   The storage must support "images(VM Disks)" type of content. cappx
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml
index dee6f6b..5f02049 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml
@@ -530,6 +530,10 @@ spec:
                       providerID:
                         description: ProviderID
                         type: string
+                      resourcePool:
+                        description: Resource pool that the VM will be added to. Uses
+                          ProxmoxCluster.spec.resourcePool if empty.
+                        type: string
                       storage:
                         description: Storage is name of proxmox storage used by this
                           node. The storage must support "images(VM Disks)" type of
diff --git a/controllers/proxmoxcluster_controller.go b/controllers/proxmoxcluster_controller.go
index fa9bbd0..a7c6f6c 100644
--- a/controllers/proxmoxcluster_controller.go
+++ b/controllers/proxmoxcluster_controller.go
@@ -35,6 +35,7 @@ import (
 	infrav1 "github.com/sp-yduck/cluster-api-provider-proxmox/api/v1beta1"
 	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud"
 	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scope"
+	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/services/compute/resourcepool"
 	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/services/compute/storage"
 )
 
@@ -116,6 +117,7 @@ func (r *ProxmoxClusterReconciler) reconcile(ctx context.Context, clusterScope *
 
 	reconcilers := []cloud.Reconciler{
 		storage.NewService(clusterScope),
+		resourcepool.NewService(clusterScope),
 	}
 
 	for _, r := range reconcilers {
@@ -146,6 +148,7 @@ func (r *ProxmoxClusterReconciler) reconcileDelete(ctx context.Context, clusterS
 
 	reconcilers := []cloud.Reconciler{
 		storage.NewService(clusterScope),
+		resourcepool.NewService(clusterScope),
 	}
 
 	for _, r := range reconcilers {
diff --git a/go.mod b/go.mod
index e8838f7..f000cb6 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.13.0
 	github.com/onsi/gomega v1.29.0
 	github.com/pkg/errors v0.9.1
-	github.com/sp-yduck/proxmox-go v0.0.0-20231028112758-ef366c611274
+	github.com/sp-yduck/proxmox-go v0.0.0-alpha20
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.27.2
 	k8s.io/apimachinery v0.27.2
diff --git a/go.sum b/go.sum
index 8930d70..9a4b732 100644
--- a/go.sum
+++ b/go.sum
@@ -417,8 +417,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/sp-yduck/proxmox-go v0.0.0-20231028112758-ef366c611274 h1:XhsJkLNMEreFwJh88jimmxrfW5yeH/a2t30GdhPxles=
-github.com/sp-yduck/proxmox-go v0.0.0-20231028112758-ef366c611274/go.mod h1:iEI7ilRwyUaWvoxuXs/30UJ2f8Z2TWXCW1cZ3QeU2JY=
+github.com/sp-yduck/proxmox-go v0.0.0-alpha20 h1:C6/flqB0VRHgQNZrLFCXJ2wtmECgXdzIGd1Rmiu80WQ=
+github.com/sp-yduck/proxmox-go v0.0.0-alpha20/go.mod h1:iEI7ilRwyUaWvoxuXs/30UJ2f8Z2TWXCW1cZ3QeU2JY=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
diff --git a/internal/fake/cluster_scope.go b/internal/fake/cluster_scope.go
index c3485cf..dd2d2f5 100644
--- a/internal/fake/cluster_scope.go
+++ b/internal/fake/cluster_scope.go
@@ -13,6 +13,7 @@ type FakeClusterScope struct {
 	namespace            string
 	controlPlaneEndpoint clusterv1.APIEndpoint
 	storage              infrav1.Storage
+	resourcePool         string
 }
 
 func NewClusterScope(client *proxmox.Service) *FakeClusterScope {
@@ -43,6 +44,13 @@ func (f *FakeClusterScope) Storage() infrav1.Storage {
 	return f.storage
 }
 
+func (f *FakeClusterScope) ResourcePool() string {
+	if f.resourcePool == "" {
+		return f.name
+	}
+	return f.resourcePool
+}
+
 func (f *FakeClusterScope) CloudClient() *proxmox.Service {
 	return f.cloudClient
 }