2025-03-25 23:39:38 +02:00
parent 567935bf7f
commit ae058f92e6
23 changed files with 1395 additions and 44 deletions


@@ -0,0 +1,224 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
	"context"
	"fmt"
	"strings"

	traefikv3 "github.com/traefik/traefik/v3/pkg/provider/kubernetes/crd/traefikio/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"

	servermanagerv1alpha1 "git.acooldomain.co/server-manager/kubernetes-operator/api/v1alpha1"
)
type ServerManagerReconcilerConfig struct {
DomainLabel string `yaml:"domain_label"`
DefaultDomain string `yaml:"default_domain"`
}
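
// An example configuration file unmarshalled into this struct might look like
// the following (illustrative values only; the label key and domain are
// assumptions, not taken from this repository):
//
//	domain_label: node.acooldomain.co/domain
//	default_domain: example.com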
// ServerManagerReconciler reconciles a ServerManager object
type ServerManagerReconciler struct {
client.Client
Scheme *runtime.Scheme
Config ServerManagerReconcilerConfig
}
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/finalizers,verbs=update
// The reconciler also manages Pods, Services, and PVCs, reads Nodes, and owns
// Traefik IngressRoutes, so the generated role needs these permissions too.
// +kubebuilder:rbac:groups="",resources=pods;services;persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
// +kubebuilder:rbac:groups=traefik.io,resources=ingressroutes,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// It ensures the PersistentVolumeClaim, server Pod, and Service backing a
// ServerManager exist, and records the externally reachable domain on the
// object's status.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logging := log.FromContext(ctx)
s := &servermanagerv1alpha1.ServerManager{}
err := r.Get(ctx, req.NamespacedName, s)
if err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
	pvc := r.ServerPvc(s)
	found := &corev1.PersistentVolumeClaim{}
	err = r.Get(ctx, client.ObjectKey{Namespace: pvc.Namespace, Name: pvc.Name}, found)
	if err != nil {
		if errors.IsNotFound(err) {
			if err := r.Create(ctx, pvc); err != nil {
				return ctrl.Result{}, err
			}
		} else {
			return ctrl.Result{}, err
		}
	}
	serverPod := r.ServerPod(s, pvc)
	if serverPod != nil {
		found := &corev1.Pod{}
		// Look the Pod up under its own key rather than the PVC's; the names
		// happen to coincide, but this keeps the intent clear.
		err = r.Get(ctx, client.ObjectKey{Namespace: serverPod.Namespace, Name: serverPod.Name}, found)
		if err != nil {
			if errors.IsNotFound(err) {
				if err := r.Create(ctx, serverPod); err != nil {
					return ctrl.Result{}, err
				}
				return ctrl.Result{Requeue: true}, nil
			}
			return ctrl.Result{}, err
		}
		// Derive the externally reachable domain: prefer a per-node label if
		// one is configured, otherwise fall back to the default domain.
		domain := r.Config.DefaultDomain
		if r.Config.DomainLabel != "" {
			node := &corev1.Node{}
			err = r.Get(ctx, client.ObjectKey{Name: found.Spec.NodeName}, node)
			if err != nil {
				logging.Error(err, fmt.Sprintf("Failed to find node %s", found.Spec.NodeName))
				return ctrl.Result{}, err
			}
			if labelDomain, ok := node.GetLabels()[r.Config.DomainLabel]; ok {
				domain = labelDomain
			}
		}
		if domain != s.Status.Server.Domain {
			s.Status.Server.Domain = domain
			// Status is a subresource, so write it through the status client
			// rather than a full object update.
			err = r.Status().Update(ctx, s)
			return ctrl.Result{}, err
		}
}
	// Ensure the Service exists, mirroring the PVC handling above.
	service := r.ServerService(s)
	foundService := &corev1.Service{}
	err = r.Get(ctx, client.ObjectKey{Namespace: service.Namespace, Name: service.Name}, foundService)
	if err != nil {
		if errors.IsNotFound(err) {
			if err := r.Create(ctx, service); err != nil {
				return ctrl.Result{}, err
			}
		} else {
			return ctrl.Result{}, err
		}
	}
	return ctrl.Result{}, nil
}
func (r *ServerManagerReconciler) ServerService(s *servermanagerv1alpha1.ServerManager) *corev1.Service {
	ports := make([]corev1.ServicePort, len(s.Spec.Server.Ports))
	for i, port := range s.Spec.Server.Ports {
		ports[i] = corev1.ServicePort{
			// Service port names must be lowercase to be valid IANA_SVC_NAMEs,
			// and the protocol must be set explicitly or it defaults to TCP.
			Name:       fmt.Sprintf("%s-%d", strings.ToLower(string(port.Protocol)), port.Port),
			Protocol:   port.Protocol,
			Port:       port.Port,
			TargetPort: intstr.FromInt32(port.Port),
		}
	}
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: s.Name,
			Namespace: s.Namespace,
},
Spec: corev1.ServiceSpec{
Ports: ports,
Selector: map[string]string{
"server": s.Spec.Id,
},
},
}
controllerutil.SetControllerReference(s, service, r.Scheme)
return service
}
func (r *ServerManagerReconciler) ServerPvc(s *servermanagerv1alpha1.ServerManager) *corev1.PersistentVolumeClaim {
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: s.Name,
Namespace: s.Namespace,
},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					// The API server rejects a PVC with no storage request.
					// Placeholder size; the real value presumably belongs on
					// the CRD spec.
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
}
controllerutil.SetControllerReference(s, pvc, r.Scheme)
return pvc
}
func (r *ServerManagerReconciler) ServerPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim) *corev1.Pod {
if !s.Spec.Server.On {
return nil
}
ports := make([]corev1.ContainerPort, len(s.Spec.Server.Ports))
for i, port := range s.Spec.Server.Ports {
ports[i] = corev1.ContainerPort{
ContainerPort: port.Port,
Protocol: port.Protocol,
}
}
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name,
			Namespace: s.Namespace,
			// This label must match the Service selector; without it the
			// Service would select no pods.
			Labels: map[string]string{
				"server": s.Spec.Id,
			},
		},
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "volume",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
},
},
Containers: []corev1.Container{
{
Name: "server",
Image: s.Spec.Server.Image,
Command: s.Spec.Server.Command,
Args: s.Spec.Server.Args,
WorkingDir: s.Spec.Server.WorkingDir,
Ports: ports,
VolumeMounts: []corev1.VolumeMount{{
Name: "volume",
MountPath: s.Spec.Server.WorkingDir,
}},
Stdin: true,
TTY: true,
},
},
},
}
controllerutil.SetControllerReference(s, pod, r.Scheme)
return pod
}
// SetupWithManager sets up the controller with the Manager.
func (r *ServerManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&servermanagerv1alpha1.ServerManager{}).
Owns(&corev1.Pod{}).
Owns(&corev1.Service{}).
Owns(&corev1.PersistentVolumeClaim{}).
Owns(&traefikv3.IngressRoute{}).
Complete(r)
}
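
For orientation, here is a minimal sketch of how this reconciler might be wired up in the operator's cmd/main.go. The controller package import path, the config values, and the label key are illustrative assumptions, not code from this commit:

package main

import (
	"os"

	traefikv3 "github.com/traefik/traefik/v3/pkg/provider/kubernetes/crd/traefikio/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	servermanagerv1alpha1 "git.acooldomain.co/server-manager/kubernetes-operator/api/v1alpha1"
	"git.acooldomain.co/server-manager/kubernetes-operator/internal/controller" // assumed path
)

func main() {
	// Register the core types, the operator's CRD, and the Traefik types the
	// controller owns (IngressRoute) into one scheme.
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(servermanagerv1alpha1.AddToScheme(scheme))
	utilruntime.Must(traefikv3.AddToScheme(scheme))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	if err := (&controller.ServerManagerReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
		Config: controller.ServerManagerReconcilerConfig{
			DomainLabel:   "node.acooldomain.co/domain", // hypothetical label key
			DefaultDomain: "example.com",                // hypothetical default
		},
	}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}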


@@ -0,0 +1,84 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	servermanagerv1alpha1 "git.acooldomain.co/server-manager/kubernetes-operator/api/v1alpha1"
)
var _ = Describe("ServerManager Controller", func() {
Context("When reconciling a resource", func() {
const resourceName = "test-resource"
ctx := context.Background()
typeNamespacedName := types.NamespacedName{
Name: resourceName,
Namespace: "default", // TODO(user):Modify as needed
}
servermanager := &servermanagerv1alpha1.ServerManager{}
BeforeEach(func() {
By("creating the custom resource for the Kind ServerManager")
err := k8sClient.Get(ctx, typeNamespacedName, servermanager)
if err != nil && errors.IsNotFound(err) {
resource := &servermanagerv1alpha1.ServerManager{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Namespace: "default",
},
// TODO(user): Specify other spec details if needed.
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
}
})
AfterEach(func() {
// TODO(user): Cleanup logic after each test, like removing the resource instance.
resource := &servermanagerv1alpha1.ServerManager{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
By("Cleanup the specific resource instance ServerManager")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &ServerManagerReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
})
})
})


@@ -0,0 +1,96 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	servermanagerv1alpha1 "git.acooldomain.co/server-manager/kubernetes-operator/api/v1alpha1"
	// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
		// The BinaryAssetsDirectory is only required if you want to run the tests
		// directly, without calling the makefile target "test". If not set, it will
		// look for the default path defined in controller-runtime, which is
		// /usr/local/kubebuilder/. Note that you must have the required binaries
		// set up under the bin directory to run the tests directly; "make test"
		// sets them up and uses them automatically.
BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
}
var err error
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = servermanagerv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})