/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"
	"strings"
	"time"

	traefikv3 "github.com/traefik/traefik/v3/pkg/provider/kubernetes/crd/traefikio/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"

	servermanagerv1alpha1 "git.acooldomain.co/server-manager/kubernetes-operator/api/v1alpha1"
)

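// MiddlewareRef identifies a Traefik Middleware by name and namespace, as
// referenced from the operator's YAML configuration.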
type MiddlewareRef struct {
	Name      string `yaml:"name"`
	Namespace string `yaml:"namespace"`
}

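// BrowserConfig groups the file-browser related settings; at the moment this
// is only a reference to a middleware.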
type BrowserConfig struct {
	Middleware MiddlewareRef `yaml:"middleware"`
}

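// ServerManagerReconcilerConfig holds operator-level settings intended to be
// unmarshalled from YAML (see the yaml tags below) and injected into the
// reconciler. The structure follows the tags; the concrete values in this
// sketch are illustrative only:
//
//	browser_middleware:
//	  middleware:
//	    name: forward-auth
//	    namespace: traefik
//	domain_label: server-manager.acooldomain.co/domain
//	default_domain: example.com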
type ServerManagerReconcilerConfig struct {
	Browser       BrowserConfig `yaml:"browser_middleware"`
	DomainLabel   string        `yaml:"domain_label"`
	DefaultDomain string        `yaml:"default_domain"`
}

// ServerManagerReconciler reconciles a ServerManager object
type ServerManagerReconciler struct {
	client.Client
	Scheme *runtime.Scheme
	Config ServerManagerReconcilerConfig
}

// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/finalizers,verbs=update
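// The markers below are an assumption based on the resources this reconciler
// creates, reads, and deletes (Pods, Services, PersistentVolumeClaims) and the
// Node lookup used for domain resolution; adjust the verbs as needed.
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
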
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state. It
// compares the state specified by the ServerManager object against the actual
// cluster state, and then performs operations to make the cluster state
// reflect the state specified by the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logging := log.FromContext(ctx)
	s := &servermanagerv1alpha1.ServerManager{}

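	// Fetch the ServerManager being reconciled; a NotFound error means it was
	// deleted and there is nothing left to do.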
	err := r.Get(ctx, req.NamespacedName, s)
	if err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

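	// Ensure the backing PersistentVolumeClaim exists, creating it if necessary.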
	pvc := r.ServerPvc(s)
	foundPvc := &corev1.PersistentVolumeClaim{}
	err = r.Get(ctx, client.ObjectKeyFromObject(pvc), foundPvc)
	if err != nil {
		if errors.IsNotFound(err) {
			err = r.Create(ctx, pvc)
			if err != nil {
				return ctrl.Result{Requeue: true}, err
			}
		} else {
			return ctrl.Result{}, err
		}
	}
	logging.Info("verified pvc")

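	// Reconcile the server Pod: delete it when the server is switched off and
	// create it when it is switched on but missing.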
	serverPod := r.ServerPod(s, pvc)
	found := &corev1.Pod{}
	err = r.Get(ctx, client.ObjectKeyFromObject(serverPod), found)
	if err == nil && !s.Spec.Server.On {
		err = r.Delete(ctx, serverPod)
		return ctrl.Result{Requeue: true}, err
	}

	if err != nil {
		if errors.IsNotFound(err) {
			if s.Spec.Server.On {
				err = r.Create(ctx, serverPod)
				return ctrl.Result{Requeue: true}, err
			}
		} else {
			return ctrl.Result{}, err
		}
	}

logging.Info("verified pod")
|
|
|
|
	if found.Spec.NodeName == "" {
		logging.Info("waiting for pod to be scheduled")
		return ctrl.Result{RequeueAfter: time.Second * 10}, nil
	}

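	// Resolve the externally reachable domain: prefer the node's domain label,
	// falling back to the configured default.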
	domain := r.Config.DefaultDomain
	if r.Config.DomainLabel != "" {
		node := &corev1.Node{}
		err = r.Get(ctx, client.ObjectKey{Name: found.Spec.NodeName}, node)
		if err != nil {
			logging.Error(err, fmt.Sprintf("Failed to find node %s", found.Spec.NodeName))
			return ctrl.Result{}, err
		}

		labelDomain, ok := node.GetLabels()[r.Config.DomainLabel]
		if ok {
			domain = labelDomain
		}
	}

logging.Info("got domain", "domain", domain)
|
|
|
|
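	// Publish the resolved domain on the ServerManager status and let the next
	// reconcile continue from there.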
	if domain != s.Status.Server.Domain {
		logging.Info("updating ServerManager status", "NewDomain", domain, "OldDomain", s.Status.Server.Domain)
		s.Status.Server.Domain = domain
		err = r.Status().Update(ctx, s)
		if err != nil {
			logging.Error(err, "failed to update ServerManager status")
		}
		return ctrl.Result{}, err
	}

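	// Reconcile the NodePort Service that exposes the server's ports, mirroring
	// the Pod on/off handling above.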
	service := r.ServerService(s)
	foundService := &corev1.Service{}
	err = r.Get(ctx, client.ObjectKeyFromObject(service), foundService)
	if err == nil && !s.Spec.Server.On {
		err = r.Delete(ctx, service)
		return ctrl.Result{Requeue: true}, err
	}
	if err != nil {
		if !errors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		if s.Spec.Server.On {
			err = r.Create(ctx, service)
			return ctrl.Result{Requeue: true}, err
		}
	}
	logging.Info(fmt.Sprintf("verified service %#v", foundService))

	return ctrl.Result{}, nil
}

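// BrowserPod builds a filebrowser Pod that mounts the server's
// PersistentVolumeClaim at the server's working directory so the server files
// can be managed through a web UI. It is not yet created by Reconcile in this
// file.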
func (r *ServerManagerReconciler) BrowserPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim) *corev1.Pod {
	ports := make([]corev1.ContainerPort, len(s.Spec.Server.Ports))

	for i, port := range s.Spec.Server.Ports {
		ports[i] = corev1.ContainerPort{
			ContainerPort: port.Port,
			Protocol:      port.Protocol,
		}
	}

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-browser", s.Name),
			Namespace: s.Namespace,
			Labels:    map[string]string{"server": s.Name},
		},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: "volume",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc.Name,
						},
					},
				},
			},
			InitContainers: []corev1.Container{
				{
					Name:            "proxy-setter",
					Image:           "filebrowser/filebrowser",
					ImagePullPolicy: corev1.PullAlways,
					WorkingDir:      s.Spec.Server.WorkingDir,
					Ports:           ports,
					Args:            []string{},
				},
			},
			Containers: []corev1.Container{
				{
					Name:            "browser",
					Image:           "filebrowser/filebrowser",
					ImagePullPolicy: corev1.PullAlways,
					Ports:           ports,
					Args:            []string{},
					VolumeMounts: []corev1.VolumeMount{{
						Name:      "volume",
						MountPath: s.Spec.Server.WorkingDir,
					}},
					Stdin: true,
					TTY:   true,
				},
			},
		},
	}
	controllerutil.SetControllerReference(s, pod, r.Scheme)
	return pod
}

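// ServerService builds a NodePort Service that exposes the server Pod's ports,
// selecting pods by the "server" label.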
func (r *ServerManagerReconciler) ServerService(s *servermanagerv1alpha1.ServerManager) *corev1.Service {
	ports := make([]corev1.ServicePort, len(s.Spec.Server.Ports))
	for i, port := range s.Spec.Server.Ports {
		ports[i] = corev1.ServicePort{NodePort: 0, Port: port.Port, TargetPort: intstr.FromInt32(port.Port), Name: fmt.Sprintf("%s-%d", strings.ToLower(string(port.Protocol)), port.Port)}
	}
	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name,
			Namespace: s.Namespace,
		},
		Spec: corev1.ServiceSpec{
			Type:  corev1.ServiceTypeNodePort,
			Ports: ports,
			Selector: map[string]string{
				"server": s.Name,
			},
		},
	}
	controllerutil.SetControllerReference(s, service, r.Scheme)
	return service
}

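// ServerPvc builds the ReadWriteMany PersistentVolumeClaim that backs the
// server's working directory, sized from Spec.Storage.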
func (r *ServerManagerReconciler) ServerPvc(s *servermanagerv1alpha1.ServerManager) *corev1.PersistentVolumeClaim {
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name,
			Namespace: s.Namespace,
		},

		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(s.Spec.Storage)},
			},
		},
	}

	controllerutil.SetControllerReference(s, pvc, r.Scheme)

	return pvc
}

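// ServerPod builds the server Pod described by the ServerManager spec,
// mounting the PersistentVolumeClaim at the configured working directory.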
func (r *ServerManagerReconciler) ServerPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim) *corev1.Pod {
	ports := make([]corev1.ContainerPort, len(s.Spec.Server.Ports))

	for i, port := range s.Spec.Server.Ports {
		ports[i] = corev1.ContainerPort{
			ContainerPort: port.Port,
			Protocol:      port.Protocol,
		}
	}

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name,
			Namespace: s.Namespace,
			Labels:    map[string]string{"server": s.Name},
		},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: "volume",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc.Name,
						},
					},
				},
			},
			Containers: []corev1.Container{
				{
					Name:            "server",
					Image:           s.Spec.Server.Image,
					ImagePullPolicy: corev1.PullAlways,
					Command:         s.Spec.Server.Command,
					Args:            s.Spec.Server.Args,
					WorkingDir:      s.Spec.Server.WorkingDir,
					Ports:           ports,
					VolumeMounts: []corev1.VolumeMount{{
						Name:      "volume",
						MountPath: s.Spec.Server.WorkingDir,
					}},
					Stdin: true,
					TTY:   true,
				},
			},
		},
	}
	controllerutil.SetControllerReference(s, pod, r.Scheme)
	return pod
}

// SetupWithManager sets up the controller with the Manager.
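// A typical wiring from the manager's main package looks roughly like this
// (a minimal sketch; the variable names are assumptions, not part of this
// repository):
//
//	if err := (&controller.ServerManagerReconciler{
//		Client: mgr.GetClient(),
//		Scheme: mgr.GetScheme(),
//		Config: loadedConfig,
//	}).SetupWithManager(mgr); err != nil {
//		setupLog.Error(err, "unable to create controller", "controller", "ServerManager")
//		os.Exit(1)
//	}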
func (r *ServerManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&servermanagerv1alpha1.ServerManager{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.Service{}).
		Owns(&corev1.PersistentVolumeClaim{}).
		Owns(&traefikv3.IngressRoute{}).
		Complete(r)
}