All checks were successful
Build and Push Docker Image / Build image (push) Successful in 2m6s
750 lines
23 KiB
Go
750 lines
23 KiB
Go
/*
|
|
Copyright 2025.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package controller
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"strings"
|
|
"time"
|
|
|
|
traefikv3 "github.com/traefik/traefik/v3/pkg/provider/kubernetes/crd/traefikio/v1alpha1"
|
|
corev1 "k8s.io/api/core/v1"
|
|
"k8s.io/apimachinery/pkg/api/errors"
|
|
"k8s.io/apimachinery/pkg/api/resource"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/apimachinery/pkg/runtime"
|
|
"k8s.io/apimachinery/pkg/util/intstr"
|
|
ctrl "sigs.k8s.io/controller-runtime"
|
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
|
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
|
"sigs.k8s.io/controller-runtime/pkg/log"
|
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
|
|
|
servermanagerv1alpha1 "git.acooldomain.co/server-manager/kubernetes-operator/api/v1alpha1"
|
|
)
|
|
|
|
// MiddlewareRef identifies a Traefik middleware by name and namespace.
// It mirrors traefikv3.MiddlewareRef so middleware references can be
// supplied through the operator's YAML configuration (see BrowserConfig).
type MiddlewareRef struct {
	// Name of the Traefik Middleware object.
	Name string `yaml:"name"`
	// Namespace the Middleware object lives in.
	Namespace string `yaml:"namespace"`
}
|
|
|
|
// TraefikService is the YAML-configurable mirror of a Traefik
// load-balancer backend (traefikv3.LoadBalancerSpec); BrowserIngress copies
// these fields one-to-one into the generated IngressRoute.
type TraefikService struct {
	Name             string             `yaml:"name"`
	Kind             string             `yaml:"kind,omitempty"`
	Namespace        string             `yaml:"namespace,omitempty"`
	Port             intstr.IntOrString `yaml:"port,omitempty"`
	Scheme           string             `yaml:"scheme,omitempty"`
	Strategy         string             `yaml:"strategy,omitempty"`
	PassHostHeader   *bool              `yaml:"passHostHeader,omitempty"`
	ServersTransport string             `yaml:"serversTransport,omitempty"`
	Weight           *int               `yaml:"weight,omitempty"`
	NativeLB         *bool              `yaml:"nativeLB,omitempty"`
	NodePortLB       bool               `yaml:"nodePortLB,omitempty"`
}
|
|
|
|
// TraefikRoute is the YAML-configurable mirror of traefikv3.Route. Routes
// listed in BrowserConfig.AdditionalRoutes are converted into Traefik CRD
// routes by BrowserIngress and prepended to the browser's own route.
type TraefikRoute struct {
	Match       string           `yaml:"match"`
	Kind        string           `yaml:"kind,omitempty"`
	Priority    int              `yaml:"priority,omitempty"`
	Syntax      string           `yaml:"syntax,omitempty"`
	Services    []TraefikService `yaml:"services,omitempty"`
	Middlewares []MiddlewareRef  `yaml:"middlewares,omitempty"`
}
|
|
|
|
// BrowserConfig holds the operator-level settings for the file-browser
// exposure: the Traefik routing (middleware, entrypoints, TLS cert
// resolver), the public domain/sub-path, and the authentication header the
// filebrowser instance is configured to trust.
type BrowserConfig struct {
	// Middleware attached to the generated browser route (e.g. auth).
	Middleware MiddlewareRef `yaml:"middleware"`
	// AuthHeader is passed to `filebrowser config set --auth.header=...`
	// in the browser pod's init container.
	AuthHeader string `yaml:"auth_header"`
	// AdditionalRoutes are extra Traefik routes added verbatim to every
	// generated IngressRoute.
	AdditionalRoutes []TraefikRoute `yaml:"additional_routes"`
	Entrypoints      []string       `yaml:"entrypoints"`
	CertResolver     string         `yaml:"cert_resolver"`
	// Domain is the host the browser is served on; SubPath is an optional
	// path prefix prepended to "/<namespace>/<name>".
	Domain  string `yaml:"domain"`
	SubPath string `yaml:"sub_path"`
}
|
|
|
|
// ServerManagerReconcilerConfig is the operator's YAML configuration root.
type ServerManagerReconcilerConfig struct {
	// Browser configures how the file browser is exposed.
	Browser BrowserConfig `yaml:"browser"`
	// DomainLabel, when non-empty, names a node label whose value overrides
	// DefaultDomain for pods scheduled on that node.
	DomainLabel string `yaml:"domain_label"`
	// DefaultDomain is the fallback public domain published in the status.
	DefaultDomain string `yaml:"default_domain"`
}
|
|
|
|
// ServerManagerReconciler reconciles a ServerManager object: it manages the
// server pod/service/PVC, the companion file-browser pod/service/PVC, and
// the Traefik IngressRoute that exposes the browser.
type ServerManagerReconciler struct {
	client.Client
	// Scheme is used to set owner references on created child resources.
	Scheme *runtime.Scheme
	// Config carries the operator's YAML-provided settings.
	Config ServerManagerReconcilerConfig
}
|
|
|
|
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers,verbs=get;list;watch;create;update;patch;delete
|
|
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/status,verbs=get;update;patch
|
|
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=images,verbs=list;get
|
|
// +kubebuilder:rbac:groups=traefik.io,resources=ingressroutes,verbs=get;list;watch;create;update;patch;delete
|
|
// +kubebuilder:rbac:groups=,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
|
|
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/finalizers,verbs=update
|
|
// +kubebuilder:rbac:groups=,resources=services,verbs=get;list;watch;create;update;patch;delete
|
|
// +kubebuilder:rbac:groups=,resources=pods,verbs=get;list;watch;create;delete
|
|
|
|
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
|
// move the current state of the cluster closer to the desired state.
|
|
// TODO(user): Modify the Reconcile function to compare the state specified by
|
|
// the ServerManager object against the actual cluster state, and then
|
|
// perform operations to make the cluster state reflect the state specified by
|
|
// the user.
|
|
//
|
|
// For more details, check Reconcile and its Result here:
|
|
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
|
|
func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
|
logging := log.FromContext(ctx)
|
|
s := &servermanagerv1alpha1.ServerManager{}
|
|
statusChanged := false
|
|
|
|
err := r.Get(ctx, req.NamespacedName, s)
|
|
if err != nil {
|
|
if errors.IsNotFound(err) {
|
|
return ctrl.Result{}, nil
|
|
}
|
|
return ctrl.Result{}, err
|
|
}
|
|
|
|
pvc := r.ServerPvc(s)
|
|
found_pvc := &corev1.PersistentVolumeClaim{}
|
|
err = r.Get(ctx, client.ObjectKey{Namespace: pvc.Namespace, Name: pvc.Name}, found_pvc)
|
|
if err != nil {
|
|
if errors.IsNotFound(err) {
|
|
err = r.Create(ctx, pvc)
|
|
if err != nil {
|
|
logging.Info("creating server pvc")
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
} else {
|
|
return ctrl.Result{}, err
|
|
}
|
|
}
|
|
logging.Info("verified pvc")
|
|
|
|
browserPvc := r.BrowserPvc(s)
|
|
foundBrowserPvc := &corev1.PersistentVolumeClaim{}
|
|
err = r.Get(ctx, client.ObjectKey{Namespace: browserPvc.Namespace, Name: browserPvc.Name}, foundBrowserPvc)
|
|
if err != nil {
|
|
if errors.IsNotFound(err) {
|
|
err = r.Create(ctx, browserPvc)
|
|
if err != nil {
|
|
logging.Info("creating server browserPvc")
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
} else {
|
|
return ctrl.Result{}, err
|
|
}
|
|
}
|
|
logging.Info("verified browserPvc")
|
|
|
|
image, err := r.GetImage(ctx, s)
|
|
if err != nil {
|
|
logging.Error(err, "Failed to get image")
|
|
return reconcile.Result{}, err
|
|
}
|
|
if len(s.Spec.Server.Ports) == 0 {
|
|
s.Spec.Server.Ports = image.Spec.Ports
|
|
err := r.Update(ctx, s)
|
|
return reconcile.Result{}, err
|
|
}
|
|
|
|
serverPod := r.ServerPod(s, pvc, image)
|
|
found := &corev1.Pod{}
|
|
err = r.Get(ctx, client.ObjectKey{Namespace: pvc.Namespace, Name: pvc.Name}, found)
|
|
if err == nil && !s.Spec.Server.On {
|
|
logging.Info("deleting server pod")
|
|
err = r.Delete(ctx, found)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
if err != nil && !errors.IsNotFound(err) {
|
|
return ctrl.Result{}, err
|
|
}
|
|
if errors.IsNotFound(err) && s.Spec.Server.On {
|
|
logging.Info("creating server pod")
|
|
err = r.Create(ctx, serverPod)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
|
|
if s.Spec.Server.On {
|
|
switch found.Status.Phase {
|
|
case corev1.PodPending:
|
|
logging.Info("Waiting for server pod to start")
|
|
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
|
|
case corev1.PodRunning:
|
|
if !s.Status.Server.Running {
|
|
s.Status.Server.Running = true
|
|
statusChanged = true
|
|
}
|
|
if s.Status.Server.Command == nil {
|
|
s.Status.Server.Command = serverPod.Spec.Containers[0].Command
|
|
statusChanged = true
|
|
}
|
|
if s.Status.Server.Args == nil {
|
|
s.Status.Server.Args = serverPod.Spec.Containers[0].Args
|
|
statusChanged = true
|
|
}
|
|
default:
|
|
if s.Status.Server.Running {
|
|
s.Status.Server.Running = false
|
|
statusChanged = true
|
|
}
|
|
if len(s.Status.Server.Args) != 0 {
|
|
s.Status.Server.Args = nil
|
|
statusChanged = true
|
|
}
|
|
if len(s.Status.Server.Command) != 0 {
|
|
s.Status.Server.Command = nil
|
|
statusChanged = true
|
|
}
|
|
}
|
|
}
|
|
|
|
if errors.IsNotFound(err) && !s.Spec.Server.On {
|
|
if s.Status.Server.Running {
|
|
s.Status.Server.Running = false
|
|
statusChanged = true
|
|
}
|
|
if len(s.Status.Server.Args) != 0 {
|
|
s.Status.Server.Args = nil
|
|
statusChanged = true
|
|
}
|
|
if len(s.Status.Server.Command) != 0 {
|
|
s.Status.Server.Command = nil
|
|
statusChanged = true
|
|
}
|
|
}
|
|
|
|
logging.Info("verified pod")
|
|
if s.Status.Server.Running {
|
|
domain := r.Config.DefaultDomain
|
|
if r.Config.DomainLabel != "" {
|
|
node := &corev1.Node{}
|
|
err = r.Get(ctx, client.ObjectKeyFromObject(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: found.Spec.NodeName}}), node)
|
|
if err != nil {
|
|
logging.Error(err, fmt.Sprintf("Failed to find node %s", found.Spec.NodeName))
|
|
return ctrl.Result{}, err
|
|
}
|
|
|
|
labelDomain, ok := node.GetLabels()[r.Config.DomainLabel]
|
|
if ok {
|
|
domain = labelDomain
|
|
}
|
|
}
|
|
|
|
if domain != s.Status.Server.Domain {
|
|
logging.Info("got domain", "domain", domain)
|
|
s.Status.Server.Domain = domain
|
|
statusChanged = true
|
|
}
|
|
}
|
|
|
|
service := r.ServerService(s)
|
|
foundService := &corev1.Service{}
|
|
err = r.Get(ctx, client.ObjectKeyFromObject(service), foundService)
|
|
if err == nil && !s.Spec.Server.On {
|
|
logging.Info("deleting server service")
|
|
err = r.Delete(ctx, foundService)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
if err != nil && !errors.IsNotFound(err) {
|
|
return ctrl.Result{}, err
|
|
}
|
|
if errors.IsNotFound(err) && s.Spec.Server.On {
|
|
logging.Info("creating server service")
|
|
err = r.Create(ctx, service)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
|
|
if s.Spec.Server.On {
|
|
if len(s.Status.Server.HostPorts) == 0 {
|
|
hostPorts := make([]servermanagerv1alpha1.PortMapping, len(foundService.Spec.Ports))
|
|
for i, port := range foundService.Spec.Ports {
|
|
if port.NodePort == 0 {
|
|
logging.Info("Cluster didn't assign ports yet, waiting")
|
|
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
|
|
}
|
|
|
|
hostPorts[i] = servermanagerv1alpha1.PortMapping{
|
|
Protocol: port.Protocol,
|
|
HostPort: port.NodePort,
|
|
TargetPort: port.TargetPort.IntVal,
|
|
}
|
|
}
|
|
logging.Info("Found service mapping", "mapping", hostPorts)
|
|
|
|
s.Status.Server.HostPorts = hostPorts
|
|
statusChanged = true
|
|
}
|
|
} else {
|
|
if len(s.Status.Server.HostPorts) > 0 {
|
|
s.Status.Server.HostPorts = nil
|
|
}
|
|
}
|
|
logging.Info("verified server service")
|
|
|
|
browserPod := r.BrowserPod(s, pvc, browserPvc)
|
|
foundBrowser := &corev1.Pod{}
|
|
err = r.Get(ctx, client.ObjectKeyFromObject(browserPod), foundBrowser)
|
|
if err == nil && !s.Spec.Browser.On {
|
|
logging.Info("deleting browser pod")
|
|
err = r.Delete(ctx, foundBrowser)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
if err != nil && !errors.IsNotFound(err) {
|
|
return ctrl.Result{}, err
|
|
}
|
|
if errors.IsNotFound(err) && s.Spec.Browser.On {
|
|
logging.Info("creating browser pod")
|
|
err = r.Create(ctx, browserPod)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
if s.Spec.Browser.On {
|
|
switch foundBrowser.Status.Phase {
|
|
case corev1.PodPending:
|
|
logging.Info("Waiting for browser pod to start")
|
|
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
|
|
case corev1.PodRunning:
|
|
if !s.Status.Browser.Running {
|
|
s.Status.Browser.Running = true
|
|
statusChanged = true
|
|
}
|
|
default:
|
|
if s.Status.Browser.Running {
|
|
s.Status.Browser.Running = false
|
|
statusChanged = true
|
|
}
|
|
}
|
|
}
|
|
if errors.IsNotFound(err) && !s.Spec.Browser.On {
|
|
if s.Status.Browser.Running {
|
|
s.Status.Browser.Running = false
|
|
statusChanged = true
|
|
}
|
|
}
|
|
logging.Info("verified browser pod")
|
|
|
|
browserService := r.BrowserService(s)
|
|
foundService = &corev1.Service{}
|
|
err = r.Get(ctx, client.ObjectKeyFromObject(browserService), foundService)
|
|
if err == nil && !s.Spec.Browser.On {
|
|
logging.Info("deleting browser service")
|
|
err = r.Delete(ctx, foundService)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
if err != nil && !errors.IsNotFound(err) {
|
|
return ctrl.Result{}, err
|
|
}
|
|
if errors.IsNotFound(err) && s.Spec.Browser.On {
|
|
logging.Info("creating browser service")
|
|
err = r.Create(ctx, browserService)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
logging.Info("verified browser service")
|
|
|
|
browserIngress := r.BrowserIngress(s, browserService)
|
|
foundIngress := &traefikv3.IngressRoute{}
|
|
err = r.Get(ctx, client.ObjectKeyFromObject(browserIngress), foundIngress)
|
|
if err == nil && !s.Spec.Browser.On {
|
|
logging.Info("deleting browser ingress")
|
|
err = r.Delete(ctx, foundIngress)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
if err != nil && !errors.IsNotFound(err) {
|
|
return ctrl.Result{}, err
|
|
}
|
|
if errors.IsNotFound(err) && s.Spec.Browser.On {
|
|
logging.Info("creating browser ingress")
|
|
err = r.Create(ctx, browserIngress)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
|
|
logging.Info("verified browser ingress")
|
|
|
|
browserUrl := r.GenerateBrowserUrl(s)
|
|
if s.Spec.Browser.On {
|
|
s.Status.Browser.Url = browserUrl
|
|
statusChanged = true
|
|
} else {
|
|
s.Status.Browser.Url = ""
|
|
}
|
|
|
|
if statusChanged {
|
|
logging.Info("Updating ServerManager")
|
|
err = r.Status().Update(ctx, s)
|
|
return ctrl.Result{Requeue: true}, err
|
|
}
|
|
|
|
return ctrl.Result{}, nil
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) GenerateBrowserUrl(s *servermanagerv1alpha1.ServerManager) string {
|
|
return fmt.Sprintf("%s%s", r.Config.Browser.Domain, r.GenerateBrowserSubPath(s))
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) GenerateBrowserSubPath(s *servermanagerv1alpha1.ServerManager) string {
|
|
if r.Config.Browser.SubPath != "" {
|
|
return fmt.Sprintf("%s/%s/%s", r.Config.Browser.SubPath, s.Namespace, s.Name)
|
|
} else {
|
|
return fmt.Sprintf("/%s/%s", s.Namespace, s.Name)
|
|
}
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) BrowserPvc(s *servermanagerv1alpha1.ServerManager) *corev1.PersistentVolumeClaim {
|
|
pvc := &corev1.PersistentVolumeClaim{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-browser", s.Name),
|
|
Namespace: s.Namespace,
|
|
},
|
|
|
|
Spec: corev1.PersistentVolumeClaimSpec{
|
|
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
|
|
Resources: corev1.VolumeResourceRequirements{
|
|
Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(s.Spec.Storage)},
|
|
},
|
|
},
|
|
}
|
|
|
|
controllerutil.SetControllerReference(s, pvc, r.Scheme)
|
|
|
|
return pvc
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) BrowserIngress(s *servermanagerv1alpha1.ServerManager, service *corev1.Service) *traefikv3.IngressRoute {
|
|
routes := make([]traefikv3.Route, len(r.Config.Browser.AdditionalRoutes)+1)
|
|
|
|
for i, route := range r.Config.Browser.AdditionalRoutes {
|
|
services := make([]traefikv3.Service, len(route.Services))
|
|
for j, service := range route.Services {
|
|
services[j] = traefikv3.Service{
|
|
LoadBalancerSpec: traefikv3.LoadBalancerSpec{
|
|
Name: service.Name,
|
|
Kind: service.Kind,
|
|
Namespace: service.Namespace,
|
|
Port: service.Port,
|
|
Scheme: service.Scheme,
|
|
Strategy: service.Strategy,
|
|
PassHostHeader: service.PassHostHeader,
|
|
ServersTransport: service.ServersTransport,
|
|
Weight: service.Weight,
|
|
NativeLB: service.NativeLB,
|
|
NodePortLB: service.NodePortLB,
|
|
},
|
|
}
|
|
}
|
|
|
|
middlewares := make([]traefikv3.MiddlewareRef, len(route.Middlewares))
|
|
for j, middleware := range route.Middlewares {
|
|
middlewares[j] = traefikv3.MiddlewareRef{
|
|
Name: middleware.Name,
|
|
Namespace: middleware.Namespace,
|
|
}
|
|
}
|
|
|
|
routes[i] = traefikv3.Route{
|
|
Match: route.Match,
|
|
Kind: route.Kind,
|
|
Priority: route.Priority,
|
|
Syntax: route.Syntax,
|
|
Services: services,
|
|
Middlewares: middlewares,
|
|
}
|
|
}
|
|
|
|
routes[len(routes)-1] = traefikv3.Route{
|
|
Match: fmt.Sprintf("Host(`%s`) && PathPrefix(`%s`)", r.Config.Browser.Domain, r.GenerateBrowserSubPath(s)),
|
|
Kind: "Rule",
|
|
Services: []traefikv3.Service{
|
|
{
|
|
LoadBalancerSpec: traefikv3.LoadBalancerSpec{
|
|
Name: service.Name,
|
|
Namespace: service.Namespace,
|
|
Port: intstr.FromInt(80),
|
|
},
|
|
},
|
|
},
|
|
Middlewares: []traefikv3.MiddlewareRef{
|
|
traefikv3.MiddlewareRef(r.Config.Browser.Middleware),
|
|
},
|
|
}
|
|
|
|
ingressRoute := &traefikv3.IngressRoute{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: s.Name,
|
|
Namespace: s.Namespace,
|
|
},
|
|
Spec: traefikv3.IngressRouteSpec{
|
|
EntryPoints: r.Config.Browser.Entrypoints,
|
|
Routes: routes,
|
|
TLS: &traefikv3.TLS{
|
|
CertResolver: r.Config.Browser.CertResolver,
|
|
},
|
|
},
|
|
}
|
|
|
|
controllerutil.SetControllerReference(s, ingressRoute, r.Scheme)
|
|
|
|
return ingressRoute
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) BrowserService(s *servermanagerv1alpha1.ServerManager) *corev1.Service {
|
|
service := &corev1.Service{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-browser", s.Name),
|
|
Namespace: s.Namespace,
|
|
},
|
|
Spec: corev1.ServiceSpec{
|
|
Type: corev1.ServiceTypeNodePort,
|
|
Ports: []corev1.ServicePort{{Name: "web", Protocol: corev1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(80)}},
|
|
Selector: map[string]string{
|
|
"browser": s.Name,
|
|
},
|
|
},
|
|
}
|
|
controllerutil.SetControllerReference(s, service, r.Scheme)
|
|
return service
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) BrowserPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim, browserPvc *corev1.PersistentVolumeClaim) *corev1.Pod {
|
|
ports := make([]corev1.ContainerPort, len(s.Spec.Server.Ports))
|
|
|
|
for i, port := range s.Spec.Server.Ports {
|
|
ports[i] = corev1.ContainerPort{
|
|
ContainerPort: port.Port,
|
|
Protocol: port.Protocol,
|
|
}
|
|
}
|
|
|
|
pod := &corev1.Pod{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-browser", s.Name),
|
|
Namespace: s.Namespace,
|
|
Labels: map[string]string{"browser": s.Name},
|
|
},
|
|
Spec: corev1.PodSpec{
|
|
Volumes: []corev1.Volume{
|
|
{
|
|
Name: "volume",
|
|
VolumeSource: corev1.VolumeSource{
|
|
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
|
ClaimName: pvc.Name,
|
|
},
|
|
},
|
|
},
|
|
{
|
|
Name: "browser-volume",
|
|
VolumeSource: corev1.VolumeSource{
|
|
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
|
ClaimName: browserPvc.Name,
|
|
},
|
|
},
|
|
},
|
|
},
|
|
InitContainers: []corev1.Container{
|
|
{
|
|
Name: "proxy-setter",
|
|
Image: "filebrowser/filebrowser",
|
|
ImagePullPolicy: corev1.PullIfNotPresent,
|
|
Ports: ports,
|
|
Command: []string{"/bin/sh"},
|
|
Args: []string{"-c", fmt.Sprintf("rm /tmp/database/filebrowser.db; /filebrowser config init -d /tmp/database/filebrowser.db && /filebrowser config set --auth.method=proxy --auth.header=%s -d /tmp/database/filebrowser.db", r.Config.Browser.AuthHeader)},
|
|
VolumeMounts: []corev1.VolumeMount{
|
|
{
|
|
Name: "browser-volume",
|
|
MountPath: "/tmp/database",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
Containers: []corev1.Container{
|
|
{
|
|
Name: "browser",
|
|
Image: "filebrowser/filebrowser",
|
|
ImagePullPolicy: corev1.PullIfNotPresent,
|
|
Ports: ports,
|
|
Args: []string{"-d", "/tmp/database/filebrowser.db", "-r", "/tmp/data", "-b", r.GenerateBrowserSubPath(s)},
|
|
VolumeMounts: []corev1.VolumeMount{
|
|
{
|
|
Name: "volume",
|
|
MountPath: "/tmp/data",
|
|
},
|
|
{
|
|
Name: "browser-volume",
|
|
MountPath: "/tmp/database",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
controllerutil.SetControllerReference(s, pod, r.Scheme)
|
|
return pod
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) ServerService(s *servermanagerv1alpha1.ServerManager) *corev1.Service {
|
|
ports := make([]corev1.ServicePort, len(s.Spec.Server.Ports))
|
|
for i, port := range s.Spec.Server.Ports {
|
|
ports[i] = corev1.ServicePort{NodePort: 0, Port: port.Port, TargetPort: intstr.FromInt32(port.Port), Name: fmt.Sprintf("%s-%d", strings.ToLower(string(port.Protocol)), port.Port)}
|
|
}
|
|
service := &corev1.Service{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: s.Name,
|
|
Namespace: s.Namespace,
|
|
},
|
|
Spec: corev1.ServiceSpec{
|
|
Type: corev1.ServiceTypeNodePort,
|
|
Ports: ports,
|
|
Selector: map[string]string{
|
|
"server": s.Name,
|
|
},
|
|
},
|
|
}
|
|
controllerutil.SetControllerReference(s, service, r.Scheme)
|
|
return service
|
|
}
|
|
|
|
func (r *ServerManagerReconciler) ServerPvc(s *servermanagerv1alpha1.ServerManager) *corev1.PersistentVolumeClaim {
|
|
pvc := &corev1.PersistentVolumeClaim{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: s.Name,
|
|
Namespace: s.Namespace,
|
|
},
|
|
|
|
Spec: corev1.PersistentVolumeClaimSpec{
|
|
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
|
|
Resources: corev1.VolumeResourceRequirements{
|
|
Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(s.Spec.Storage)},
|
|
},
|
|
},
|
|
}
|
|
|
|
controllerutil.SetControllerReference(s, pvc, r.Scheme)
|
|
|
|
return pvc
|
|
}
|
|
|
|
// ServerPod builds the desired server pod for s: a single "server" container
// from the referenced Image with the server PVC mounted at the image's
// volume path (falling back to its working directory), plus an optional init
// container when the image declares an init script.
func (r *ServerManagerReconciler) ServerPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim, image *servermanagerv1alpha1.Image) *corev1.Pod {
	// Container ports come from the image spec, not s.Spec.Server.Ports.
	serverPorts := image.Spec.Ports

	ports := make([]corev1.ContainerPort, len(serverPorts))

	for i, port := range serverPorts {
		ports[i] = corev1.ContainerPort{
			ContainerPort: port.Port,
			Protocol:      port.Protocol,
		}
	}

	// Spec-level command/args override the image defaults when provided.
	command := image.Spec.Command
	if len(s.Spec.Server.Command) > 0 {
		command = s.Spec.Server.Command
	}

	args := image.Spec.Args
	if len(s.Spec.Server.Args) > 0 {
		args = s.Spec.Server.Args
	}

	var initContainers []corev1.Container = nil

	if image.Spec.InitScript != nil {
		initContainers = []corev1.Container{
			{
				Name:            "init",
				Image:           image.Spec.InitScript.Image,
				ImagePullPolicy: corev1.PullIfNotPresent,
				// NOTE(review): this pairs the image's *main* Command with the
				// init script's Args, and ignores the spec-level command
				// override computed above — confirm this is intentional.
				Command: image.Spec.Command,
				Args:    image.Spec.InitScript.Args,
				VolumeMounts: []corev1.VolumeMount{
					{
						Name:      "volume",
						MountPath: "/data",
					},
				},
			},
		}
	}

	// Mount point of the data volume in the server container; defaults to
	// the image's working directory when no explicit volume path is set.
	volumePath := image.Spec.Volume
	if volumePath == "" {
		volumePath = image.Spec.WorkingDir
	}

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name,
			Namespace: s.Namespace,
			// Matched by ServerService's selector.
			Labels: map[string]string{"server": s.Name},
		},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: "volume",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc.Name,
						},
					},
				},
			},
			InitContainers: initContainers,
			Containers: []corev1.Container{
				{
					Name:            "server",
					Image:           image.Spec.Location,
					ImagePullPolicy: corev1.PullAlways,
					Command:         command,
					Args:            args,
					WorkingDir:      image.Spec.WorkingDir,
					Ports:           ports,
					VolumeMounts: []corev1.VolumeMount{{
						Name:      "volume",
						MountPath: volumePath,
					}},
					// Keep stdin/tty open, e.g. for interactive server consoles.
					Stdin: true,
					TTY:   true,
				},
			},
		},
	}

	// Owner reference ties the pod's lifetime to the ServerManager; the
	// returned error is ignored here, as everywhere else in this file.
	controllerutil.SetControllerReference(s, pod, r.Scheme)
	return pod
}
|
|
|
|
func (r *ServerManagerReconciler) GetImage(ctx context.Context, s *servermanagerv1alpha1.ServerManager) (*servermanagerv1alpha1.Image, error) {
|
|
image := &servermanagerv1alpha1.Image{}
|
|
err := r.Get(ctx, client.ObjectKey{Name: s.Spec.Server.Image, Namespace: s.Namespace}, image)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return image, nil
|
|
}
|
|
|
|
// SetupWithManager sets up the controller with the Manager. Besides
// watching ServerManager objects, it watches the Pods, Services, PVCs, and
// Traefik IngressRoutes the reconciler owns, so changes to those children
// trigger a reconcile of their parent.
func (r *ServerManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&servermanagerv1alpha1.ServerManager{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.Service{}).
		Owns(&corev1.PersistentVolumeClaim{}).
		Owns(&traefikv3.IngressRoute{}).
		Complete(r)
}
|