added working browser with proxy
parent 27d24e5b1d
commit df8facd70b
@@ -29,6 +29,12 @@ type BrowserStatus struct {
    Running bool `json:"running,omitempty"`
}

type PortMapping struct {
    Protocol   corev1.Protocol `json:"protocol,omitempty"`
    HostPort   int32           `json:"host_port,omitempty"`
    TargetPort int32           `json:"target_port,omitempty"`
}

type Port struct {
    Protocol corev1.Protocol `json:"protocol,omitempty"`
    Port     int32           `json:"port,omitempty"`
@@ -37,7 +43,7 @@ type Port struct {
type ServerStatus struct {
    Domain    string `json:"domain,omitempty"`
    Running   bool   `json:"running,omitempty"`
    HostPorts []Port        `json:"host_ports,omitempty"`
    HostPorts []PortMapping `json:"host_ports,omitempty"`
}

type ServerSpec struct {
@@ -55,7 +61,6 @@ type BrowserSpec struct {

// ServerManagerSpec defines the desired state of ServerManager
type ServerManagerSpec struct {
    Id      string `json:"id,omitempty"`
    Storage string `json:"storage,omitempty"`
    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
    // Important: Run "make" to regenerate code after modifying this file

@@ -27,7 +27,6 @@ import (
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BrowserSpec) DeepCopyInto(out *BrowserSpec) {
    *out = *in
    out.Port = in.Port
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrowserSpec.
@@ -70,6 +69,21 @@ func (in *Port) DeepCopy() *Port {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortMapping) DeepCopyInto(out *PortMapping) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping.
func (in *PortMapping) DeepCopy() *PortMapping {
    if in == nil {
        return nil
    }
    out := new(PortMapping)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerManager) DeepCopyInto(out *ServerManager) {
    *out = *in
@@ -198,7 +212,7 @@ func (in *ServerStatus) DeepCopyInto(out *ServerStatus) {
    *out = *in
    if in.HostPorts != nil {
        in, out := &in.HostPorts, &out.HostPorts
        *out = make([]Port, len(*in))
        *out = make([]PortMapping, len(*in))
        copy(*out, *in)
    }
}
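Not part of the diff: with the json tags above, a single entry in status.server.host_ports serializes roughly as {"protocol":"TCP","host_port":30565,"target_port":25565} (illustrative values; host_port is the NodePort the cluster assigns to the server service, target_port is the port the service forwards to).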
21  config.yaml
@@ -1,2 +1,23 @@
domain_label: "ddns.acooldomain.co/hostname"
default_domain: "acooldomain.co"
browser:
  domain: games.acooldomain.co
  sub_path: /browsers
  auth_header: x-authentik-username
  cert_resolver: letsencrypt
  entrypoints:
    - websecure

  additional_routes:
    - kind: Rule
      match: "Host(`games.acooldomain.co`) && PathPrefix(`/outpost.goauthentik.io/`)"
      priority: 15
      services:
        - kind: Service
          name: ak-outpost-traefik
          namespace: authentik
          port: 9000

  middleware:
    name: authentik
    namespace: authentik
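Not part of the diff: a minimal sketch of how this file could be decoded into the ServerManagerReconcilerConfig struct retagged later in this commit, assuming gopkg.in/yaml.v3 and a hard-coded "config.yaml" path (the operator's actual config loading is not shown here):

package main

import (
    "fmt"
    "os"

    "gopkg.in/yaml.v3"
)

// Field names and yaml tags mirror BrowserConfig / ServerManagerReconcilerConfig in the
// controller changes below; only the fields needed for this example are included.
type browserConfig struct {
    Domain  string `yaml:"domain"`
    SubPath string `yaml:"sub_path"`
}

type reconcilerConfig struct {
    Browser       browserConfig `yaml:"browser"`
    DomainLabel   string        `yaml:"domain_label"`
    DefaultDomain string        `yaml:"default_domain"`
}

func main() {
    raw, err := os.ReadFile("config.yaml") // path is an assumption
    if err != nil {
        panic(err)
    }
    var cfg reconcilerConfig
    if err := yaml.Unmarshal(raw, &cfg); err != nil {
        panic(err)
    }
    fmt.Println(cfg.Browser.Domain, cfg.Browser.SubPath, cfg.DefaultDomain)
}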
@@ -41,23 +41,9 @@ spec:
            properties:
              browser:
                properties:
                  image:
                    type: string
                  "on":
                    type: boolean
                  port:
                    properties:
                      port:
                        format: int32
                        type: integer
                      protocol:
                        description: Protocol defines network protocols supported
                          for things like container ports.
                        type: string
                    type: object
                type: object
              id:
                type: string
              server:
                description: |-
                  INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -113,13 +99,16 @@ spec:
              host_ports:
                items:
                  properties:
                    port:
                    host_port:
                      format: int32
                      type: integer
                    protocol:
                      description: Protocol defines network protocols supported
                        for things like container ports.
                      type: string
                    target_port:
                      format: int32
                      type: integer
                  type: object
                type: array
              running:
@@ -0,0 +1,51 @@
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
  annotations:
    alm-examples: '[]'
    capabilities: Basic Install
  name: kubernetes-operator.v0.0.0
  namespace: placeholder
spec:
  apiservicedefinitions: {}
  customresourcedefinitions:
    owned:
    - description: ServerManager is the Schema for the servermanagers API
      displayName: Server Manager
      kind: ServerManager
      name: servermanagers.server-manager.acooldomain.co
      version: v1alpha1
  description: An operator that manages game servers and relevant resources
  displayName: server-manager-operator
  icon:
  - base64data: ""
    mediatype: ""
  install:
    spec:
      deployments: null
      strategy: ""
  installModes:
  - supported: false
    type: OwnNamespace
  - supported: false
    type: SingleNamespace
  - supported: false
    type: MultiNamespace
  - supported: true
    type: AllNamespaces
  keywords:
  - game-servers
  - filebrowser
  - manager
  - games
  links:
  - name: Kubernetes Operator
    url: https://kubernetes-operator.domain
  maintainers:
  - email: acoolname@acooldomain.co
    name: acoolname
  maturity: alpha
  provider:
    name: server-manager-provider
    url: https://git.acooldomain.co/server-manager/kubernetes-operator
  version: 0.0.0
@@ -4,6 +4,24 @@ kind: ClusterRole
metadata:
  name: manager-role
rules:
- resources:
  - persistentvolumeclaims
  - services
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- resources:
  - pods
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - server-manager.acooldomain.co
  resources:
@@ -30,3 +48,15 @@ rules:
  - get
  - patch
  - update
- apiGroups:
  - traefik.io
  resources:
  - ingressroutes
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
62  config/role/role.yaml  Normal file
@@ -0,0 +1,62 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: manager-role
rules:
- resources:
  - persistentvolumeclaims
  - services
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- resources:
  - pods
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - server-manager.acooldomain.co
  resources:
  - servermanagers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - server-manager.acooldomain.co
  resources:
  - servermanagers/finalizers
  verbs:
  - update
- apiGroups:
  - server-manager.acooldomain.co
  resources:
  - servermanagers/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - traefik.io
  resources:
  - ingressroutes
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
@@ -6,8 +6,9 @@ metadata:
    app.kubernetes.io/managed-by: kustomize
  name: servermanager-sample
spec:
  id: test-server
  storage: 10Gi
  browser:
    "on": true
  server:
    "on": true
    image: git.acooldomain.co/server-manager/minecraft:paper-1.21.4
@@ -42,12 +42,41 @@ type MiddlewareRef struct {
    Namespace string `yaml:"namespace"`
}

type TraefikService struct {
    Name             string             `yaml:"name"`
    Kind             string             `yaml:"kind,omitempty"`
    Namespace        string             `yaml:"namespace,omitempty"`
    Port             intstr.IntOrString `yaml:"port,omitempty"`
    Scheme           string             `yaml:"scheme,omitempty"`
    Strategy         string             `yaml:"strategy,omitempty"`
    PassHostHeader   *bool              `yaml:"passHostHeader,omitempty"`
    ServersTransport string             `yaml:"serversTransport,omitempty"`
    Weight           *int               `yaml:"weight,omitempty"`
    NativeLB         *bool              `yaml:"nativeLB,omitempty"`
    NodePortLB       bool               `yaml:"nodePortLB,omitempty"`
}

type TraefikRoute struct {
    Match       string           `yaml:"match"`
    Kind        string           `yaml:"kind,omitempty"`
    Priority    int              `yaml:"priority,omitempty"`
    Syntax      string           `yaml:"syntax,omitempty"`
    Services    []TraefikService `yaml:"services,omitempty"`
    Middlewares []MiddlewareRef  `yaml:"middlewares,omitempty"`
}

type BrowserConfig struct {
    Middleware       MiddlewareRef  `yaml:"middleware"`
    AuthHeader       string         `yaml:"auth_header"`
    AdditionalRoutes []TraefikRoute `yaml:"additional_routes"`
    Entrypoints      []string       `yaml:"entrypoints"`
    CertResolver     string         `yaml:"cert_resolver"`
    Domain           string         `yaml:"domain"`
    SubPath          string         `yaml:"sub_path"`
}

type ServerManagerReconcilerConfig struct {
    Browser       BrowserConfig `yaml:"browser_middleware"`
    Browser       BrowserConfig `yaml:"browser"`
    DomainLabel   string        `yaml:"domain_label"`
    DefaultDomain string        `yaml:"default_domain"`
}
@@ -61,7 +90,11 @@ type ServerManagerReconciler struct {

// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=traefik.io,resources=ingressroutes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=server-manager.acooldomain.co,resources=servermanagers/finalizers,verbs=update
// +kubebuilder:rbac:groups=,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=,resources=pods,verbs=get;list;watch;create
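// (Not part of this commit.) In a standard kubebuilder layout these markers are the source
// of the manager-role ClusterRole shown earlier: `make manifests` runs controller-gen, which
// regenerates the role from the +kubebuilder:rbac lines, so the new persistentvolumeclaims,
// services, pods and traefik.io/ingressroutes rules correspond to the markers added here.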

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -75,6 +108,7 @@ type ServerManagerReconciler struct {
func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logging := log.FromContext(ctx)
    s := &servermanagerv1alpha1.ServerManager{}
    statusChanged := false

    err := r.Get(ctx, req.NamespacedName, s)
    if err != nil {
@@ -91,6 +125,7 @@ func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reques
    if errors.IsNotFound(err) {
        err = r.Create(ctx, pvc)
        if err != nil {
            logging.Info("creating server pvc")
            return ctrl.Result{Requeue: true}, err
        }
    } else {
@@ -99,32 +134,66 @@ func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reques
    }
    logging.Info("verified pvc")

    serverPod := r.ServerPod(s, pvc)
    found := &corev1.Pod{}
    err = r.Get(ctx, client.ObjectKey{Namespace: pvc.Namespace, Name: pvc.Name}, found)
    if err == nil && !s.Spec.Server.On {
        err = r.Delete(ctx, serverPod)
        return ctrl.Result{Requeue: true}, err
    }

    browserPvc := r.BrowserPvc(s)
    foundBrowserPvc := &corev1.PersistentVolumeClaim{}
    err = r.Get(ctx, client.ObjectKey{Namespace: browserPvc.Namespace, Name: browserPvc.Name}, foundBrowserPvc)
    if err != nil {
        if errors.IsNotFound(err) {
            if s.Spec.Server.On {
                err = r.Create(ctx, serverPod)
                err = r.Create(ctx, browserPvc)
                if err != nil {
                    logging.Info("creating server browserPvc")
                    return ctrl.Result{Requeue: true}, err
                }
            } else {
                return ctrl.Result{}, err
            }
        }
    }
    logging.Info("verified browserPvc")

    logging.Info("verified pod")

    if found.Spec.NodeName == "" {
        logging.Info("waiting for pod to start 2")
        return ctrl.Result{RequeueAfter: time.Second * 10}, nil
    serverPod := r.ServerPod(s, pvc)
    found := &corev1.Pod{}
    err = r.Get(ctx, client.ObjectKey{Namespace: pvc.Namespace, Name: pvc.Name}, found)
    if err == nil && !s.Spec.Server.On {
        logging.Info("deleting server pod")
        err = r.Delete(ctx, found)
        return ctrl.Result{Requeue: true}, err
    }
    if err != nil && !errors.IsNotFound(err) {
        return ctrl.Result{}, err
    }
    if errors.IsNotFound(err) && s.Spec.Server.On {
        logging.Info("creating server pod")
        err = r.Create(ctx, serverPod)
        return ctrl.Result{Requeue: true}, err
    }

    if s.Spec.Server.On {
        switch found.Status.Phase {
        case corev1.PodPending:
            logging.Info("Waiting for server pod to start")
            return ctrl.Result{RequeueAfter: time.Second * 10}, nil
        case corev1.PodRunning:
            if !s.Status.Server.Running {
                s.Status.Server.Running = true
                statusChanged = true
            }
        default:
            if s.Status.Server.Running {
                s.Status.Server.Running = false
                statusChanged = true
            }
        }
    }

    if errors.IsNotFound(err) && !s.Spec.Server.On {
        if s.Status.Server.Running {
            s.Status.Server.Running = false
            statusChanged = true
        }
    }

    logging.Info("verified pod")
    if s.Status.Server.Running {
        domain := r.Config.DefaultDomain
        if r.Config.DomainLabel != "" {
            node := &corev1.Node{}
@@ -140,38 +209,274 @@ func (r *ServerManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reques
            }
        }

        logging.Info("got domain", "domain", domain)

        if domain != s.Status.Server.Domain {
            logging.Info("got domain", "domain", domain)
            s.Status.Server.Domain = domain
            logging.Info("updating ServerManager object", "NewDomain", domain, "OldDomain", s.Status.Server.Domain)
            err = r.Status().Update(ctx, s)
            logging.Info(fmt.Sprintf("%#v", err))
            return ctrl.Result{}, err
            statusChanged = true
        }
    }

    service := r.ServerService(s)
    foundService := &corev1.Service{}
    err = r.Get(ctx, client.ObjectKeyFromObject(service), foundService)
    if err == nil && !s.Spec.Server.On {
        err = r.Delete(ctx, service)
        logging.Info("deleting server service")
        err = r.Delete(ctx, foundService)
        return ctrl.Result{Requeue: true}, err
    }
    if err != nil {
        if !errors.IsNotFound(err) {
    if err != nil && !errors.IsNotFound(err) {
        return ctrl.Result{}, err
    }
    if s.Spec.Server.On {
    if errors.IsNotFound(err) && s.Spec.Server.On {
        logging.Info("creating server service")
        err = r.Create(ctx, service)
        return ctrl.Result{Requeue: true}, err
    }

    if s.Spec.Server.On {
        if len(s.Status.Server.HostPorts) == 0 {
            hostPorts := make([]servermanagerv1alpha1.PortMapping, len(foundService.Spec.Ports))
            for i, port := range foundService.Spec.Ports {
                if port.NodePort == 0 {
                    logging.Info("Cluster didn't assign ports yet, waiting")
                    return ctrl.Result{RequeueAfter: time.Second * 10}, nil
                }

                hostPorts[i] = servermanagerv1alpha1.PortMapping{
                    Protocol:   port.Protocol,
                    HostPort:   port.NodePort,
                    TargetPort: port.TargetPort.IntVal,
                }
            }
            logging.Info("Found service mapping", "mapping", hostPorts)

            s.Status.Server.HostPorts = hostPorts
            statusChanged = true
        }
    }
    logging.Info("verified server service")

    browserPod := r.BrowserPod(s, pvc, browserPvc)
    foundBrowser := &corev1.Pod{}
    err = r.Get(ctx, client.ObjectKeyFromObject(browserPod), foundBrowser)
    if err == nil && !s.Spec.Browser.On {
        logging.Info("deleting browser pod")
        err = r.Delete(ctx, foundBrowser)
        return ctrl.Result{Requeue: true}, err
    }
    if err != nil && !errors.IsNotFound(err) {
        return ctrl.Result{}, err
    }
    if errors.IsNotFound(err) && s.Spec.Browser.On {
        logging.Info("creating browser pod")
        err = r.Create(ctx, browserPod)
        return ctrl.Result{Requeue: true}, err
    }
    if s.Spec.Browser.On {
        switch foundBrowser.Status.Phase {
        case corev1.PodPending:
            logging.Info("Waiting for browser pod to start")
            return ctrl.Result{RequeueAfter: time.Second * 10}, nil
        case corev1.PodRunning:
            if !s.Status.Browser.Running {
                s.Status.Browser.Running = true
                statusChanged = true
            }
        default:
            if s.Status.Browser.Running {
                s.Status.Browser.Running = false
                statusChanged = true
            }
        }
    }
    if errors.IsNotFound(err) && !s.Spec.Browser.On {
        if s.Status.Browser.Running {
            s.Status.Browser.Running = false
            statusChanged = true
        }
    }
    logging.Info("verified browser pod")

    browserService := r.BrowserService(s)
    foundService = &corev1.Service{}
    err = r.Get(ctx, client.ObjectKeyFromObject(browserService), foundService)
    if err == nil && !s.Spec.Browser.On {
        logging.Info("deleting browser service")
        err = r.Delete(ctx, foundService)
        return ctrl.Result{Requeue: true}, err
    }
    if err != nil && !errors.IsNotFound(err) {
        return ctrl.Result{}, err
    }
    if errors.IsNotFound(err) && s.Spec.Browser.On {
        logging.Info("creating browser service")
        err = r.Create(ctx, browserService)
        return ctrl.Result{Requeue: true}, err
    }
    logging.Info("verified browser service")

    browserIngress := r.BrowserIngress(s, browserService)
    foundIngress := &traefikv3.IngressRoute{}
    err = r.Get(ctx, client.ObjectKeyFromObject(browserIngress), foundIngress)
    if err == nil && !s.Spec.Browser.On {
        logging.Info("deleting browser ingress")
        err = r.Delete(ctx, foundIngress)
        return ctrl.Result{Requeue: true}, err
    }
    if err != nil && !errors.IsNotFound(err) {
        return ctrl.Result{}, err
    }
    if errors.IsNotFound(err) && s.Spec.Browser.On {
        logging.Info("creating browser ingress")
        err = r.Create(ctx, browserIngress)
        return ctrl.Result{Requeue: true}, err
    }

    logging.Info("verified browser ingress")

    browserUrl := r.GenerateBrowserUrl(s)
    if s.Spec.Browser.On {
        s.Status.Browser.Url = browserUrl
        statusChanged = true
    }

    if statusChanged {
        logging.Info("Updating ServerManager")
        err = r.Status().Update(ctx, s)
        return ctrl.Result{Requeue: true}, err
    }
    logging.Info(fmt.Sprintf("verified service %#v", foundService))

    return ctrl.Result{}, nil
}

func (r *ServerManagerReconciler) BrowserPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim) *corev1.Pod {
func (r *ServerManagerReconciler) GenerateBrowserUrl(s *servermanagerv1alpha1.ServerManager) string {
    return fmt.Sprintf("%s%s", r.Config.Browser.Domain, r.GenerateBrowserSubPath(s))
}

func (r *ServerManagerReconciler) GenerateBrowserSubPath(s *servermanagerv1alpha1.ServerManager) string {
    if r.Config.Browser.SubPath != "" {
        return fmt.Sprintf("%s/%s/%s", r.Config.Browser.SubPath, s.Namespace, s.Name)
    } else {
        return fmt.Sprintf("/%s/%s", s.Namespace, s.Name)
    }
}
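// Example (not part of this commit): with sub_path "/browsers" and domain
// "games.acooldomain.co" from config.yaml above, and assuming the sample ServerManager
// "servermanager-sample" lives in the "default" namespace, GenerateBrowserSubPath yields
// "/browsers/default/servermanager-sample" and GenerateBrowserUrl yields
// "games.acooldomain.co/browsers/default/servermanager-sample".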

func (r *ServerManagerReconciler) BrowserPvc(s *servermanagerv1alpha1.ServerManager) *corev1.PersistentVolumeClaim {
    pvc := &corev1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:      fmt.Sprintf("%s-browser", s.Name),
            Namespace: s.Namespace,
        },

        Spec: corev1.PersistentVolumeClaimSpec{
            AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
            Resources: corev1.VolumeResourceRequirements{
                Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(s.Spec.Storage)},
            },
        },
    }

    controllerutil.SetControllerReference(s, pvc, r.Scheme)

    return pvc
}

func (r *ServerManagerReconciler) BrowserIngress(s *servermanagerv1alpha1.ServerManager, service *corev1.Service) *traefikv3.IngressRoute {
    routes := make([]traefikv3.Route, len(r.Config.Browser.AdditionalRoutes)+1)

    for i, route := range r.Config.Browser.AdditionalRoutes {
        services := make([]traefikv3.Service, len(route.Services))
        for j, service := range route.Services {
            services[j] = traefikv3.Service{
                LoadBalancerSpec: traefikv3.LoadBalancerSpec{
                    Name:             service.Name,
                    Kind:             service.Kind,
                    Namespace:        service.Namespace,
                    Port:             service.Port,
                    Scheme:           service.Scheme,
                    Strategy:         service.Strategy,
                    PassHostHeader:   service.PassHostHeader,
                    ServersTransport: service.ServersTransport,
                    Weight:           service.Weight,
                    NativeLB:         service.NativeLB,
                    NodePortLB:       service.NodePortLB,
                },
            }
        }

        middlewares := make([]traefikv3.MiddlewareRef, len(route.Middlewares))
        for j, middleware := range route.Middlewares {
            middlewares[j] = traefikv3.MiddlewareRef{
                Name:      middleware.Name,
                Namespace: middleware.Namespace,
            }
        }

        routes[i] = traefikv3.Route{
            Match:       route.Match,
            Kind:        route.Kind,
            Priority:    route.Priority,
            Syntax:      route.Syntax,
            Services:    services,
            Middlewares: middlewares,
        }
    }

    routes[len(routes)-1] = traefikv3.Route{
        Match: fmt.Sprintf("Host(`%s`) && PathPrefix(`%s`)", r.Config.Browser.Domain, r.GenerateBrowserSubPath(s)),
        Kind:  "Rule",
        Services: []traefikv3.Service{
            {
                LoadBalancerSpec: traefikv3.LoadBalancerSpec{
                    Name:      service.Name,
                    Namespace: service.Namespace,
                    Port:      intstr.FromInt(80),
                },
            },
        },
        Middlewares: []traefikv3.MiddlewareRef{
            traefikv3.MiddlewareRef(r.Config.Browser.Middleware),
        },
    }

    ingressRoute := &traefikv3.IngressRoute{
        ObjectMeta: metav1.ObjectMeta{
            Name:      s.Name,
            Namespace: s.Namespace,
        },
        Spec: traefikv3.IngressRouteSpec{
            EntryPoints: r.Config.Browser.Entrypoints,
            Routes:      routes,
            TLS: &traefikv3.TLS{
                CertResolver: r.Config.Browser.CertResolver,
            },
        },
    }

    controllerutil.SetControllerReference(s, ingressRoute, r.Scheme)

    return ingressRoute
}

func (r *ServerManagerReconciler) BrowserService(s *servermanagerv1alpha1.ServerManager) *corev1.Service {
    service := &corev1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name:      fmt.Sprintf("%s-browser", s.Name),
            Namespace: s.Namespace,
        },
        Spec: corev1.ServiceSpec{
            Type:  corev1.ServiceTypeNodePort,
            Ports: []corev1.ServicePort{{Name: "web", Protocol: corev1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(80)}},
            Selector: map[string]string{
                "browser": s.Name,
            },
        },
    }
    controllerutil.SetControllerReference(s, service, r.Scheme)
    return service
}

func (r *ServerManagerReconciler) BrowserPod(s *servermanagerv1alpha1.ServerManager, pvc *corev1.PersistentVolumeClaim, browserPvc *corev1.PersistentVolumeClaim) *corev1.Pod {
    ports := make([]corev1.ContainerPort, len(s.Spec.Server.Ports))

    for i, port := range s.Spec.Server.Ports {
@@ -185,7 +490,7 @@ func (r *ServerManagerReconciler) BrowserPod(s *servermanagerv1alpha1.ServerMana
        ObjectMeta: metav1.ObjectMeta{
            Name:      fmt.Sprintf("%s-browser", s.Name),
            Namespace: s.Namespace,
            Labels: map[string]string{"server": s.Name},
            Labels: map[string]string{"browser": s.Name},
        },
        Spec: corev1.PodSpec{
            Volumes: []corev1.Volume{
@@ -197,34 +502,53 @@ func (r *ServerManagerReconciler) BrowserPod(s *servermanagerv1alpha1.ServerMana
                        },
                    },
                },
                {
                    Name: "browser-volume",
                    VolumeSource: corev1.VolumeSource{
                        PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
                            ClaimName: browserPvc.Name,
                        },
                    },
                },
            },
            InitContainers: []corev1.Container{
                {
                    Name:            "proxy-setter",
                    Image:           "filebrowser/filebrowser",
                    ImagePullPolicy: corev1.PullAlways,
                    WorkingDir:      s.Spec.Server.WorkingDir,
                    ImagePullPolicy: corev1.PullIfNotPresent,
                    Ports:           ports,
                    Args:            []string{},
                    Command:         []string{"/bin/sh"},
                    Args:            []string{"-c", fmt.Sprintf("rm /tmp/database/filebrowser.db; /filebrowser config init -d /tmp/database/filebrowser.db && /filebrowser config set --auth.method=proxy --auth.header=%s -d /tmp/database/filebrowser.db", r.Config.Browser.AuthHeader)},
                    VolumeMounts: []corev1.VolumeMount{
                        {
                            Name:      "browser-volume",
                            MountPath: "/tmp/database",
                        },
                    },
                },
            },
            Containers: []corev1.Container{
                {
                    Name:            "browser",
                    Image:           "filebrowser/filebrowser",
                    ImagePullPolicy: corev1.PullAlways,
                    ImagePullPolicy: corev1.PullIfNotPresent,
                    Ports:           ports,
                    Args:            []string{},
                    VolumeMounts: []corev1.VolumeMount{{
                    Args:            []string{"-d", "/tmp/database/filebrowser.db", "-r", "/tmp/data", "-b", r.GenerateBrowserSubPath(s)},
                    VolumeMounts: []corev1.VolumeMount{
                        {
                            Name:      "volume",
                            MountPath: s.Spec.Server.WorkingDir,
                        }},
                        Stdin: true,
                        TTY:   true,
                            MountPath: "/tmp/data",
                        },
                        {
                            Name:      "browser-volume",
                            MountPath: "/tmp/database",
                        },
                    },
                },
            },
        },
    }

    controllerutil.SetControllerReference(s, pod, r.Scheme)
    return pod
}