helm: don't automount Service token when integration is not enabled, improve k8s detection
Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>
parent 12b1f53948
commit 4054e6da8c
@@ -1,7 +1,7 @@
 """authentik core tasks"""
 from datetime import datetime
 from io import StringIO
-from pathlib import Path
+from os import environ

 from boto3.exceptions import Boto3Error
 from botocore.exceptions import BotoCoreError, ClientError
@@ -9,6 +9,7 @@ from dbbackup.db.exceptions import CommandConnectorError
 from django.contrib.humanize.templatetags.humanize import naturaltime
 from django.core import management
 from django.utils.timezone import now
+from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME
 from structlog.stdlib import get_logger

 from authentik.core.models import ExpiringModel
@@ -40,9 +41,7 @@ def clean_expired_models(self: MonitoredTask):
 def backup_database(self: MonitoredTask):  # pragma: no cover
     """Database backup"""
     self.result_timeout_hours = 25
-    if Path("/var/run/secrets/kubernetes.io").exists() and not CONFIG.y(
-        "postgresql.s3_backup"
-    ):
+    if SERVICE_HOST_ENV_NAME in environ and not CONFIG.y("postgresql.s3_backup"):
         LOGGER.info("Running in k8s and s3 backups are not configured, skipping")
         self.set_status(
             TaskResult(
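This hunk swaps filesystem-based detection (checking for the mounted secrets directory) for an environment check. SERVICE_HOST_ENV_NAME in the kubernetes Python client resolves to "KUBERNETES_SERVICE_HOST", which the kubelet injects into every pod, so detection keeps working even when the service account token is no longer mounted. A minimal sketch of the idiom, assuming only the kubernetes client package:

    # Detection via environment rather than the token mount: Kubernetes sets
    # KUBERNETES_SERVICE_HOST in every pod's environment, independent of
    # automountServiceAccountToken.
    from os import environ

    from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME


    def running_in_kubernetes() -> bool:
        """True when this process runs inside a Kubernetes pod."""
        return SERVICE_HOST_ENV_NAME in environ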
@@ -39,6 +39,8 @@ class AuthentikOutpostConfig(AppConfig):
             KubernetesServiceConnection,
         )

+        # Explicitly check against the token filename, as that's
+        # only present when the integration is enabled
         if Path(SERVICE_TOKEN_FILENAME).exists():
             LOGGER.debug("Detected in-cluster Kubernetes Config")
             if not KubernetesServiceConnection.objects.filter(local=True).exists():
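The added comment separates the two signals: the service host variable only says the process is inside a cluster, while the token file exists only when the pod actually mounts a service account token, i.e. when the integration should be enabled. A hedged sketch of what the token's presence permits, using the kubernetes client's documented in-cluster loader:

    # Sketch (assumptions: kubernetes Python client installed, running in a
    # pod). SERVICE_TOKEN_FILENAME is the client's constant for the mounted
    # token path; load_incluster_config() reads the token plus CA bundle.
    from pathlib import Path

    from kubernetes import client, config
    from kubernetes.config.incluster_config import SERVICE_TOKEN_FILENAME

    if Path(SERVICE_TOKEN_FILENAME).exists():
        config.load_incluster_config()
        core_v1 = client.CoreV1Api()  # authenticated as the pod's service account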
@@ -22,6 +22,7 @@ spec:
         app.kubernetes.io/instance: {{ .Release.Name }}
         k8s.goauthentik.io/component: web
     spec:
+      automountServiceAccountToken: false
       affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
@@ -24,6 +24,8 @@ spec:
     spec:
       {{- if .Values.kubernetesIntegration }}
       serviceAccountName: {{ include "authentik.fullname" . }}-sa
+      {{- else }}
+      automountServiceAccountToken: false
       {{- end }}
       affinity:
         podAntiAffinity:
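With automountServiceAccountToken: false (or no bound service account), the API-server environment variables are still injected into the pod, but the token file never appears. A hedged sketch of the resulting runtime behaviour, assuming the kubernetes Python client:

    # Inside a pod that does not mount a token: env-based detection succeeds,
    # credential loading fails. ConfigException is what the client raises
    # when the in-cluster token file is missing.
    from os import environ
    from pathlib import Path

    from kubernetes import config
    from kubernetes.config.config_exception import ConfigException
    from kubernetes.config.incluster_config import (
        SERVICE_HOST_ENV_NAME,
        SERVICE_TOKEN_FILENAME,
    )

    assert SERVICE_HOST_ENV_NAME in environ           # still detected as k8s
    assert not Path(SERVICE_TOKEN_FILENAME).exists()  # token was not mounted

    try:
        config.load_incluster_config()
    except ConfigException:
        print("kubernetes integration disabled: no service account token")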
@@ -2,9 +2,9 @@
 import os
 import warnings
 from multiprocessing import cpu_count
-from pathlib import Path

 import structlog
+from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME

 bind = "0.0.0.0:8000"

@@ -48,7 +48,7 @@ logconfig_dict = {

 # if we're running in kubernetes, use fixed workers because we can scale with more pods
 # otherwise (assume docker-compose), use as much as we can
-if Path("/var/run/secrets/kubernetes.io").exists():
+if SERVICE_HOST_ENV_NAME in os.environ:
     workers = 2
 else:
     workers = cpu_count() * 2 + 1
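The sizing logic: under Kubernetes, capacity is added by scaling pods horizontally, so each pod keeps a fixed 2 workers; elsewhere the classic gunicorn heuristic of 2 * cores + 1 applies, e.g. 9 workers on a 4-core docker-compose host. A condensed sketch of the same decision (not the shipped config file):

    # Fixed workers under Kubernetes, 2 * cores + 1 elsewhere.
    import os
    from multiprocessing import cpu_count

    from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME

    workers = 2 if SERVICE_HOST_ENV_NAME in os.environ else cpu_count() * 2 + 1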