outposts: delete old outpost deployment when name or namespace is changed

closes #845

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>
Jens Langhammer 2021-05-08 16:11:38 +02:00
parent 1e303b515b
commit 6868b7722c
3 changed files with 38 additions and 8 deletions
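For context, a minimal sketch of the behaviour this commit adds (a hypothetical ./manage.py shell session; the outpost name is made up): changing an outpost's name or its config.kubernetes_namespace now tears down the old Kubernetes deployment before the post_save handler brings the renamed outpost back up.

from authentik.outposts.models import Outpost

outpost = Outpost.objects.get(name="k8s-proxy")  # assumed existing outpost
outpost.name = "k8s-proxy-renamed"
# pre_save_outpost (added below) detects the change and waits for the
# outpost_controller_down task to remove the old deployment; post_save
# then re-deploys the outpost under its new name.
outpost.save()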


@@ -60,7 +60,7 @@ class OutpostConfig:
     kubernetes_replicas: int = field(default=1)
     kubernetes_namespace: str = field(default="default")
     kubernetes_ingress_annotations: dict[str, str] = field(default_factory=dict)
-    kubernetes_ingress_secret_name: str = field(default="authentik-outpost")
+    kubernetes_ingress_secret_name: str = field(default="authentik-outpost-tls")
     kubernetes_service_type: str = field(default="ClusterIP")
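Since these are plain dataclass fields, a config object can be constructed directly. A sketch, assuming OutpostConfig is importable from authentik.outposts.models (the file path is not shown in this view) and that its remaining fields also have defaults; values are illustrative only:

from authentik.outposts.models import OutpostConfig  # assumed module path

config = OutpostConfig(
    kubernetes_replicas=2,
    kubernetes_namespace="authentik",
    kubernetes_ingress_annotations={"kubernetes.io/ingress.class": "nginx"},
    kubernetes_ingress_secret_name="authentik-outpost-tls",  # new default
    kubernetes_service_type="ClusterIP",
)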


@@ -1,15 +1,16 @@
 """authentik outpost signals"""
 from django.conf import settings
 from django.db.models import Model
-from django.db.models.signals import post_save, pre_delete
+from django.db.models.signals import post_save, pre_delete, pre_save
 from django.dispatch import receiver
 from structlog.stdlib import get_logger

 from authentik.core.models import Provider
 from authentik.crypto.models import CertificateKeyPair
 from authentik.lib.utils.reflection import class_to_path
+from authentik.outposts.controllers.base import ControllerException
 from authentik.outposts.models import Outpost, OutpostServiceConnection
-from authentik.outposts.tasks import outpost_post_save, outpost_pre_delete
+from authentik.outposts.tasks import outpost_controller_down, outpost_post_save

 LOGGER = get_logger()
 UPDATE_TRIGGERING_MODELS = (
@@ -20,6 +21,27 @@ UPDATE_TRIGGERING_MODELS = (
 )


+@receiver(pre_save, sender=Outpost)
+# pylint: disable=unused-argument
+def pre_save_outpost(sender, instance: Outpost, **_):
+    """Pre-save checks for an outpost, if the name or config.kubernetes_namespace changes,
+    we call down and then wait for the up after save"""
+    old_instances = Outpost.objects.filter(pk=instance.pk)
+    if not old_instances.exists():
+        return
+    old_instance = old_instances.first()
+    dirty = False
+    # Name changes the deployment name, need to recreate
+    dirty += old_instance.name != instance.name
+    # namespace requires re-create
+    dirty += (
+        old_instance.config.kubernetes_namespace != instance.config.kubernetes_namespace
+    )
+    if bool(dirty):
+        LOGGER.info("Outpost needs re-deployment due to changes", instance=instance)
+        outpost_controller_down_wrapper(old_instance)
+
+
 @receiver(post_save)
 # pylint: disable=unused-argument
 def post_save_update(sender, instance: Model, **_):
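A quick aside on the dirty-flag idiom in pre_save_outpost: bool is a subclass of int in Python, so each comparison added to dirty contributes 0 or 1, and bool(dirty) is True as soon as any watched field differs. A self-contained illustration:

dirty = False
dirty += "k8s-proxy" != "k8s-proxy-renamed"  # True  -> dirty == 1
dirty += "default" != "default"              # False -> dirty still 1
assert bool(dirty) is True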
@@ -41,11 +63,15 @@ def post_save_update(sender, instance: Model, **_):
 def pre_delete_cleanup(sender, instance: Outpost, **_):
     """Ensure that Outpost's user is deleted (which will delete the token through cascade)"""
     instance.user.delete()
-    # To ensure that deployment is cleaned up *consistently* we call the controller, and wait
-    # for it to finish. We don't want to call it in this thread, as we don't have the Outpost
-    # Service connection here
+    outpost_controller_down_wrapper(instance)
+
+
+def outpost_controller_down_wrapper(instance: Outpost):
+    """To ensure that deployment is cleaned up *consistently* we call the controller, and wait
+    for it to finish. We don't want to call it in this thread, as we don't have the Outpost
+    Service connection here"""
     try:
-        outpost_pre_delete.delay(instance.pk.hex).get()
+        outpost_controller_down.delay(instance.pk.hex).get()
     except RuntimeError:  # pragma: no cover
         # In e2e/integration tests, this might run inside a thread/process and
         # trigger the celery `Never call result.get() within a task` detection
@@ -53,3 +79,7 @@ def pre_delete_cleanup(sender, instance: Outpost, **_):
             pass
         else:
             raise
+    except ControllerException as exc:
+        LOGGER.warning(
+            "failed to cleanup outpost deployment", exc=exc, instance=instance
+        )
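The wrapper above uses Celery's dispatch-and-wait pattern: .delay() enqueues outpost_controller_down on a worker and .get() blocks the calling thread until the teardown finishes, so the old deployment is gone before the save or delete proceeds. A sketch of the same call made directly (assumes a running Celery worker; the outpost instance is hypothetical):

from authentik.outposts.models import Outpost
from authentik.outposts.tasks import outpost_controller_down

outpost = Outpost.objects.first()  # hypothetical existing outpost
if outpost:
    # Blocks until the worker has removed the deployment. Inside another Celery
    # task, .get() raises RuntimeError ("Never call result.get() within a task"),
    # which the RuntimeError handler above accounts for.
    outpost_controller_down.delay(outpost.pk.hex).get()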


@@ -111,7 +111,7 @@ def outpost_controller(self: MonitoredTask, outpost_pk: str):
 @CELERY_APP.task()
-def outpost_pre_delete(outpost_pk: str):
+def outpost_controller_down(outpost_pk: str):
     """Delete outpost objects before deleting the DB Object"""
     outpost = Outpost.objects.get(pk=outpost_pk)
     controller = controller_for_outpost(outpost)