diff --git a/authentik/core/tasks.py b/authentik/core/tasks.py
index d03bc7eb4..99307f8a1 100644
--- a/authentik/core/tasks.py
+++ b/authentik/core/tasks.py
@@ -1,7 +1,7 @@
 """authentik core tasks"""
 from datetime import datetime
 from io import StringIO
-from pathlib import Path
+from os import environ
 
 from boto3.exceptions import Boto3Error
 from botocore.exceptions import BotoCoreError, ClientError
@@ -9,6 +9,7 @@ from dbbackup.db.exceptions import CommandConnectorError
 from django.contrib.humanize.templatetags.humanize import naturaltime
 from django.core import management
 from django.utils.timezone import now
+from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME
 from structlog.stdlib import get_logger
 
 from authentik.core.models import ExpiringModel
@@ -40,9 +41,7 @@ def clean_expired_models(self: MonitoredTask):
 def backup_database(self: MonitoredTask):  # pragma: no cover
     """Database backup"""
     self.result_timeout_hours = 25
-    if Path("/var/run/secrets/kubernetes.io").exists() and not CONFIG.y(
-        "postgresql.s3_backup"
-    ):
+    if SERVICE_HOST_ENV_NAME in environ and not CONFIG.y("postgresql.s3_backup"):
         LOGGER.info("Running in k8s and s3 backups are not configured, skipping")
         self.set_status(
             TaskResult(
diff --git a/authentik/outposts/apps.py b/authentik/outposts/apps.py
index 08556fd19..02cce0e16 100644
--- a/authentik/outposts/apps.py
+++ b/authentik/outposts/apps.py
@@ -39,6 +39,8 @@ class AuthentikOutpostConfig(AppConfig):
             KubernetesServiceConnection,
         )
 
+        # Explicitly check against the token filename, as it is
+        # only present when the integration is enabled
         if Path(SERVICE_TOKEN_FILENAME).exists():
             LOGGER.debug("Detected in-cluster Kubernetes Config")
             if not KubernetesServiceConnection.objects.filter(local=True).exists():
diff --git a/helm/templates/web-deployment.yaml b/helm/templates/web-deployment.yaml
index 3d2bd47cb..74a9ed979 100644
--- a/helm/templates/web-deployment.yaml
+++ b/helm/templates/web-deployment.yaml
@@ -22,6 +22,7 @@ spec:
         app.kubernetes.io/instance: {{ .Release.Name }}
         k8s.goauthentik.io/component: web
     spec:
+      automountServiceAccountToken: false
       affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
diff --git a/helm/templates/worker-deployment.yaml b/helm/templates/worker-deployment.yaml
index 7d62c2c6b..9743b6113 100644
--- a/helm/templates/worker-deployment.yaml
+++ b/helm/templates/worker-deployment.yaml
@@ -24,6 +24,8 @@ spec:
     spec:
       {{- if .Values.kubernetesIntegration }}
       serviceAccountName: {{ include "authentik.fullname" . }}-sa
+      {{- else }}
+      automountServiceAccountToken: false
       {{- end }}
       affinity:
         podAntiAffinity:
diff --git a/lifecycle/gunicorn.conf.py b/lifecycle/gunicorn.conf.py
index 680ed43f9..e5246bc86 100644
--- a/lifecycle/gunicorn.conf.py
+++ b/lifecycle/gunicorn.conf.py
@@ -2,9 +2,9 @@
 import os
 import warnings
 from multiprocessing import cpu_count
-from pathlib import Path
 
 import structlog
+from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME
 
 bind = "0.0.0.0:8000"
 
@@ -48,7 +48,7 @@ logconfig_dict = {
 
 # if we're running in kubernetes, use fixed workers because we can scale with more pods
 # otherwise (assume docker-compose), use as much as we can
-if Path("/var/run/secrets/kubernetes.io").exists():
+if SERVICE_HOST_ENV_NAME in os.environ:
     workers = 2
 else:
     workers = cpu_count() * 2 + 1
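
For context on the detection change above: in the kubernetes Python client, SERVICE_HOST_ENV_NAME is the KUBERNETES_SERVICE_HOST environment variable, which the kubelet injects into every pod, whereas SERVICE_TOKEN_FILENAME points at the service account token file, which only exists when the token is actually mounted (i.e. when automountServiceAccountToken is not disabled, as the helm templates now do for pods without the integration). A minimal sketch of the two checks follows; the helper names are illustrative and not part of this patch:

from os import environ
from pathlib import Path

from kubernetes.config.incluster_config import (
    SERVICE_HOST_ENV_NAME,    # "KUBERNETES_SERVICE_HOST", present in every pod
    SERVICE_TOKEN_FILENAME,   # service account token path, only present when mounted
)


def running_in_kubernetes() -> bool:
    # Hypothetical helper: true for any pod, even with automountServiceAccountToken: false
    return SERVICE_HOST_ENV_NAME in environ


def kubernetes_integration_available() -> bool:
    # Hypothetical helper: true only when a service account token is mounted
    return Path(SERVICE_TOKEN_FILENAME).exists()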