Merge branch 'main' into dev
* main: (24 commits)
  internal: remove special route for /outpost.goauthentik.io (#7539)
  providers/proxy: Fix duplicate cookies when using file system store. (#7541)
  web: bump API Client version (#7543)
  sources/ldap: add check command to verify ldap connectivity (#7263)
  internal: remove deprecated metrics (#7540)
  core: compile backend translations (#7538)
  web: bump prettier from 3.0.3 to 3.1.0 in /web (#7528)
  web: bump @trivago/prettier-plugin-sort-imports from 4.2.1 to 4.3.0 in /web (#7531)
  web: bump rollup from 4.3.0 to 4.4.0 in /web (#7529)
  core: bump celery from 5.3.4 to 5.3.5 (#7536)
  web: bump @formatjs/intl-listformat from 7.5.1 to 7.5.2 in /web (#7530)
  web: bump prettier from 3.0.3 to 3.1.0 in /tests/wdio (#7532)
  web: bump @trivago/prettier-plugin-sort-imports from 4.2.1 to 4.3.0 in /tests/wdio (#7533)
  website: bump prettier from 3.0.3 to 3.1.0 in /website (#7534)
  website: bump prism-react-renderer from 2.1.0 to 2.2.0 in /website (#7535)
  translate: Updates for file locale/en/LC_MESSAGES/django.po in zh_TW (#7537)
  root: Restructure broker / cache / channel / result configuration (#7097)
  core: bump twilio from 8.10.0 to 8.10.1 (#7474)
  web: bump axios from 1.5.0 to 1.6.1 in /web (#7518)
  web: bump wdio-wait-for from 3.0.7 to 3.0.8 in /tests/wdio (#7514)
  ...

commit 2aed74bd9f
@@ -93,10 +93,10 @@ class ConfigView(APIView):
                     "traces_sample_rate": float(CONFIG.get("error_reporting.sample_rate", 0.4)),
                 },
                 "capabilities": self.get_capabilities(),
-                "cache_timeout": CONFIG.get_int("redis.cache_timeout"),
-                "cache_timeout_flows": CONFIG.get_int("redis.cache_timeout_flows"),
-                "cache_timeout_policies": CONFIG.get_int("redis.cache_timeout_policies"),
-                "cache_timeout_reputation": CONFIG.get_int("redis.cache_timeout_reputation"),
+                "cache_timeout": CONFIG.get_int("cache.timeout"),
+                "cache_timeout_flows": CONFIG.get_int("cache.timeout_flows"),
+                "cache_timeout_policies": CONFIG.get_int("cache.timeout_policies"),
+                "cache_timeout_reputation": CONFIG.get_int("cache.timeout_reputation"),
             }
         )
@@ -33,7 +33,7 @@ PLAN_CONTEXT_SOURCE = "source"
 # Is set by the Flow Planner when a FlowToken was used, and the currently active flow plan
 # was restored.
 PLAN_CONTEXT_IS_RESTORED = "is_restored"
-CACHE_TIMEOUT = CONFIG.get_int("redis.cache_timeout_flows")
+CACHE_TIMEOUT = CONFIG.get_int("cache.timeout_flows")
 CACHE_PREFIX = "goauthentik.io/flows/planner/"
@@ -1,4 +1,6 @@
 """authentik core config loader"""
+import base64
+import json
 import os
 from collections.abc import Mapping
 from contextlib import contextmanager
@@ -22,6 +24,25 @@ SEARCH_PATHS = ["authentik/lib/default.yml", "/etc/authentik/config.yml", ""] +
 ENV_PREFIX = "AUTHENTIK"
 ENVIRONMENT = os.getenv(f"{ENV_PREFIX}_ENV", "local")

+REDIS_ENV_KEYS = [
+    f"{ENV_PREFIX}_REDIS__HOST",
+    f"{ENV_PREFIX}_REDIS__PORT",
+    f"{ENV_PREFIX}_REDIS__DB",
+    f"{ENV_PREFIX}_REDIS__USERNAME",
+    f"{ENV_PREFIX}_REDIS__PASSWORD",
+    f"{ENV_PREFIX}_REDIS__TLS",
+    f"{ENV_PREFIX}_REDIS__TLS_REQS",
+]
+
+DEPRECATIONS = {
+    "redis.broker_url": "broker.url",
+    "redis.broker_transport_options": "broker.transport_options",
+    "redis.cache_timeout": "cache.timeout",
+    "redis.cache_timeout_flows": "cache.timeout_flows",
+    "redis.cache_timeout_policies": "cache.timeout_policies",
+    "redis.cache_timeout_reputation": "cache.timeout_reputation",
+}
+
+
 def get_path_from_dict(root: dict, path: str, sep=".", default=None) -> Any:
     """Recursively walk through `root`, checking each part of `path` separated by `sep`.
@@ -81,6 +102,10 @@ class AttrEncoder(JSONEncoder):
         return super().default(o)


+class UNSET:
+    """Used to test whether configuration key has not been set."""
+
+
 class ConfigLoader:
     """Search through SEARCH_PATHS and load configuration. Environment variables starting with
     `ENV_PREFIX` are also applied.

@@ -113,6 +138,40 @@ class ConfigLoader:
         self.update_from_file(env_file)
         self.update_from_env()
         self.update(self.__config, kwargs)
+        self.check_deprecations()
+
+    def check_deprecations(self):
+        """Warn if any deprecated configuration options are used"""
+
+        def _pop_deprecated_key(current_obj, dot_parts, index):
+            """Recursive function to remove deprecated keys in configuration"""
+            dot_part = dot_parts[index]
+            if index == len(dot_parts) - 1:
+                return current_obj.pop(dot_part)
+            value = _pop_deprecated_key(current_obj[dot_part], dot_parts, index + 1)
+            if not current_obj[dot_part]:
+                current_obj.pop(dot_part)
+            return value
+
+        for deprecation, replacement in DEPRECATIONS.items():
+            if self.get(deprecation, default=UNSET) is not UNSET:
+                message = (
+                    f"'{deprecation}' has been deprecated in favor of '{replacement}'! "
+                    + "Please update your configuration."
+                )
+                self.log(
+                    "warning",
+                    message,
+                )
+                try:
+                    from authentik.events.models import Event, EventAction
+
+                    Event.new(EventAction.CONFIGURATION_ERROR, message=message).save()
+                except ImportError:
+                    continue
+
+                deprecated_attr = _pop_deprecated_key(self.__config, deprecation.split("."), 0)
+                self.set(replacement, deprecated_attr.value)
+
     def log(self, level: str, message: str, **kwargs):
         """Custom Log method, we want to ensure ConfigLoader always logs JSON even when
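The rewrite above pops a deprecated key out of the nested config and re-sets it under its replacement, pruning parents that become empty. A minimal stand-in with plain dicts (the real code works on Attr wrappers, omitted here):

    def pop_path(obj: dict, parts: list):
        """Pop the leaf at `parts`, pruning parents that become empty."""
        if len(parts) == 1:
            return obj.pop(parts[0])
        value = pop_path(obj[parts[0]], parts[1:])
        if not obj[parts[0]]:
            obj.pop(parts[0])
        return value

    config = {"redis": {"cache_timeout": 300}}
    config.setdefault("cache", {})["timeout"] = pop_path(config, "redis.cache_timeout".split("."))
    assert config == {"cache": {"timeout": 300}}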
@@ -180,6 +239,10 @@ class ConfigLoader:
                 error=str(exc),
             )

+    def update_from_dict(self, update: dict):
+        """Update config from dict"""
+        self.__config.update(update)
+
     def update_from_env(self):
         """Check environment variables"""
         outer = {}

@@ -188,19 +251,13 @@
             if not key.startswith(ENV_PREFIX):
                 continue
             relative_key = key.replace(f"{ENV_PREFIX}_", "", 1).replace("__", ".").lower()
-            # Recursively convert path from a.b.c into outer[a][b][c]
-            current_obj = outer
-            dot_parts = relative_key.split(".")
-            for dot_part in dot_parts[:-1]:
-                if dot_part not in current_obj:
-                    current_obj[dot_part] = {}
-                current_obj = current_obj[dot_part]
             # Check if the value is json, and try to load it
             try:
                 value = loads(value)
             except JSONDecodeError:
                 pass
-            current_obj[dot_parts[-1]] = Attr(value, Attr.Source.ENV, key)
+            attr_value = Attr(value, Attr.Source.ENV, relative_key)
+            set_path_in_dict(outer, relative_key, attr_value)
             idx += 1
         if idx > 0:
             self.log("debug", "Loaded environment variables", count=idx)
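For reference, the normalisation above turns an environment variable name into a dotted path, which set_path_in_dict then expands into nested dicts; a self-contained sketch of the same steps:

    ENV_PREFIX = "AUTHENTIK"
    key = "AUTHENTIK_REDIS__CACHE_TIMEOUT"
    relative_key = key.replace(f"{ENV_PREFIX}_", "", 1).replace("__", ".").lower()
    assert relative_key == "redis.cache_timeout"

    # Equivalent nesting to set_path_in_dict: expand a.b.c into outer[a][b][c]
    outer = {}
    node = outer
    *parents, leaf = relative_key.split(".")
    for part in parents:
        node = node.setdefault(part, {})
    node[leaf] = "300"
    assert outer == {"redis": {"cache_timeout": "300"}}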
@@ -241,6 +298,23 @@ class ConfigLoader:
         """Wrapper for get that converts value into boolean"""
         return str(self.get(path, default)).lower() == "true"

+    def get_dict_from_b64_json(self, path: str, default=None) -> dict:
+        """Wrapper for get that converts value from Base64 encoded string into dictionary"""
+        config_value = self.get(path)
+        if config_value is None:
+            return {}
+        try:
+            b64decoded_str = base64.b64decode(config_value).decode("utf-8")
+            b64decoded_str = b64decoded_str.strip().lstrip("{").rstrip("}")
+            b64decoded_str = "{" + b64decoded_str + "}"
+            return json.loads(b64decoded_str)
+        except (JSONDecodeError, TypeError, ValueError) as exc:
+            self.log(
+                "warning",
+                f"Ignored invalid configuration for '{path}' due to exception: {str(exc)}",
+            )
+            return default if isinstance(default, dict) else {}
+
     def set(self, path: str, value: Any, sep="."):
         """Set value using same syntax as get()"""
         set_path_in_dict(self.raw, path, Attr(value), sep=sep)
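A value accepted by get_dict_from_b64_json() can be produced by base64-encoding a JSON object; the option name below is purely illustrative:

    import base64
    import json

    options = {"master_name": "mymaster"}  # hypothetical transport option
    encoded = base64.b64encode(json.dumps(options).encode("utf-8")).decode()
    # The parser above also tolerates values whose outer braces were lost,
    # since it strips and re-adds them before json.loads().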
@@ -28,14 +28,28 @@ listen:
 redis:
   host: localhost
   port: 6379
+  db: 0
+  username: ""
   password: ""
   tls: false
   tls_reqs: "none"
-  db: 0
-  cache_timeout: 300
-  cache_timeout_flows: 300
-  cache_timeout_policies: 300
-  cache_timeout_reputation: 300
+
+# broker:
+#   url: ""
+#   transport_options: ""
+
+cache:
+  # url: ""
+  timeout: 300
+  timeout_flows: 300
+  timeout_policies: 300
+  timeout_reputation: 300
+
+# channel:
+#   url: ""
+
+# result_backend:
+#   url: ""
+
 paths:
   media: ./media
@@ -1,20 +1,32 @@
 """Test config loader"""
+import base64
+from json import dumps
 from os import chmod, environ, unlink, write
 from tempfile import mkstemp
+from unittest import mock

 from django.conf import ImproperlyConfigured
 from django.test import TestCase

-from authentik.lib.config import ENV_PREFIX, ConfigLoader
+from authentik.lib.config import ENV_PREFIX, UNSET, Attr, AttrEncoder, ConfigLoader


 class TestConfig(TestCase):
     """Test config loader"""

+    check_deprecations_env_vars = {
+        ENV_PREFIX + "_REDIS__BROKER_URL": "redis://myredis:8327/43",
+        ENV_PREFIX + "_REDIS__BROKER_TRANSPORT_OPTIONS": "bWFzdGVybmFtZT1teW1hc3Rlcg==",
+        ENV_PREFIX + "_REDIS__CACHE_TIMEOUT": "124s",
+        ENV_PREFIX + "_REDIS__CACHE_TIMEOUT_FLOWS": "32m",
+        ENV_PREFIX + "_REDIS__CACHE_TIMEOUT_POLICIES": "3920ns",
+        ENV_PREFIX + "_REDIS__CACHE_TIMEOUT_REPUTATION": "298382us",
+    }
+
+    @mock.patch.dict(environ, {ENV_PREFIX + "_test__test": "bar"})
     def test_env(self):
         """Test simple instance"""
         config = ConfigLoader()
-        environ[ENV_PREFIX + "_test__test"] = "bar"
         config.update_from_env()
         self.assertEqual(config.get("test.test"), "bar")

@@ -27,12 +39,20 @@ class TestConfig(TestCase):
         self.assertEqual(config.get("foo.bar"), "baz")
         self.assertEqual(config.get("foo.bar"), "bar")

+    @mock.patch.dict(environ, {"foo": "bar"})
     def test_uri_env(self):
         """Test URI parsing (environment)"""
         config = ConfigLoader()
-        environ["foo"] = "bar"
-        self.assertEqual(config.parse_uri("env://foo").value, "bar")
-        self.assertEqual(config.parse_uri("env://foo?bar").value, "bar")
+        foo_uri = "env://foo"
+        foo_parsed = config.parse_uri(foo_uri)
+        self.assertEqual(foo_parsed.value, "bar")
+        self.assertEqual(foo_parsed.source_type, Attr.Source.URI)
+        self.assertEqual(foo_parsed.source, foo_uri)
+        foo_bar_uri = "env://foo?bar"
+        foo_bar_parsed = config.parse_uri(foo_bar_uri)
+        self.assertEqual(foo_bar_parsed.value, "bar")
+        self.assertEqual(foo_bar_parsed.source_type, Attr.Source.URI)
+        self.assertEqual(foo_bar_parsed.source, foo_bar_uri)

     def test_uri_file(self):
         """Test URI parsing (file load)"""

@@ -91,3 +111,60 @@ class TestConfig(TestCase):
         config = ConfigLoader()
         config.set("foo", "bar")
         self.assertEqual(config.get_int("foo", 1234), 1234)
+
+    def test_get_dict_from_b64_json(self):
+        """Test get_dict_from_b64_json"""
+        config = ConfigLoader()
+        test_value = ' { "foo": "bar" } '.encode("utf-8")
+        b64_value = base64.b64encode(test_value)
+        config.set("foo", b64_value)
+        self.assertEqual(config.get_dict_from_b64_json("foo"), {"foo": "bar"})
+
+    def test_get_dict_from_b64_json_missing_brackets(self):
+        """Test get_dict_from_b64_json with missing brackets"""
+        config = ConfigLoader()
+        test_value = ' "foo": "bar" '.encode("utf-8")
+        b64_value = base64.b64encode(test_value)
+        config.set("foo", b64_value)
+        self.assertEqual(config.get_dict_from_b64_json("foo"), {"foo": "bar"})
+
+    def test_get_dict_from_b64_json_invalid(self):
+        """Test get_dict_from_b64_json with invalid value"""
+        config = ConfigLoader()
+        config.set("foo", "bar")
+        self.assertEqual(config.get_dict_from_b64_json("foo"), {})
+
+    def test_attr_json_encoder(self):
+        """Test AttrEncoder"""
+        test_attr = Attr("foo", Attr.Source.ENV, "AUTHENTIK_REDIS__USERNAME")
+        json_attr = dumps(test_attr, indent=4, cls=AttrEncoder)
+        self.assertEqual(json_attr, '"foo"')
+
+    def test_attr_json_encoder_no_attr(self):
+        """Test AttrEncoder if no Attr is passed"""
+
+        class Test:
+            """Non Attr class"""
+
+        with self.assertRaises(TypeError):
+            test_obj = Test()
+            dumps(test_obj, indent=4, cls=AttrEncoder)
+
+    @mock.patch.dict(environ, check_deprecations_env_vars)
+    def test_check_deprecations(self):
+        """Test config key re-write for deprecated env vars"""
+        config = ConfigLoader()
+        config.update_from_env()
+        config.check_deprecations()
+        self.assertEqual(config.get("redis.broker_url", UNSET), UNSET)
+        self.assertEqual(config.get("redis.broker_transport_options", UNSET), UNSET)
+        self.assertEqual(config.get("redis.cache_timeout", UNSET), UNSET)
+        self.assertEqual(config.get("redis.cache_timeout_flows", UNSET), UNSET)
+        self.assertEqual(config.get("redis.cache_timeout_policies", UNSET), UNSET)
+        self.assertEqual(config.get("redis.cache_timeout_reputation", UNSET), UNSET)
+        self.assertEqual(config.get("broker.url"), "redis://myredis:8327/43")
+        self.assertEqual(config.get("broker.transport_options"), "bWFzdGVybmFtZT1teW1hc3Rlcg==")
+        self.assertEqual(config.get("cache.timeout"), "124s")
+        self.assertEqual(config.get("cache.timeout_flows"), "32m")
+        self.assertEqual(config.get("cache.timeout_policies"), "3920ns")
+        self.assertEqual(config.get("cache.timeout_reputation"), "298382us")
@@ -93,7 +93,7 @@ class OutpostConsumer(AuthJsonConsumer):
             expected=self.outpost.config.kubernetes_replicas,
         ).dec()

-    def receive_json(self, content: Data):
+    def receive_json(self, content: Data, **kwargs):
         msg = from_dict(WebsocketMessage, content)
         uid = msg.args.get("uuid", self.channel_name)
         self.last_uid = uid
@@ -20,7 +20,7 @@ from authentik.policies.types import CACHE_PREFIX, PolicyRequest, PolicyResult
 LOGGER = get_logger()

 FORK_CTX = get_context("fork")
-CACHE_TIMEOUT = CONFIG.get_int("redis.cache_timeout_policies")
+CACHE_TIMEOUT = CONFIG.get_int("cache.timeout_policies")
 PROCESS_CLASS = FORK_CTX.Process
@@ -13,7 +13,7 @@ from authentik.policies.reputation.tasks import save_reputation
 from authentik.stages.identification.signals import identification_failed

 LOGGER = get_logger()
-CACHE_TIMEOUT = CONFIG.get_int("redis.cache_timeout_reputation")
+CACHE_TIMEOUT = CONFIG.get_int("cache.timeout_reputation")


 def update_score(request: HttpRequest, identifier: str, amount: int):
@@ -1,5 +1,4 @@
 """root settings for authentik"""
-
 import importlib
 import os
 from hashlib import sha512

@@ -195,8 +194,8 @@ _redis_url = (
 CACHES = {
     "default": {
         "BACKEND": "django_redis.cache.RedisCache",
-        "LOCATION": f"{_redis_url}/{CONFIG.get('redis.db')}",
-        "TIMEOUT": CONFIG.get_int("redis.cache_timeout", 300),
+        "LOCATION": CONFIG.get("cache.url") or f"{_redis_url}/{CONFIG.get('redis.db')}",
+        "TIMEOUT": CONFIG.get_int("cache.timeout", 300),
         "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
         "KEY_PREFIX": "authentik_cache",
     }

@@ -256,7 +255,7 @@ CHANNEL_LAYERS = {
     "default": {
         "BACKEND": "channels_redis.pubsub.RedisPubSubChannelLayer",
         "CONFIG": {
-            "hosts": [f"{_redis_url}/{CONFIG.get('redis.db')}"],
+            "hosts": [CONFIG.get("channel.url", f"{_redis_url}/{CONFIG.get('redis.db')}")],
             "prefix": "authentik_channels_",
         },
     },

@@ -349,8 +348,11 @@ CELERY = {
     },
     "task_create_missing_queues": True,
     "task_default_queue": "authentik",
-    "broker_url": f"{_redis_url}/{CONFIG.get('redis.db')}{_redis_celery_tls_requirements}",
-    "result_backend": f"{_redis_url}/{CONFIG.get('redis.db')}{_redis_celery_tls_requirements}",
+    "broker_url": CONFIG.get("broker.url")
+    or f"{_redis_url}/{CONFIG.get('redis.db')}{_redis_celery_tls_requirements}",
+    "broker_transport_options": CONFIG.get_dict_from_b64_json("broker.transport_options"),
+    "result_backend": CONFIG.get("result_backend.url")
+    or f"{_redis_url}/{CONFIG.get('redis.db')}{_redis_celery_tls_requirements}",
 }

 # Sentry integration
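The broker and result backend now honour an explicit URL and only fall back to the composed Redis URL when none is set. The precedence in isolation (function name and values are illustrative):

    def effective_url(config: dict, redis_url: str, db: int, tls_reqs: str = "") -> str:
        # An explicit broker.url / result_backend.url wins; otherwise compose
        # the same Redis URL as before.
        return config.get("broker.url") or f"{redis_url}/{db}{tls_reqs}"

    assert effective_url({}, "redis://localhost:6379", 0) == "redis://localhost:6379/0"
    assert effective_url({"broker.url": "redis://other/1"}, "redis://localhost:6379", 0) == "redis://other/1"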
@@ -1,13 +1,14 @@
 """Source API Views"""
-from typing import Any
+from typing import Any, Optional

+from django.core.cache import cache
 from django_filters.filters import AllValuesMultipleFilter
 from django_filters.filterset import FilterSet
 from drf_spectacular.types import OpenApiTypes
 from drf_spectacular.utils import extend_schema, extend_schema_field, inline_serializer
 from rest_framework.decorators import action
 from rest_framework.exceptions import ValidationError
-from rest_framework.fields import DictField, ListField
+from rest_framework.fields import BooleanField, DictField, ListField, SerializerMethodField
 from rest_framework.relations import PrimaryKeyRelatedField
 from rest_framework.request import Request
 from rest_framework.response import Response

@@ -17,15 +18,17 @@ from authentik.admin.api.tasks import TaskSerializer
 from authentik.core.api.propertymappings import PropertyMappingSerializer
 from authentik.core.api.sources import SourceSerializer
 from authentik.core.api.used_by import UsedByMixin
+from authentik.core.api.utils import PassiveSerializer
 from authentik.crypto.models import CertificateKeyPair
 from authentik.events.monitored_tasks import TaskInfo
 from authentik.sources.ldap.models import LDAPPropertyMapping, LDAPSource
-from authentik.sources.ldap.tasks import SYNC_CLASSES
+from authentik.sources.ldap.tasks import CACHE_KEY_STATUS, SYNC_CLASSES


 class LDAPSourceSerializer(SourceSerializer):
     """LDAP Source Serializer"""

+    connectivity = SerializerMethodField()
     client_certificate = PrimaryKeyRelatedField(
         allow_null=True,
         help_text="Client certificate to authenticate against the LDAP Server's Certificate.",

@@ -35,6 +38,10 @@ class LDAPSourceSerializer(SourceSerializer):
         required=False,
     )

+    def get_connectivity(self, source: LDAPSource) -> Optional[dict[str, dict[str, str]]]:
+        """Get cached source connectivity"""
+        return cache.get(CACHE_KEY_STATUS + source.slug, None)
+
     def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:
         """Check that only a single source has password_sync on"""
         sync_users_password = attrs.get("sync_users_password", True)

@@ -75,10 +82,18 @@ class LDAPSourceSerializer(SourceSerializer):
             "sync_parent_group",
             "property_mappings",
             "property_mappings_group",
+            "connectivity",
         ]
         extra_kwargs = {"bind_password": {"write_only": True}}


+class LDAPSyncStatusSerializer(PassiveSerializer):
+    """LDAP Source sync status"""
+
+    is_running = BooleanField(read_only=True)
+    tasks = TaskSerializer(many=True, read_only=True)
+
+
 class LDAPSourceViewSet(UsedByMixin, ModelViewSet):
     """LDAP Source Viewset"""

@@ -114,19 +129,19 @@ class LDAPSourceViewSet(UsedByMixin, ModelViewSet):

     @extend_schema(
         responses={
-            200: TaskSerializer(many=True),
+            200: LDAPSyncStatusSerializer(),
         }
     )
     @action(methods=["GET"], detail=True, pagination_class=None, filter_backends=[])
     def sync_status(self, request: Request, slug: str) -> Response:
         """Get source's sync status"""
-        source = self.get_object()
-        results = []
-        tasks = TaskInfo.by_name(f"ldap_sync:{source.slug}:*")
-        if tasks:
-            for task in tasks:
-                results.append(task)
-        return Response(TaskSerializer(results, many=True).data)
+        source: LDAPSource = self.get_object()
+        tasks = TaskInfo.by_name(f"ldap_sync:{source.slug}:*") or []
+        status = {
+            "tasks": tasks,
+            "is_running": source.sync_lock.locked(),
+        }
+        return Response(LDAPSyncStatusSerializer(status).data)

     @extend_schema(
         responses={
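With this change sync_status returns the serializer's two fields instead of a bare task list; an illustrative payload:

    {
        "is_running": false,
        "tasks": []
    }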
@@ -0,0 +1,24 @@
+"""LDAP Connection check"""
+from json import dumps
+
+from django.core.management.base import BaseCommand
+from structlog.stdlib import get_logger
+
+from authentik.sources.ldap.models import LDAPSource
+
+LOGGER = get_logger()
+
+
+class Command(BaseCommand):
+    """Check connectivity to LDAP servers for a source"""
+
+    def add_arguments(self, parser):
+        parser.add_argument("source_slugs", nargs="?", type=str)
+
+    def handle(self, **options):
+        sources = LDAPSource.objects.filter(enabled=True)
+        if options["source_slugs"]:
+            sources = LDAPSource.objects.filter(slug__in=options["source_slugs"])
+        for source in sources.order_by("slug"):
+            status = source.check_connection()
+            self.stdout.write(dumps(status, indent=4))
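The new command above takes optional source slugs and prints one JSON status document per enabled source. Django names a management command after its module file, which this capture does not include; assuming conventional placement under management/commands/, it would be run as python manage.py <command_module_name> my-source.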
@@ -4,10 +4,12 @@ from ssl import CERT_REQUIRED
 from tempfile import NamedTemporaryFile, mkdtemp
 from typing import Optional

+from django.core.cache import cache
 from django.db import models
 from django.utils.translation import gettext_lazy as _
 from ldap3 import ALL, NONE, RANDOM, Connection, Server, ServerPool, Tls
-from ldap3.core.exceptions import LDAPInsufficientAccessRightsResult, LDAPSchemaError
+from ldap3.core.exceptions import LDAPException, LDAPInsufficientAccessRightsResult, LDAPSchemaError
+from redis.lock import Lock
 from rest_framework.serializers import Serializer

 from authentik.core.models import Group, PropertyMapping, Source

@@ -117,7 +119,7 @@ class LDAPSource(Source):

         return LDAPSourceSerializer

-    def server(self, **kwargs) -> Server:
+    def server(self, **kwargs) -> ServerPool:
         """Get LDAP Server/ServerPool"""
         servers = []
         tls_kwargs = {}

@@ -154,7 +156,10 @@ class LDAPSource(Source):
         return ServerPool(servers, RANDOM, active=5, exhaust=True)

     def connection(
-        self, server_kwargs: Optional[dict] = None, connection_kwargs: Optional[dict] = None
+        self,
+        server: Optional[Server] = None,
+        server_kwargs: Optional[dict] = None,
+        connection_kwargs: Optional[dict] = None,
     ) -> Connection:
         """Get a fully connected and bound LDAP Connection"""
         server_kwargs = server_kwargs or {}

@@ -164,7 +169,7 @@ class LDAPSource(Source):
         if self.bind_password is not None:
             connection_kwargs.setdefault("password", self.bind_password)
         connection = Connection(
-            self.server(**server_kwargs),
+            server or self.server(**server_kwargs),
             raise_exceptions=True,
             receive_timeout=LDAP_TIMEOUT,
             **connection_kwargs,

@@ -183,9 +188,55 @@ class LDAPSource(Source):
             if server_kwargs.get("get_info", ALL) == NONE:
                 raise exc
             server_kwargs["get_info"] = NONE
-            return self.connection(server_kwargs, connection_kwargs)
+            return self.connection(server, server_kwargs, connection_kwargs)
         return RuntimeError("Failed to bind")

+    @property
+    def sync_lock(self) -> Lock:
+        """Redis lock for syncing LDAP to prevent multiple parallel syncs happening"""
+        return Lock(
+            cache.client.get_client(),
+            name=f"goauthentik.io/sources/ldap/sync-{self.slug}",
+            # Convert task timeout hours to seconds, and multiply times 3
+            # (see authentik/sources/ldap/tasks.py:54)
+            # multiply by 3 to add even more leeway
+            timeout=(60 * 60 * CONFIG.get_int("ldap.task_timeout_hours")) * 3,
+        )
+
+    def check_connection(self) -> dict[str, dict[str, str]]:
+        """Check LDAP Connection"""
+        from authentik.sources.ldap.sync.base import flatten
+
+        servers = self.server()
+        server_info = {}
+        # Check each individual server
+        for server in servers.servers:
+            server: Server
+            try:
+                connection = self.connection(server=server)
+                server_info[server.host] = {
+                    "vendor": str(flatten(connection.server.info.vendor_name)),
+                    "version": str(flatten(connection.server.info.vendor_version)),
+                    "status": "ok",
+                }
+            except LDAPException as exc:
+                server_info[server.host] = {
+                    "status": str(exc),
+                }
+        # Check server pool
+        try:
+            connection = self.connection()
+            server_info["__all__"] = {
+                "vendor": str(flatten(connection.server.info.vendor_name)),
+                "version": str(flatten(connection.server.info.vendor_version)),
+                "status": "ok",
+            }
+        except LDAPException as exc:
+            server_info["__all__"] = {
+                "status": str(exc),
+            }
+        return server_info
+
     class Meta:
         verbose_name = _("LDAP Source")
         verbose_name_plural = _("LDAP Sources")
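check_connection() reports per-server results keyed by host plus an aggregate "__all__" entry for the whole pool; an illustrative return value (host names made up):

    {
        "ldap1.example.com": {"vendor": "...", "version": "...", "status": "ok"},
        "ldap2.example.com": {"status": "<LDAPException text>"},
        "__all__": {"vendor": "...", "version": "...", "status": "ok"},
    }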
@@ -8,5 +8,10 @@ CELERY_BEAT_SCHEDULE = {
         "task": "authentik.sources.ldap.tasks.ldap_sync_all",
         "schedule": crontab(minute=fqdn_rand("sources_ldap_sync"), hour="*/2"),
         "options": {"queue": "authentik_scheduled"},
-    }
+    },
+    "sources_ldap_connectivity_check": {
+        "task": "authentik.sources.ldap.tasks.ldap_connectivity_check",
+        "schedule": crontab(minute=fqdn_rand("sources_ldap_connectivity_check"), hour="*"),
+        "options": {"queue": "authentik_scheduled"},
+    },
 }
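fqdn_rand (from authentik's utilities, not shown in this diff) staggers the cron minute per host so that every instance does not run the check at once. A stand-in with the same intent, assuming a hash-of-hostname approach:

    from hashlib import sha256
    from socket import getfqdn

    def fqdn_rand_sketch(task: str, stop: int = 60) -> int:
        """Stable pseudo-random minute derived from this host and the task name."""
        return int(sha256(f"{getfqdn()}-{task}".encode()).hexdigest(), 16) % stop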
@@ -14,7 +14,7 @@ from authentik.events.models import Event, EventAction
 from authentik.flows.planner import PLAN_CONTEXT_PENDING_USER
 from authentik.sources.ldap.models import LDAPSource
 from authentik.sources.ldap.password import LDAPPasswordChanger
-from authentik.sources.ldap.tasks import ldap_sync_single
+from authentik.sources.ldap.tasks import ldap_connectivity_check, ldap_sync_single
 from authentik.stages.prompt.signals import password_validate

 LOGGER = get_logger()

@@ -32,6 +32,7 @@ def sync_ldap_source_on_save(sender, instance: LDAPSource, **_):
     if not instance.property_mappings.exists() or not instance.property_mappings_group.exists():
         return
     ldap_sync_single.delay(instance.pk)
+    ldap_connectivity_check.delay(instance.pk)


 @receiver(password_validate)
@@ -17,6 +17,15 @@ from authentik.sources.ldap.models import LDAPPropertyMapping, LDAPSource
 LDAP_UNIQUENESS = "ldap_uniq"


+def flatten(value: Any) -> Any:
+    """Flatten `value` if its a list"""
+    if isinstance(value, list):
+        if len(value) < 1:
+            return None
+        return value[0]
+    return value
+
+
 class BaseLDAPSynchronizer:
     """Sync LDAP Users and groups into authentik"""

@@ -122,14 +131,6 @@ class BaseLDAPSynchronizer:
                 cookie = None
             yield self._connection.response

-    def _flatten(self, value: Any) -> Any:
-        """Flatten `value` if its a list"""
-        if isinstance(value, list):
-            if len(value) < 1:
-                return None
-            return value[0]
-        return value
-
     def build_user_properties(self, user_dn: str, **kwargs) -> dict[str, Any]:
         """Build attributes for User object based on property mappings."""
         props = self._build_object_properties(user_dn, self._source.property_mappings, **kwargs)

@@ -163,10 +164,10 @@ class BaseLDAPSynchronizer:
                     object_field = mapping.object_field
                     if object_field.startswith("attributes."):
                         # Because returning a list might desired, we can't
-                        # rely on self._flatten here. Instead, just save the result as-is
+                        # rely on flatten here. Instead, just save the result as-is
                         set_path_in_dict(properties, object_field, value)
                     else:
-                        properties[object_field] = self._flatten(value)
+                        properties[object_field] = flatten(value)
                 except PropertyMappingExpressionException as exc:
                     Event.new(
                         EventAction.CONFIGURATION_ERROR,

@@ -177,7 +178,7 @@ class BaseLDAPSynchronizer:
                     self._logger.warning("Mapping failed to evaluate", exc=exc, mapping=mapping)
                     continue
         if self._source.object_uniqueness_field in kwargs:
-            properties["attributes"][LDAP_UNIQUENESS] = self._flatten(
+            properties["attributes"][LDAP_UNIQUENESS] = flatten(
                 kwargs.get(self._source.object_uniqueness_field)
            )
         properties["attributes"][LDAP_DISTINGUISHED_NAME] = object_dn
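The helper promoted to module level above keeps its previous semantics; given its definition:

    assert flatten(["cn=admin"]) == "cn=admin"  # single-element list unwrapped
    assert flatten([]) is None                  # empty list collapses to None
    assert flatten("cn=admin") == "cn=admin"    # non-lists pass through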
@@ -7,7 +7,7 @@ from ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE

 from authentik.core.models import Group
 from authentik.events.models import Event, EventAction
-from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer
+from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer, flatten


 class GroupLDAPSynchronizer(BaseLDAPSynchronizer):

@@ -39,7 +39,7 @@ class GroupLDAPSynchronizer(BaseLDAPSynchronizer):
             if "attributes" not in group:
                 continue
             attributes = group.get("attributes", {})
-            group_dn = self._flatten(self._flatten(group.get("entryDN", group.get("dn"))))
+            group_dn = flatten(flatten(group.get("entryDN", group.get("dn"))))
             if self._source.object_uniqueness_field not in attributes:
                 self.message(
                     f"Cannot find uniqueness field in attributes: '{group_dn}'",

@@ -47,7 +47,7 @@ class GroupLDAPSynchronizer(BaseLDAPSynchronizer):
                     dn=group_dn,
                 )
                 continue
-            uniq = self._flatten(attributes[self._source.object_uniqueness_field])
+            uniq = flatten(attributes[self._source.object_uniqueness_field])
             try:
                 defaults = self.build_group_properties(group_dn, **attributes)
                 defaults["parent"] = self._source.sync_parent_group
@@ -7,7 +7,7 @@ from ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE

 from authentik.core.models import User
 from authentik.events.models import Event, EventAction
-from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer
+from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer, flatten
 from authentik.sources.ldap.sync.vendor.freeipa import FreeIPA
 from authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory

@@ -41,7 +41,7 @@ class UserLDAPSynchronizer(BaseLDAPSynchronizer):
             if "attributes" not in user:
                 continue
             attributes = user.get("attributes", {})
-            user_dn = self._flatten(user.get("entryDN", user.get("dn")))
+            user_dn = flatten(user.get("entryDN", user.get("dn")))
             if self._source.object_uniqueness_field not in attributes:
                 self.message(
                     f"Cannot find uniqueness field in attributes: '{user_dn}'",

@@ -49,7 +49,7 @@ class UserLDAPSynchronizer(BaseLDAPSynchronizer):
                     dn=user_dn,
                 )
                 continue
-            uniq = self._flatten(attributes[self._source.object_uniqueness_field])
+            uniq = flatten(attributes[self._source.object_uniqueness_field])
             try:
                 defaults = self.build_user_properties(user_dn, **attributes)
                 self._logger.debug("Writing user with attributes", **defaults)
@@ -5,7 +5,7 @@ from typing import Any, Generator
 from pytz import UTC

 from authentik.core.models import User
-from authentik.sources.ldap.sync.base import BaseLDAPSynchronizer
+from authentik.sources.ldap.sync.base import BaseLDAPSynchronizer, flatten


 class FreeIPA(BaseLDAPSynchronizer):

@@ -47,7 +47,7 @@ class FreeIPA(BaseLDAPSynchronizer):
             return
         # For some reason, nsaccountlock is not defined properly in the schema as bool
         # hence we get it as a list of strings
-        _is_locked = str(self._flatten(attributes.get("nsaccountlock", ["FALSE"])))
+        _is_locked = str(flatten(attributes.get("nsaccountlock", ["FALSE"])))
         # So we have to attempt to convert it to a bool
         is_locked = _is_locked.lower() == "true"
         # And then invert it since freeipa saves locked and we save active
@@ -1,13 +1,14 @@
 """LDAP Sync tasks"""
+from typing import Optional
 from uuid import uuid4

 from celery import chain, group
 from django.core.cache import cache
 from ldap3.core.exceptions import LDAPException
 from redis.exceptions import LockError
-from redis.lock import Lock
 from structlog.stdlib import get_logger

+from authentik.events.monitored_tasks import CACHE_KEY_PREFIX as CACHE_KEY_PREFIX_TASKS
 from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus
 from authentik.lib.config import CONFIG
 from authentik.lib.utils.errors import exception_to_string

@@ -26,6 +27,7 @@ SYNC_CLASSES = [
     MembershipLDAPSynchronizer,
 ]
 CACHE_KEY_PREFIX = "goauthentik.io/sources/ldap/page/"
+CACHE_KEY_STATUS = "goauthentik.io/sources/ldap/status/"


 @CELERY_APP.task()

@@ -35,6 +37,19 @@ def ldap_sync_all():
         ldap_sync_single.apply_async(args=[source.pk])


+@CELERY_APP.task()
+def ldap_connectivity_check(pk: Optional[str] = None):
+    """Check connectivity for LDAP Sources"""
+    # 2 hour timeout, this task should run every hour
+    timeout = 60 * 60 * 2
+    sources = LDAPSource.objects.filter(enabled=True)
+    if pk:
+        sources = sources.filter(pk=pk)
+    for source in sources:
+        status = source.check_connection()
+        cache.set(CACHE_KEY_STATUS + source.slug, status, timeout=timeout)
+
+
 @CELERY_APP.task(
     # We take the configured hours timeout time by 2.5 as we run user and
     # group in parallel and then membership, so 2x is to cover the serial tasks,

@@ -47,12 +62,15 @@ def ldap_sync_single(source_pk: str):
     source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()
     if not source:
         return
-    lock = Lock(cache.client.get_client(), name=f"goauthentik.io/sources/ldap/sync-{source.slug}")
+    lock = source.sync_lock
     if lock.locked():
         LOGGER.debug("LDAP sync locked, skipping task", source=source.slug)
         return
     try:
         with lock:
+            # Delete all sync tasks from the cache
+            keys = cache.keys(f"{CACHE_KEY_PREFIX_TASKS}ldap_sync:{source.slug}*")
+            cache.delete_many(keys)
             task = chain(
                 # User and group sync can happen at once, they have no dependencies on each other
                 group(
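The lock-guard pattern used by ldap_sync_single above, in isolation (connection details are illustrative):

    from redis import Redis
    from redis.lock import Lock

    client = Redis.from_url("redis://localhost:6379/0")
    lock = Lock(client, name="goauthentik.io/sources/ldap/sync-example", timeout=3 * 60 * 60)
    if not lock.locked():
        # Acquired and released by the context manager; a competing worker
        # sees locked() and skips instead of queueing behind it.
        with lock:
            pass  # ... run the sync chain ...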
go.mod (2 changes)
@@ -27,7 +27,7 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/stretchr/testify v1.8.4
-	goauthentik.io/api/v3 v3.2023102.1
+	goauthentik.io/api/v3 v3.2023103.1
 	golang.org/x/exp v0.0.0-20230210204819-062eb4c674ab
 	golang.org/x/oauth2 v0.14.0
 	golang.org/x/sync v0.5.0
go.sum (4 changes)
@@ -358,8 +358,8 @@ go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyK
 go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
 go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
-goauthentik.io/api/v3 v3.2023102.1 h1:TinB3fzh17iw92Mak0pxVdVSMJbL2CxZkQSvd98C4+U=
-goauthentik.io/api/v3 v3.2023102.1/go.mod h1:zz+mEZg8rY/7eEjkMGWJ2DnGqk+zqxuybGCGrR2O4Kw=
+goauthentik.io/api/v3 v3.2023103.1 h1:KqZny4BPDEQ6cIDuZ9pn6/kpvyu+o6o/EekAfujffow=
+goauthentik.io/api/v3 v3.2023103.1/go.mod h1:zz+mEZg8rY/7eEjkMGWJ2DnGqk+zqxuybGCGrR2O4Kw=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
@@ -27,14 +27,11 @@ type Config struct {
 type RedisConfig struct {
 	Host     string `yaml:"host" env:"AUTHENTIK_REDIS__HOST"`
 	Port     int    `yaml:"port" env:"AUTHENTIK_REDIS__PORT"`
+	DB       int    `yaml:"db" env:"AUTHENTIK_REDIS__DB"`
+	Username string `yaml:"username" env:"AUTHENTIK_REDIS__USERNAME"`
 	Password string `yaml:"password" env:"AUTHENTIK_REDIS__PASSWORD"`
 	TLS      bool   `yaml:"tls" env:"AUTHENTIK_REDIS__TLS"`
 	TLSReqs  string `yaml:"tls_reqs" env:"AUTHENTIK_REDIS__TLS_REQS"`
-	DB                     int `yaml:"cache_db" env:"AUTHENTIK_REDIS__DB"`
-	CacheTimeout           int `yaml:"cache_timeout" env:"AUTHENTIK_REDIS__CACHE_TIMEOUT"`
-	CacheTimeoutFlows      int `yaml:"cache_timeout_flows" env:"AUTHENTIK_REDIS__CACHE_TIMEOUT_FLOWS"`
-	CacheTimeoutPolicies   int `yaml:"cache_timeout_policies" env:"AUTHENTIK_REDIS__CACHE_TIMEOUT_POLICIES"`
-	CacheTimeoutReputation int `yaml:"cache_timeout_reputation" env:"AUTHENTIK_REDIS__CACHE_TIMEOUT_REPUTATION"`
 }

 type ListenConfig struct {
@@ -29,16 +29,6 @@ var (
 		Name: "authentik_outpost_flow_timing_post_seconds",
 		Help: "Duration it took to send a challenge in seconds",
 	}, []string{"stage", "flow"})
-
-	// NOTE: the following metrics are kept for compatibility purpose
-	FlowTimingGetLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name: "authentik_outpost_flow_timing_get",
-		Help: "Duration it took to get a challenge",
-	}, []string{"stage", "flow"})
-	FlowTimingPostLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name: "authentik_outpost_flow_timing_post",
-		Help: "Duration it took to send a challenge",
-	}, []string{"stage", "flow"})
 )

 type SolverFunction func(*api.ChallengeTypes, api.ApiFlowsExecutorSolveRequest) (api.FlowChallengeResponseRequest, error)

@@ -198,10 +188,6 @@ func (fe *FlowExecutor) getInitialChallenge() (*api.ChallengeTypes, error) {
 		"stage": ch.GetComponent(),
 		"flow":  fe.flowSlug,
 	}).Observe(float64(gcsp.EndTime.Sub(gcsp.StartTime)) / float64(time.Second))
-	FlowTimingGetLegacy.With(prometheus.Labels{
-		"stage": ch.GetComponent(),
-		"flow":  fe.flowSlug,
-	}).Observe(float64(gcsp.EndTime.Sub(gcsp.StartTime)))
 	return challenge, nil
 }

@@ -259,10 +245,6 @@ func (fe *FlowExecutor) solveFlowChallenge(challenge *api.ChallengeTypes, depth
 		"stage": ch.GetComponent(),
 		"flow":  fe.flowSlug,
 	}).Observe(float64(scsp.EndTime.Sub(scsp.StartTime)) / float64(time.Second))
-	FlowTimingPostLegacy.With(prometheus.Labels{
-		"stage": ch.GetComponent(),
-		"flow":  fe.flowSlug,
-	}).Observe(float64(scsp.EndTime.Sub(scsp.StartTime)))

 	if depth >= 10 {
 		return false, errors.New("exceeded stage recursion depth")
@@ -22,11 +22,6 @@ func (ls *LDAPServer) Bind(bindDN string, bindPW string, conn net.Conn) (ldap.LD
 			"type": "bind",
 			"app":  selectedApp,
 		}).Observe(float64(span.EndTime.Sub(span.StartTime)) / float64(time.Second))
-		metrics.RequestsLegacy.With(prometheus.Labels{
-			"outpost_name": ls.ac.Outpost.Name,
-			"type":         "bind",
-			"app":          selectedApp,
-		}).Observe(float64(span.EndTime.Sub(span.StartTime)))
 		req.Log().WithField("took-ms", span.EndTime.Sub(span.StartTime).Milliseconds()).Info("Bind request")
 	}()

@@ -55,12 +50,6 @@ func (ls *LDAPServer) Bind(bindDN string, bindPW string, conn net.Conn) (ldap.LD
 			"reason": "no_provider",
 			"app":    "",
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ls.ac.Outpost.Name,
-			"type":         "bind",
-			"reason":       "no_provider",
-			"app":          "",
-		}).Inc()

 		return ldap.LDAPResultInsufficientAccessRights, nil
 	}
@@ -47,12 +47,6 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
 			"reason": "flow_error",
 			"app":    db.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": db.si.GetOutpostName(),
-			"type":         "bind",
-			"reason":       "flow_error",
-			"app":          db.si.GetAppSlug(),
-		}).Inc()
 		req.Log().WithError(err).Warning("failed to execute flow")
 		return ldap.LDAPResultInvalidCredentials, nil
 	}

@@ -63,12 +57,6 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
 			"reason": "invalid_credentials",
 			"app":    db.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": db.si.GetOutpostName(),
-			"type":         "bind",
-			"reason":       "invalid_credentials",
-			"app":          db.si.GetAppSlug(),
-		}).Inc()
 		req.Log().Info("Invalid credentials")
 		return ldap.LDAPResultInvalidCredentials, nil
 	}

@@ -82,12 +70,6 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
 			"reason": "access_denied",
 			"app":    db.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": db.si.GetOutpostName(),
-			"type":         "bind",
-			"reason":       "access_denied",
-			"app":          db.si.GetAppSlug(),
-		}).Inc()
 		return ldap.LDAPResultInsufficientAccessRights, nil
 	}
 	if err != nil {

@@ -97,12 +79,6 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
 			"reason": "access_check_fail",
 			"app":    db.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": db.si.GetOutpostName(),
-			"type":         "bind",
-			"reason":       "access_check_fail",
-			"app":          db.si.GetAppSlug(),
-		}).Inc()
 		req.Log().WithError(err).Warning("failed to check access")
 		return ldap.LDAPResultOperationsError, nil
 	}

@@ -117,12 +93,6 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
 			"reason": "user_info_fail",
 			"app":    db.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": db.si.GetOutpostName(),
-			"type":         "bind",
-			"reason":       "user_info_fail",
-			"app":          db.si.GetAppSlug(),
-		}).Inc()
 		req.Log().WithError(err).Warning("failed to get user info")
 		return ldap.LDAPResultOperationsError, nil
 	}
@ -22,16 +22,6 @@ var (
|
||||||
Name: "authentik_outpost_ldap_requests_rejected_total",
|
Name: "authentik_outpost_ldap_requests_rejected_total",
|
||||||
Help: "Total number of rejected requests",
|
Help: "Total number of rejected requests",
|
||||||
}, []string{"outpost_name", "type", "reason", "app"})
|
}, []string{"outpost_name", "type", "reason", "app"})
|
||||||
|
|
||||||
// NOTE: the following metrics are kept for compatibility purpose
|
|
||||||
RequestsLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
|
||||||
Name: "authentik_outpost_ldap_requests",
|
|
||||||
Help: "The total number of configured providers",
|
|
||||||
}, []string{"outpost_name", "type", "app"})
|
|
||||||
RequestsRejectedLegacy = promauto.NewCounterVec(prometheus.CounterOpts{
|
|
||||||
Name: "authentik_outpost_ldap_requests_rejected",
|
|
||||||
Help: "Total number of rejected requests",
|
|
||||||
}, []string{"outpost_name", "type", "reason", "app"})
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func RunServer() {
|
func RunServer() {
|
||||||
|
|
|
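After this hunk only the `_total`-suffixed counter remains in the LDAP outpost, so dashboards querying the removed names need a one-time rename. A sketch of the surviving declaration with the query migration in a comment; the PromQL itself is an example, not taken from this diff:

package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Dashboards migrate from
//   increase(authentik_outpost_ldap_requests_rejected[1h])
// to
//   increase(authentik_outpost_ldap_requests_rejected_total[1h])
var RequestsRejected = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "authentik_outpost_ldap_requests_rejected_total",
	Help: "Total number of rejected requests",
}, []string{"outpost_name", "type", "reason", "app"})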
@@ -23,11 +23,6 @@ func (ls *LDAPServer) Search(bindDN string, searchReq ldap.SearchRequest, conn n
 			"type": "search",
 			"app":  selectedApp,
 		}).Observe(float64(span.EndTime.Sub(span.StartTime)) / float64(time.Second))
-		metrics.RequestsLegacy.With(prometheus.Labels{
-			"outpost_name": ls.ac.Outpost.Name,
-			"type":         "search",
-			"app":          selectedApp,
-		}).Observe(float64(span.EndTime.Sub(span.StartTime)))
 		req.Log().WithField("attributes", searchReq.Attributes).WithField("took-ms", span.EndTime.Sub(span.StartTime).Milliseconds()).Info("Search request")
 	}()
 
@@ -45,12 +45,6 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "empty_bind_dn",
 			"app":    ds.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ds.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "empty_bind_dn",
-			"app":          ds.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: Anonymous BindDN not allowed %s", req.BindDN)
 	}
 	if !utils.HasSuffixNoCase(req.BindDN, ","+baseDN) {
@@ -60,12 +54,6 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "invalid_bind_dn",
 			"app":    ds.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ds.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "invalid_bind_dn",
-			"app":          ds.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: BindDN %s not in our BaseDN %s", req.BindDN, ds.si.GetBaseDN())
 	}
 
@@ -78,12 +66,6 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "user_info_not_cached",
 			"app":    ds.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ds.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "user_info_not_cached",
-			"app":          ds.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, errors.New("access denied")
 	}
 	accsp.Finish()
@@ -96,12 +78,6 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "filter_parse_fail",
 			"app":    ds.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ds.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "filter_parse_fail",
-			"app":          ds.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultOperationsError}, fmt.Errorf("Search Error: error parsing filter: %s", req.Filter)
 	}
 
@@ -62,12 +62,6 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "empty_bind_dn",
 			"app":    ms.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ms.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "empty_bind_dn",
-			"app":          ms.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: Anonymous BindDN not allowed %s", req.BindDN)
 	}
 	if !utils.HasSuffixNoCase(req.BindDN, ","+baseDN) {
@@ -77,12 +71,6 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "invalid_bind_dn",
 			"app":    ms.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ms.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "invalid_bind_dn",
-			"app":          ms.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: BindDN %s not in our BaseDN %s", req.BindDN, ms.si.GetBaseDN())
 	}
 
@@ -95,12 +83,6 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
 			"reason": "user_info_not_cached",
 			"app":    ms.si.GetAppSlug(),
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ms.si.GetOutpostName(),
-			"type":         "search",
-			"reason":       "user_info_not_cached",
-			"app":          ms.si.GetAppSlug(),
-		}).Inc()
 		return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, errors.New("access denied")
 	}
 	accsp.Finish()
 
@@ -22,11 +22,6 @@ func (ls *LDAPServer) Unbind(boundDN string, conn net.Conn) (ldap.LDAPResultCode
 			"type": "unbind",
 			"app":  selectedApp,
 		}).Observe(float64(span.EndTime.Sub(span.StartTime)) / float64(time.Second))
-		metrics.RequestsLegacy.With(prometheus.Labels{
-			"outpost_name": ls.ac.Outpost.Name,
-			"type":         "unbind",
-			"app":          selectedApp,
-		}).Observe(float64(span.EndTime.Sub(span.StartTime)))
 		req.Log().WithField("took-ms", span.EndTime.Sub(span.StartTime).Milliseconds()).Info("Unbind request")
 	}()
 
@@ -55,11 +50,5 @@ func (ls *LDAPServer) Unbind(boundDN string, conn net.Conn) (ldap.LDAPResultCode
 			"reason": "no_provider",
 			"app":    "",
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": ls.ac.Outpost.Name,
-			"type":         "unbind",
-			"reason":       "no_provider",
-			"app":          "",
-		}).Inc()
 	return ldap.LDAPResultOperationsError, nil
 }
 
@@ -173,12 +173,6 @@ func NewApplication(p api.ProxyOutpostConfig, c *http.Client, server Server) (*A
 				"method": r.Method,
 				"host":   web.GetHost(r),
 			}).Observe(float64(elapsed) / float64(time.Second))
-			metrics.RequestsLegacy.With(prometheus.Labels{
-				"outpost_name": a.outpostName,
-				"type":         "app",
-				"method":       r.Method,
-				"host":         web.GetHost(r),
-			}).Observe(float64(elapsed))
 		})
 	})
 	if server.API().GlobalConfig.ErrorReporting.Enabled {
@@ -241,7 +235,10 @@ func (a *Application) Mode() api.ProxyMode {
 	return *a.proxyConfig.Mode
 }
 
-func (a *Application) HasQuerySignature(r *http.Request) bool {
+func (a *Application) ShouldHandleURL(r *http.Request) bool {
+	if strings.HasPrefix(r.URL.Path, "/outpost.goauthentik.io") {
+		return true
+	}
 	if strings.EqualFold(r.URL.Query().Get(CallbackSignature), "true") {
 		return true
 	}
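`HasQuerySignature` becomes `ShouldHandleURL`, which now also claims the `/outpost.goauthentik.io` path prefix, replacing the dedicated route the web server drops further down in this diff. A self-contained sketch of the predicate; the `CallbackSignature` value is an assumption, the diff only references it by name:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// Assumed query-parameter name; not shown in this diff.
const CallbackSignature = "X-authentik-auth-callback"

func shouldHandleURL(r *http.Request) bool {
	// Outpost-internal endpoints are always handled by the outpost itself.
	if strings.HasPrefix(r.URL.Path, "/outpost.goauthentik.io") {
		return true
	}
	// Auth callbacks are marked with a signature query parameter.
	return strings.EqualFold(r.URL.Query().Get(CallbackSignature), "true")
}

func main() {
	u, _ := url.Parse("https://app.example.com/outpost.goauthentik.io/callback")
	fmt.Println(shouldHandleURL(&http.Request{URL: u})) // true
}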
@@ -64,13 +64,6 @@ func (a *Application) configureProxy() error {
 			"scheme": r.URL.Scheme,
 			"host":   web.GetHost(r),
 		}).Observe(float64(elapsed) / float64(time.Second))
-		metrics.UpstreamTimingLegacy.With(prometheus.Labels{
-			"outpost_name":  a.outpostName,
-			"upstream_host": r.URL.Host,
-			"method":        r.Method,
-			"scheme":        r.URL.Scheme,
-			"host":          web.GetHost(r),
-		}).Observe(float64(elapsed))
 	})
 	return nil
 }
@@ -71,7 +71,7 @@ func (a *Application) getStore(p api.ProxyOutpostConfig, externalHost *url.URL)
 		cs.Options.Domain = *p.CookieDomain
 		cs.Options.SameSite = http.SameSiteLaxMode
 		cs.Options.MaxAge = maxAge
-		cs.Options.Path = externalHost.Path
+		cs.Options.Path = "/"
 		a.log.WithField("dir", dir).Trace("using filesystem session backend")
 		return cs
 	}
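The one-line change above is the duplicate-cookie fix for the filesystem session store: a cookie scoped to `externalHost.Path` can coexist with one scoped to `/`, and browsers then send both on requests under the sub-path. Pinning `Path` to `/` leaves a single session cookie. A minimal sketch of the corrected setup, assuming gorilla/sessions is the store behind `cs`:

package web

import (
	"net/http"

	"github.com/gorilla/sessions"
)

func newFilesystemStore(dir string, keys ...[]byte) *sessions.FilesystemStore {
	cs := sessions.NewFilesystemStore(dir, keys...)
	cs.Options.SameSite = http.SameSiteLaxMode
	// Root-scoped: prevents a second, path-scoped copy of the session
	// cookie from accumulating alongside this one.
	cs.Options.Path = "/"
	return cs
}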
@@ -26,12 +26,6 @@ func (ps *ProxyServer) HandlePing(rw http.ResponseWriter, r *http.Request) {
 		"host": web.GetHost(r),
 		"type": "ping",
 	}).Observe(float64(elapsed) / float64(time.Second))
-	metrics.RequestsLegacy.With(prometheus.Labels{
-		"outpost_name": ps.akAPI.Outpost.Name,
-		"method":       r.Method,
-		"host":         web.GetHost(r),
-		"type":         "ping",
-	}).Observe(float64(elapsed))
 }
 
 func (ps *ProxyServer) HandleStatic(rw http.ResponseWriter, r *http.Request) {
@@ -44,12 +38,6 @@ func (ps *ProxyServer) HandleStatic(rw http.ResponseWriter, r *http.Request) {
 		"host": web.GetHost(r),
 		"type": "static",
 	}).Observe(float64(elapsed) / float64(time.Second))
-	metrics.RequestsLegacy.With(prometheus.Labels{
-		"outpost_name": ps.akAPI.Outpost.Name,
-		"method":       r.Method,
-		"host":         web.GetHost(r),
-		"type":         "static",
-	}).Observe(float64(elapsed))
 }
 
 func (ps *ProxyServer) lookupApp(r *http.Request) (*application.Application, string) {
 
@@ -22,16 +22,6 @@ var (
 		Name: "authentik_outpost_proxy_upstream_response_duration_seconds",
 		Help: "Proxy upstream response latencies in seconds",
 	}, []string{"outpost_name", "method", "scheme", "host", "upstream_host"})
-
-	// NOTE: the following metric is kept for compatibility purpose
-	RequestsLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name: "authentik_outpost_proxy_requests",
-		Help: "The total number of configured providers",
-	}, []string{"outpost_name", "method", "host", "type"})
-	UpstreamTimingLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name: "authentik_outpost_proxy_upstream_time",
-		Help: "A summary of the duration we wait for the upstream reply",
-	}, []string{"outpost_name", "method", "scheme", "host", "upstream_host"})
 )
 
 func RunServer() {
 
@@ -74,7 +74,7 @@ func (ps *ProxyServer) HandleHost(rw http.ResponseWriter, r *http.Request) bool
 	if a == nil {
 		return false
 	}
-	if a.HasQuerySignature(r) || a.Mode() == api.PROXYMODE_PROXY {
+	if a.ShouldHandleURL(r) || a.Mode() == api.PROXYMODE_PROXY {
 		a.ServeHTTP(rw, r)
 		return true
 	}
 
@@ -35,11 +35,6 @@ func (rs *RadiusServer) Handle_AccessRequest(w radius.ResponseWriter, r *RadiusR
 			"reason": "flow_error",
 			"app":    r.pi.appSlug,
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": rs.ac.Outpost.Name,
-			"reason":       "flow_error",
-			"app":          r.pi.appSlug,
-		}).Inc()
 		_ = w.Write(r.Response(radius.CodeAccessReject))
 		return
 	}
@@ -49,11 +44,6 @@ func (rs *RadiusServer) Handle_AccessRequest(w radius.ResponseWriter, r *RadiusR
 			"reason": "invalid_credentials",
 			"app":    r.pi.appSlug,
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": rs.ac.Outpost.Name,
-			"reason":       "invalid_credentials",
-			"app":          r.pi.appSlug,
-		}).Inc()
 		_ = w.Write(r.Response(radius.CodeAccessReject))
 		return
 	}
@@ -66,11 +56,6 @@ func (rs *RadiusServer) Handle_AccessRequest(w radius.ResponseWriter, r *RadiusR
 			"reason": "access_check_fail",
 			"app":    r.pi.appSlug,
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": rs.ac.Outpost.Name,
-			"reason":       "access_check_fail",
-			"app":          r.pi.appSlug,
-		}).Inc()
 		return
 	}
 	if !access {
@@ -81,11 +66,6 @@ func (rs *RadiusServer) Handle_AccessRequest(w radius.ResponseWriter, r *RadiusR
 			"reason": "access_denied",
 			"app":    r.pi.appSlug,
 		}).Inc()
-		metrics.RequestsRejectedLegacy.With(prometheus.Labels{
-			"outpost_name": rs.ac.Outpost.Name,
-			"reason":       "access_denied",
-			"app":          r.pi.appSlug,
-		}).Inc()
 		return
 	}
 	_ = w.Write(r.Response(radius.CodeAccessAccept))
 
@@ -47,10 +47,6 @@ func (rs *RadiusServer) ServeRADIUS(w radius.ResponseWriter, r *radius.Request)
 		"outpost_name": rs.ac.Outpost.Name,
 		"app":          selectedApp,
 	}).Observe(float64(span.EndTime.Sub(span.StartTime)) / float64(time.Second))
-	metrics.RequestsLegacy.With(prometheus.Labels{
-		"outpost_name": rs.ac.Outpost.Name,
-		"app":          selectedApp,
-	}).Observe(float64(span.EndTime.Sub(span.StartTime)))
 	}()
 
 	nr := &RadiusRequest{
 
@@ -22,16 +22,6 @@ var (
 		Name: "authentik_outpost_radius_requests_rejected_total",
 		Help: "Total number of rejected requests",
 	}, []string{"outpost_name", "reason", "app"})
-
-	// NOTE: the following metric is kept for compatibility purpose
-	RequestsLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name: "authentik_outpost_radius_requests",
-		Help: "The total number of successful requests",
-	}, []string{"outpost_name", "app"})
-	RequestsRejectedLegacy = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "authentik_outpost_radius_requests_rejected",
-		Help: "Total number of rejected requests",
-	}, []string{"outpost_name", "reason", "app"})
 )
 
 func RunServer() {
 
@@ -19,12 +19,6 @@ var (
 		Name: "authentik_main_request_duration_seconds",
 		Help: "API request latencies in seconds",
 	}, []string{"dest"})
-
-	// NOTE: the following metric is kept for compatibility purpose
-	RequestsLegacy = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name: "authentik_main_requests",
-		Help: "The total number of configured providers",
-	}, []string{"dest"})
 )
 
 func (ws *WebServer) runMetricsServer() {
 
@@ -32,21 +32,6 @@ func (ws *WebServer) configureProxy() {
 	}
 	rp.ErrorHandler = ws.proxyErrorHandler
 	rp.ModifyResponse = ws.proxyModifyResponse
-	ws.m.PathPrefix("/outpost.goauthentik.io").HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-		if ws.ProxyServer != nil {
-			before := time.Now()
-			ws.ProxyServer.Handle(rw, r)
-			elapsed := time.Since(before)
-			Requests.With(prometheus.Labels{
-				"dest": "embedded_outpost",
-			}).Observe(float64(elapsed) / float64(time.Second))
-			RequestsLegacy.With(prometheus.Labels{
-				"dest": "embedded_outpost",
-			}).Observe(float64(elapsed))
-			return
-		}
-		ws.proxyErrorHandler(rw, r, errors.New("proxy not running"))
-	})
 	ws.m.Path("/-/health/live/").HandlerFunc(sentry.SentryNoSample(func(rw http.ResponseWriter, r *http.Request) {
 		rw.WriteHeader(204)
 	}))
@@ -56,25 +41,17 @@ func (ws *WebServer) configureProxy() {
 			return
 		}
 		before := time.Now()
-		if ws.ProxyServer != nil {
-			if ws.ProxyServer.HandleHost(rw, r) {
-				elapsed := time.Since(before)
-				Requests.With(prometheus.Labels{
-					"dest": "embedded_outpost",
-				}).Observe(float64(elapsed) / float64(time.Second))
-				RequestsLegacy.With(prometheus.Labels{
-					"dest": "embedded_outpost",
-				}).Observe(float64(elapsed))
-				return
-			}
+		if ws.ProxyServer != nil && ws.ProxyServer.HandleHost(rw, r) {
+			elapsed := time.Since(before)
+			Requests.With(prometheus.Labels{
+				"dest": "embedded_outpost",
+			}).Observe(float64(elapsed) / float64(time.Second))
+			return
 		}
 		elapsed := time.Since(before)
 		Requests.With(prometheus.Labels{
 			"dest": "core",
 		}).Observe(float64(elapsed) / float64(time.Second))
-		RequestsLegacy.With(prometheus.Labels{
-			"dest": "core",
-		}).Observe(float64(elapsed))
 		r.Body = http.MaxBytesReader(rw, r.Body, 32*1024*1024)
 		rp.ServeHTTP(rw, r)
 	}))
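With the special-cased `/outpost.goauthentik.io` handler gone, every request now takes one path: try the embedded outpost, fall through to core, and record a single seconds-based observation either way (as in the hunk, the core observation is taken before proxying). A condensed sketch of that shape, with the handler wiring simplified:

package web

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var requests = promauto.NewHistogramVec(prometheus.HistogramOpts{
	Name: "authentik_main_request_duration_seconds",
	Help: "API request latencies in seconds",
}, []string{"dest"})

// timedHandler routes to the embedded outpost when it claims the request,
// otherwise to core, timing both through the same histogram.
func timedHandler(outpost func(http.ResponseWriter, *http.Request) bool, core http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		before := time.Now()
		if outpost != nil && outpost(rw, r) {
			requests.With(prometheus.Labels{"dest": "embedded_outpost"}).Observe(time.Since(before).Seconds())
			return
		}
		requests.With(prometheus.Labels{"dest": "core"}).Observe(time.Since(before).Seconds())
		core.ServeHTTP(rw, r)
	})
}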
Binary file not shown.
File diff suppressed because it is too large.
@@ -422,13 +422,13 @@ typecheck = ["mypy"]
 
 [[package]]
 name = "billiard"
-version = "4.1.0"
+version = "4.2.0"
 description = "Python multiprocessing fork with improvements and bugfixes"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "billiard-4.1.0-py3-none-any.whl", hash = "sha256:0f50d6be051c6b2b75bfbc8bfd85af195c5739c281d3f5b86a5640c65563614a"},
+    {file = "billiard-4.2.0-py3-none-any.whl", hash = "sha256:07aa978b308f334ff8282bd4a746e681b3513db5c9a514cbdd810cbbdc19714d"},
-    {file = "billiard-4.1.0.tar.gz", hash = "sha256:1ad2eeae8e28053d729ba3373d34d9d6e210f6e4d8bf0a9c64f92bd053f1edf5"},
+    {file = "billiard-4.2.0.tar.gz", hash = "sha256:9a3c3184cb275aa17a732f93f65b20c525d3d9f253722d26a82194803ade5a2c"},
 ]
 
 [[package]]
@@ -544,29 +544,29 @@ test = ["pytest", "pytest-cov"]
 
 [[package]]
 name = "celery"
-version = "5.3.4"
+version = "5.3.5"
 description = "Distributed Task Queue."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "celery-5.3.4-py3-none-any.whl", hash = "sha256:1e6ed40af72695464ce98ca2c201ad0ef8fd192246f6c9eac8bba343b980ad34"},
+    {file = "celery-5.3.5-py3-none-any.whl", hash = "sha256:30b75ac60fb081c2d9f8881382c148ed7c9052031a75a1e8743ff4b4b071f184"},
-    {file = "celery-5.3.4.tar.gz", hash = "sha256:9023df6a8962da79eb30c0c84d5f4863d9793a466354cc931d7f72423996de28"},
+    {file = "celery-5.3.5.tar.gz", hash = "sha256:6b65d8dd5db499dd6190c45aa6398e171b99592f2af62c312f7391587feb5458"},
 ]
 
 [package.dependencies]
-billiard = ">=4.1.0,<5.0"
+billiard = ">=4.2.0,<5.0"
 click = ">=8.1.2,<9.0"
 click-didyoumean = ">=0.3.0"
 click-plugins = ">=1.1.1"
 click-repl = ">=0.2.0"
-kombu = ">=5.3.2,<6.0"
+kombu = ">=5.3.3,<6.0"
 python-dateutil = ">=2.8.2"
 tzdata = ">=2022.7"
-vine = ">=5.0.0,<6.0"
+vine = ">=5.1.0,<6.0"
 
 [package.extras]
 arangodb = ["pyArango (>=2.0.2)"]
-auth = ["cryptography (==41.0.3)"]
+auth = ["cryptography (==41.0.5)"]
 azureblockblob = ["azure-storage-blob (>=12.15.0)"]
 brotli = ["brotli (>=1.0.0)", "brotlipy (>=0.7.0)"]
 cassandra = ["cassandra-driver (>=3.25.0,<4)"]
@@ -576,26 +576,26 @@ couchbase = ["couchbase (>=3.0.0)"]
 couchdb = ["pycouchdb (==1.14.2)"]
 django = ["Django (>=2.2.28)"]
 dynamodb = ["boto3 (>=1.26.143)"]
-elasticsearch = ["elasticsearch (<8.0)"]
+elasticsearch = ["elastic-transport (<=8.10.0)", "elasticsearch (<=8.10.1)"]
 eventlet = ["eventlet (>=0.32.0)"]
 gevent = ["gevent (>=1.5.0)"]
 librabbitmq = ["librabbitmq (>=2.0.0)"]
 memcache = ["pylibmc (==1.6.3)"]
 mongodb = ["pymongo[srv] (>=4.0.2)"]
-msgpack = ["msgpack (==1.0.5)"]
+msgpack = ["msgpack (==1.0.7)"]
 pymemcache = ["python-memcached (==1.59)"]
 pyro = ["pyro4 (==4.82)"]
 pytest = ["pytest-celery (==0.0.0)"]
-redis = ["redis (>=4.5.2,!=4.5.5,<5.0.0)"]
+redis = ["redis (>=4.5.2,!=4.5.5,<6.0.0)"]
 s3 = ["boto3 (>=1.26.143)"]
 slmq = ["softlayer-messaging (>=1.0.3)"]
-solar = ["ephem (==4.1.4)"]
+solar = ["ephem (==4.1.5)"]
 sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"]
 sqs = ["boto3 (>=1.26.143)", "kombu[sqs] (>=5.3.0)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"]
 tblib = ["tblib (>=1.3.0)", "tblib (>=1.5.0)"]
 yaml = ["PyYAML (>=3.10)"]
 zookeeper = ["kazoo (>=1.3.1)"]
-zstd = ["zstandard (==0.21.0)"]
+zstd = ["zstandard (==0.22.0)"]
 
 [[package]]
 name = "certifi"
@@ -1868,13 +1868,13 @@ referencing = ">=0.28.0"
 
 [[package]]
 name = "kombu"
-version = "5.3.2"
+version = "5.3.3"
 description = "Messaging library for Python."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "kombu-5.3.2-py3-none-any.whl", hash = "sha256:b753c9cfc9b1e976e637a7cbc1a65d446a22e45546cd996ea28f932082b7dc9e"},
+    {file = "kombu-5.3.3-py3-none-any.whl", hash = "sha256:6cd5c5d5ef77538434b8f81f3e265c414269418645dbb47dbf130a8a05c3e357"},
-    {file = "kombu-5.3.2.tar.gz", hash = "sha256:0ba213f630a2cb2772728aef56ac6883dc3a2f13435e10048f6e97d48506dbbd"},
+    {file = "kombu-5.3.3.tar.gz", hash = "sha256:1491df826cfc5178c80f3e89dd6dfba68e484ef334db81070eb5cb8094b31167"},
 ]
 
 [package.dependencies]
@@ -1884,14 +1884,14 @@ vine = "*"
 [package.extras]
 azureservicebus = ["azure-servicebus (>=7.10.0)"]
 azurestoragequeues = ["azure-identity (>=1.12.0)", "azure-storage-queue (>=12.6.0)"]
-confluentkafka = ["confluent-kafka (==2.1.1)"]
+confluentkafka = ["confluent-kafka (>=2.2.0)"]
 consul = ["python-consul2"]
 librabbitmq = ["librabbitmq (>=2.0.0)"]
 mongodb = ["pymongo (>=4.1.1)"]
 msgpack = ["msgpack"]
 pyro = ["pyro4"]
 qpid = ["qpid-python (>=0.26)", "qpid-tools (>=0.26)"]
-redis = ["redis (>=4.5.2)"]
+redis = ["redis (>=4.5.2,!=4.5.5,<6.0.0)"]
 slmq = ["softlayer-messaging (>=1.0.3)"]
 sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"]
 sqs = ["boto3 (>=1.26.143)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"]
@@ -3675,13 +3675,13 @@ wsproto = ">=0.14"
 
 [[package]]
 name = "twilio"
-version = "8.10.0"
+version = "8.10.1"
 description = "Twilio API client and TwiML generator"
 optional = false
 python-versions = ">=3.7.0"
 files = [
-    {file = "twilio-8.10.0-py2.py3-none-any.whl", hash = "sha256:1eb04af92f3e70fcc87a2fd30617f53784e34045d054e4ae3dc9cfe7bdf1e692"},
+    {file = "twilio-8.10.1-py2.py3-none-any.whl", hash = "sha256:eb08ac17c8eb4f6176907b4e7dea984102488fb86ad146ecd47e8a8dfcf3cfa3"},
-    {file = "twilio-8.10.0.tar.gz", hash = "sha256:3bf2def228ceaa7519f4d6e58b2e3c9cb5d865af02b4618239e52c9d9e75e29d"},
+    {file = "twilio-8.10.1.tar.gz", hash = "sha256:902267856d09cf1f59b7fa4af594edae0225fdd8b473a6ef8e5799e823e0a611"},
 ]
 
 [package.dependencies]
@@ -3924,13 +3924,13 @@ test = ["Cython (>=0.29.32,<0.30.0)", "aiohttp", "flake8 (>=3.9.2,<3.10.0)", "my
 
 [[package]]
 name = "vine"
-version = "5.0.0"
+version = "5.1.0"
-description = "Promises, promises, promises."
+description = "Python promises."
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "vine-5.0.0-py2.py3-none-any.whl", hash = "sha256:4c9dceab6f76ed92105027c49c823800dd33cacce13bdedc5b914e3514b7fb30"},
+    {file = "vine-5.1.0-py3-none-any.whl", hash = "sha256:40fdf3c48b2cfe1c38a49e9ae2da6fda88e4794c810050a728bd7413811fb1dc"},
-    {file = "vine-5.0.0.tar.gz", hash = "sha256:7d3b1624a953da82ef63462013bbd271d3eb75751489f9807598e8f340bd637e"},
+    {file = "vine-5.1.0.tar.gz", hash = "sha256:8b62e981d35c41049211cf62a0a1242d8c1ee9bd15bb196ce38aefd6799e61e0"},
 ]
 
 [[package]]
schema.yml (31 changes)
@@ -18942,7 +18942,7 @@ paths:
       description: ''
   /sources/ldap/{slug}/sync_status/:
     get:
-      operationId: sources_ldap_sync_status_list
+      operationId: sources_ldap_sync_status_retrieve
       description: Get source's sync status
       parameters:
       - in: path
@@ -18960,9 +18960,7 @@ paths:
           content:
             application/json:
               schema:
-                type: array
-                items:
-                  $ref: '#/components/schemas/Task'
+                $ref: '#/components/schemas/LDAPSyncStatus'
           description: ''
         '400':
           content:
@@ -32812,9 +32810,19 @@ components:
           type: string
           format: uuid
           description: Property mappings used for group creation/updating.
+        connectivity:
+          type: object
+          additionalProperties:
+            type: object
+            additionalProperties:
+              type: string
+          nullable: true
+          description: Get cached source connectivity
+          readOnly: true
       required:
       - base_dn
      - component
+      - connectivity
       - icon
       - managed
       - meta_model_name
@@ -32948,6 +32956,21 @@ components:
       - name
      - server_uri
       - slug
+    LDAPSyncStatus:
+      type: object
+      description: LDAP Source sync status
+      properties:
+        is_running:
+          type: boolean
+          readOnly: true
+        tasks:
+          type: array
+          items:
+            $ref: '#/components/schemas/Task'
+          readOnly: true
+      required:
+      - is_running
+      - tasks
     LayoutEnum:
       enum:
       - stacked
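The sync-status endpoint changes from returning a bare `Task` array (operation `sources_ldap_sync_status_list`) to a single `LDAPSyncStatus` object (`sources_ldap_sync_status_retrieve`) that also carries `is_running`. A hedged sketch of how a client might model the new payload; only `LDAPSyncStatus` is spelled out in the schema above, the `Task` fields are inferred from what the UI reads further down:

package api

// LDAPSyncStatus mirrors components/schemas/LDAPSyncStatus.
type LDAPSyncStatus struct {
	IsRunning bool   `json:"is_running"`
	Tasks     []Task `json:"tasks"`
}

// Task is sketched from the fields used by the admin UI; the full schema
// defines more.
type Task struct {
	TaskName string   `json:"task_name"`
	Status   string   `json:"status"`
	Messages []string `json:"messages"`
}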
@@ -6,7 +6,7 @@
     "": {
       "name": "@goauthentik/web-tests",
       "devDependencies": {
-        "@trivago/prettier-plugin-sort-imports": "^4.2.1",
+        "@trivago/prettier-plugin-sort-imports": "^4.3.0",
         "@typescript-eslint/eslint-plugin": "^6.10.0",
         "@typescript-eslint/parser": "^6.10.0",
         "@wdio/cli": "^8.22.1",
@@ -17,10 +17,10 @@
         "eslint-config-google": "^0.14.0",
         "eslint-plugin-sonarjs": "^0.23.0",
         "npm-run-all": "^4.1.5",
-        "prettier": "^3.0.3",
+        "prettier": "^3.1.0",
         "ts-node": "^10.9.1",
         "typescript": "^5.2.2",
-        "wdio-wait-for": "^3.0.7"
+        "wdio-wait-for": "^3.0.8"
       }
     },
     "node_modules/@aashutoshrathi/word-wrap": {
@@ -787,9 +787,9 @@
       "dev": true
     },
     "node_modules/@trivago/prettier-plugin-sort-imports": {
-      "version": "4.2.1",
+      "version": "4.3.0",
-      "resolved": "https://registry.npmjs.org/@trivago/prettier-plugin-sort-imports/-/prettier-plugin-sort-imports-4.2.1.tgz",
+      "resolved": "https://registry.npmjs.org/@trivago/prettier-plugin-sort-imports/-/prettier-plugin-sort-imports-4.3.0.tgz",
-      "integrity": "sha512-iuy2MPVURGdxILTchHr15VAioItuYBejKfcTmQFlxIuqA7jeaT6ngr5aUIG6S6U096d6a6lJCgaOwlRrPLlOPg==",
+      "integrity": "sha512-r3n0onD3BTOVUNPhR4lhVK4/pABGpbA7bW3eumZnYdKaHkf1qEC+Mag6DPbGNuuh0eG8AaYj+YqmVHSiGslaTQ==",
       "dev": true,
       "dependencies": {
         "@babel/generator": "7.17.7",
@@ -6590,9 +6590,9 @@
       }
     },
     "node_modules/prettier": {
-      "version": "3.0.3",
+      "version": "3.1.0",
-      "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz",
+      "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.1.0.tgz",
-      "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==",
+      "integrity": "sha512-TQLvXjq5IAibjh8EpBIkNKxO749UEWABoiIZehEPiY4GNpVdhaFKqSTu+QrlU6D2dPAfubRmtJTi4K4YkQ5eXw==",
       "dev": true,
       "bin": {
         "prettier": "bin/prettier.cjs"
@@ -8571,9 +8571,9 @@
       }
     },
     "node_modules/wdio-wait-for": {
-      "version": "3.0.7",
+      "version": "3.0.8",
-      "resolved": "https://registry.npmjs.org/wdio-wait-for/-/wdio-wait-for-3.0.7.tgz",
+      "resolved": "https://registry.npmjs.org/wdio-wait-for/-/wdio-wait-for-3.0.8.tgz",
-      "integrity": "sha512-NLxEg57+DAQvsEgsAcuF0zM2XDAQTfbKn2mN4nw9hDzz3RfgsZbCxvp93Nm/3609QuxpikC+MxgQ5ORLSoptvA==",
+      "integrity": "sha512-Lptqzqso57sia7q6BRG2M+4S0YysXobcj9gchZxJBqYewgoH4e6Rime6i4WseIW85zmDMJu8pMSWNK4efong8A==",
       "dev": true,
       "engines": {
         "node": "^16.13 || >=18"
@@ -3,7 +3,7 @@
     "private": true,
     "type": "module",
     "devDependencies": {
-        "@trivago/prettier-plugin-sort-imports": "^4.2.1",
+        "@trivago/prettier-plugin-sort-imports": "^4.3.0",
         "@typescript-eslint/eslint-plugin": "^6.10.0",
         "@typescript-eslint/parser": "^6.10.0",
         "@wdio/cli": "^8.22.1",
@@ -14,10 +14,10 @@
         "eslint-config-google": "^0.14.0",
         "eslint-plugin-sonarjs": "^0.23.0",
         "npm-run-all": "^4.1.5",
-        "prettier": "^3.0.3",
+        "prettier": "^3.1.0",
         "ts-node": "^10.9.1",
         "typescript": "^5.2.2",
-        "wdio-wait-for": "^3.0.7"
+        "wdio-wait-for": "^3.0.8"
     },
     "scripts": {
         "wdio": "wdio run ./wdio.conf.ts",
File diff suppressed because it is too large.
@@ -36,17 +36,17 @@
         "@codemirror/lang-xml": "^6.0.2",
         "@codemirror/legacy-modes": "^6.3.3",
         "@codemirror/theme-one-dark": "^6.1.2",
-        "@formatjs/intl-listformat": "^7.5.1",
+        "@formatjs/intl-listformat": "^7.5.2",
         "@fortawesome/fontawesome-free": "^6.4.2",
-        "@goauthentik/api": "^2023.10.3-1699554078",
+        "@goauthentik/api": "^2023.10.3-1699884123",
         "@lit-labs/context": "^0.4.0",
         "@lit-labs/task": "^3.1.0",
         "@lit/localize": "^0.11.4",
         "@open-wc/lit-helpers": "^0.6.0",
         "@patternfly/elements": "^2.4.0",
         "@patternfly/patternfly": "^4.224.2",
-        "@sentry/browser": "^7.79.0",
+        "@sentry/browser": "^7.80.0",
-        "@sentry/tracing": "^7.79.0",
+        "@sentry/tracing": "^7.80.0",
         "@webcomponents/webcomponentsjs": "^2.8.0",
         "base64-js": "^1.5.1",
         "chart.js": "^4.4.0",
@@ -64,14 +64,14 @@
         "yaml": "^2.3.4"
     },
     "devDependencies": {
-        "@babel/core": "^7.23.2",
+        "@babel/core": "^7.23.3",
         "@babel/plugin-proposal-class-properties": "^7.18.6",
-        "@babel/plugin-proposal-decorators": "^7.23.2",
+        "@babel/plugin-proposal-decorators": "^7.23.3",
-        "@babel/plugin-transform-private-methods": "^7.22.5",
+        "@babel/plugin-transform-private-methods": "^7.23.3",
-        "@babel/plugin-transform-private-property-in-object": "^7.22.11",
+        "@babel/plugin-transform-private-property-in-object": "^7.23.3",
-        "@babel/plugin-transform-runtime": "^7.23.2",
+        "@babel/plugin-transform-runtime": "^7.23.3",
-        "@babel/preset-env": "^7.23.2",
+        "@babel/preset-env": "^7.23.3",
-        "@babel/preset-typescript": "^7.23.2",
+        "@babel/preset-typescript": "^7.23.3",
         "@hcaptcha/types": "^1.0.3",
         "@jackfranklin/rollup-plugin-markdown": "^0.4.0",
         "@jeysal/storybook-addon-css-user-preferences": "^0.2.0",
@@ -87,7 +87,7 @@
         "@storybook/blocks": "^7.1.1",
         "@storybook/web-components": "^7.5.3",
         "@storybook/web-components-vite": "^7.5.3",
-        "@trivago/prettier-plugin-sort-imports": "^4.2.1",
+        "@trivago/prettier-plugin-sort-imports": "^4.3.0",
         "@types/chart.js": "^2.9.40",
         "@types/codemirror": "5.60.13",
         "@types/grecaptcha": "^3.0.7",
@@ -104,12 +104,12 @@
         "eslint-plugin-storybook": "^0.6.15",
         "lit-analyzer": "^2.0.1",
         "npm-run-all": "^4.1.5",
-        "prettier": "^3.0.3",
+        "prettier": "^3.1.0",
         "pseudolocale": "^2.0.0",
         "pyright": "^1.1.335",
         "react": "^18.2.0",
         "react-dom": "^18.2.0",
-        "rollup": "^4.3.0",
+        "rollup": "^4.4.0",
         "rollup-plugin-copy": "^3.5.0",
         "rollup-plugin-cssimport": "^1.0.3",
         "rollup-plugin-postcss-lit": "^2.1.0",
@@ -44,11 +44,11 @@ export class LDAPSyncStatusChart extends AKChart<SyncStatus[]> {
         await Promise.all(
             sources.results.map(async (element) => {
                 try {
-                    const health = await api.sourcesLdapSyncStatusList({
+                    const health = await api.sourcesLdapSyncStatusRetrieve({
                         slug: element.slug,
                     });
 
-                    health.forEach((task) => {
+                    health.tasks.forEach((task) => {
                         if (task.status !== TaskStatusEnum.Successful) {
                             metrics.failed += 1;
                         }
@@ -60,7 +60,7 @@ export class LDAPSyncStatusChart extends AKChart<SyncStatus[]> {
                     metrics.healthy += 1;
                 }
             });
-            if (health.length < 1) {
+            if (health.tasks.length < 1) {
                 metrics.unsynced += 1;
             }
         } catch {
@@ -0,0 +1,50 @@
+import { AKElement } from "@goauthentik/app/elements/Base";
+import "@patternfly/elements/pf-tooltip/pf-tooltip.js";
+
+import { msg } from "@lit/localize";
+import { CSSResult, TemplateResult, html } from "lit";
+import { customElement, property } from "lit/decorators.js";
+
+import PFList from "@patternfly/patternfly/components/List/list.css";
+import PFBase from "@patternfly/patternfly/patternfly-base.css";
+
+@customElement("ak-source-ldap-connectivity")
+export class LDAPSourceConnectivity extends AKElement {
+    @property()
+    connectivity?: {
+        [key: string]: {
+            [key: string]: string;
+        };
+    };
+
+    static get styles(): CSSResult[] {
+        return [PFBase, PFList];
+    }
+
+    render(): TemplateResult {
+        if (!this.connectivity) {
+            return html``;
+        }
+        return html`<ul class="pf-c-list">
+            ${Object.keys(this.connectivity).map((serverKey) => {
+                let serverLabel = html`${serverKey}`;
+                if (serverKey === "__all__") {
+                    serverLabel = html`<b>${msg("Global status")}</b>`;
+                }
+                const server = this.connectivity![serverKey];
+                const content = html`${serverLabel}: ${server.status}`;
+                let tooltip = html`${content}`;
+                if (server.status === "ok") {
+                    tooltip = html`<pf-tooltip position="top">
+                        <ul slot="content" class="pf-c-list">
+                            <li>${msg("Vendor")}: ${server.vendor}</li>
+                            <li>${msg("Version")}: ${server.version}</li>
+                        </ul>
+                        ${content}
+                    </pf-tooltip>`;
+                }
+                return html`<li>${tooltip}</li>`;
+            })}
+        </ul>`;
+    }
+}
@@ -1,3 +1,4 @@
+import "@goauthentik/admin/sources/ldap/LDAPSourceConnectivity";
 import "@goauthentik/admin/sources/ldap/LDAPSourceForm";
 import "@goauthentik/app/elements/rbac/ObjectPermissionsPage";
 import { DEFAULT_CONFIG } from "@goauthentik/common/api/config";
@@ -25,9 +26,9 @@ import PFBase from "@patternfly/patternfly/patternfly-base.css";
 
 import {
     LDAPSource,
+    LDAPSyncStatus,
     RbacPermissionsAssignedByUsersListModelEnum,
     SourcesApi,
-    Task,
     TaskStatusEnum,
 } from "@goauthentik/api";
 
@@ -48,7 +49,7 @@ export class LDAPSourceViewPage extends AKElement {
     source!: LDAPSource;
 
     @state()
-    syncState: Task[] = [];
+    syncState?: LDAPSyncStatus;
 
     static get styles(): CSSResult[] {
         return [PFBase, PFPage, PFButton, PFGrid, PFContent, PFCard, PFDescriptionList, PFList];
@@ -62,6 +63,51 @@ export class LDAPSourceViewPage extends AKElement {
         });
     }
 
+    renderSyncStatus(): TemplateResult {
+        if (!this.syncState) {
+            return html`${msg("No sync status.")}`;
+        }
+        if (this.syncState.isRunning) {
+            return html`${msg("Sync currently running.")}`;
+        }
+        if (this.syncState.tasks.length < 1) {
+            return html`${msg("Not synced yet.")}`;
+        }
+        return html`
+            <ul class="pf-c-list">
+                ${this.syncState.tasks.map((task) => {
+                    let header = "";
+                    if (task.status === TaskStatusEnum.Warning) {
+                        header = msg("Task finished with warnings");
+                    } else if (task.status === TaskStatusEnum.Error) {
+                        header = msg("Task finished with errors");
+                    } else {
+                        header = msg(str`Last sync: ${task.taskFinishTimestamp.toLocaleString()}`);
+                    }
+                    return html`<li>
+                        <p>${task.taskName}</p>
+                        <ul class="pf-c-list">
+                            <li>${header}</li>
+                            ${task.messages.map((m) => {
+                                return html`<li>${m}</li>`;
+                            })}
+                        </ul>
+                    </li> `;
+                })}
+            </ul>
+        `;
+    }
+
+    load(): void {
+        new SourcesApi(DEFAULT_CONFIG)
+            .sourcesLdapSyncStatusRetrieve({
+                slug: this.source.slug,
+            })
+            .then((state) => {
+                this.syncState = state;
+            });
+    }
+
 render(): TemplateResult {
         if (!this.source) {
             return html``;
@@ -72,13 +118,7 @@ export class LDAPSourceViewPage extends AKElement {
                 data-tab-title="${msg("Overview")}"
                 class="pf-c-page__main-section pf-m-no-padding-mobile"
                 @activate=${() => {
-                    new SourcesApi(DEFAULT_CONFIG)
-                        .sourcesLdapSyncStatusList({
-                            slug: this.source.slug,
-                        })
-                        .then((state) => {
-                            this.syncState = state;
-                        });
+                    this.load();
                 }}
             >
                 <div class="pf-l-grid pf-m-gutter">
@@ -137,42 +177,25 @@ export class LDAPSourceViewPage extends AKElement {
                     </ak-forms-modal>
                 </div>
             </div>
-            <div class="pf-c-card pf-l-grid__item pf-m-12-col">
+            <div class="pf-c-card pf-l-grid__item pf-m-2-col">
+                <div class="pf-c-card__title">
+                    <p>${msg("Connectivity")}</p>
+                </div>
+                <div class="pf-c-card__body">
+                    <ak-source-ldap-connectivity
+                        .connectivity=${this.source.connectivity}
+                    ></ak-source-ldap-connectivity>
+                </div>
+            </div>
+            <div class="pf-c-card pf-l-grid__item pf-m-10-col">
                 <div class="pf-c-card__title">
                     <p>${msg("Sync status")}</p>
                 </div>
-                <div class="pf-c-card__body">
-                    ${this.syncState.length < 1
-                        ? html`<p>${msg("Not synced yet.")}</p>`
-                        : html`
-                              <ul class="pf-c-list">
-                                  ${this.syncState.map((task) => {
-                                      let header = "";
-                                      if (task.status === TaskStatusEnum.Warning) {
-                                          header = msg("Task finished with warnings");
-                                      } else if (task.status === TaskStatusEnum.Error) {
-                                          header = msg("Task finished with errors");
-                                      } else {
-                                          header = msg(
-                                              str`Last sync: ${task.taskFinishTimestamp.toLocaleString()}`,
-                                          );
-                                      }
-                                      return html`<li>
-                                          <p>${task.taskName}</p>
-                                          <ul class="pf-c-list">
-                                              <li>${header}</li>
-                                              ${task.messages.map((m) => {
-                                                  return html`<li>${m}</li>`;
-                                              })}
-                                          </ul>
-                                      </li> `;
-                                  })}
-                              </ul>
-                          `}
-                </div>
+                <div class="pf-c-card__body">${this.renderSyncStatus()}</div>
                 <div class="pf-c-card__footer">
                     <ak-action-button
                         class="pf-m-secondary"
+                        ?disabled=${this.syncState?.isRunning}
                         .apiRequest=${() => {
                             return new SourcesApi(DEFAULT_CONFIG)
                                 .sourcesLdapPartialUpdate({
@@ -186,6 +209,7 @@ export class LDAPSourceViewPage extends AKElement {
                                         composed: true,
                                     }),
                                 );
+                                this.load();
                             });
                         }}
                     >
@@ -39,9 +39,8 @@ const container = (testItem: TemplateResult) =>
 export const NumberInput = () => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const displayChange = (ev: any) => {
-        document.getElementById(
-            "number-message-pad",
-        )!.innerText = `Value selected: ${JSON.stringify(ev.target.value, null, 2)}`;
+        document.getElementById("number-message-pad")!.innerText =
+            `Value selected: ${JSON.stringify(ev.target.value, null, 2)}`;
     };
 
     return container(
@@ -46,9 +46,8 @@ export const SwitchInput = () => {
 
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const displayChange = (ev: any) => {
-        document.getElementById(
-            "switch-message-pad",
-        )!.innerText = `Value selected: ${JSON.stringify(ev.target.checked, null, 2)}`;
+        document.getElementById("switch-message-pad")!.innerText =
+            `Value selected: ${JSON.stringify(ev.target.checked, null, 2)}`;
     };
 
     return container(
@@ -39,9 +39,8 @@ const container = (testItem: TemplateResult) =>
 export const TextareaInput = () => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const displayChange = (ev: any) => {
-        document.getElementById(
-            "textarea-message-pad",
-        )!.innerText = `Value selected: ${JSON.stringify(ev.target.value, null, 2)}`;
+        document.getElementById("textarea-message-pad")!.innerText =
+            `Value selected: ${JSON.stringify(ev.target.value, null, 2)}`;
     };
 
     return container(
@@ -54,9 +54,8 @@ const testOptions = [
 export const ToggleGroup = () => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const displayChange = (ev: any) => {
-        document.getElementById(
-            "toggle-message-pad",
-        )!.innerText = `Value selected: ${ev.detail.value}`;
+        document.getElementById("toggle-message-pad")!.innerText =
+            `Value selected: ${ev.detail.value}`;
     };
 
     return container(
@@ -5,6 +5,7 @@ import { CustomEmitterElement } from "@goauthentik/elements/utils/eventEmitter";

 import { Task, TaskStatus } from "@lit-labs/task";
 import { css, html } from "lit";
+import { property } from "lit/decorators.js";

 import PFButton from "@patternfly/patternfly/components/Button/button.css";
 import PFSpinner from "@patternfly/patternfly/components/Spinner/spinner.css";
@@ -57,6 +58,9 @@ export abstract class BaseTaskButton extends CustomEmitterElement(AKElement) {

     actionTask: Task;

+    @property({ type: Boolean })
+    disabled = false;
+
     constructor() {
         super();
         this.onSuccess = this.onSuccess.bind(this);
@@ -121,6 +125,7 @@ export abstract class BaseTaskButton extends CustomEmitterElement(AKElement) {
             part="spinner-button"
             class="pf-c-button pf-m-progress ${this.buttonClasses}"
             @click=${this.onClick}
+            ?disabled=${this.disabled}
         >
             ${this.actionTask.render({ pending: () => this.spinner })}
             <slot></slot>
@@ -0,0 +1,223 @@
---
title: "IPv6 addresses and why you need to make the switch now"
description: "IPv6 addresses have been commercially available since 2010. But is there any compelling reason for sysadmins and security engineers to make the switch?"
slug: 2023-11-09-IPv6-addresses
authors:
    - name: Jens Langhammer
      title: CTO at Authentik Security Inc
      url: https://github.com/BeryJu
      image_url: https://github.com/BeryJu.png
tags:
    - authentik
    - IP address
    - IPv4
    - IPv6
    - IP address exhaustion
    - NAT Gateway
    - IETF
    - Internet Engineering Task Force
    - IANA
    - Internet Assigned Numbers Authority
    - IPv6 address format
    - SSO
    - security
    - identity provider
    - authentication
hide_table_of_contents: false
---

> **_authentik is an open source Identity Provider that unifies your identity needs into a single platform, replacing Okta, Active Directory, and auth0. Authentik Security is a [public benefit company](https://github.com/OpenCoreVentures/ocv-public-benefit-company/blob/main/ocv-public-benefit-company-charter.md) building on top of the open source project._**

---

IPv6 addresses have been commercially available since 2010. Yet, after Google's IPv6 rollout the following year, adoption by the system administrators and security engineers responsible for entire organizations' networks has been slower than you might expect. Neither population size nor the plethora of work and personal devices that accompanies a large workforce accurately predicts which countries have deployed this protocol.

In this blog post, I briefly explain what IP addresses are and how they work, share why we at Authentik Security went full IPv6 in May 2023, and then set out some reasons why you should switch now.

## What are IP addresses?

IP addresses are locations (similar to street addresses) assigned to every point (often referred to as a node) on a network, allowing system administrators and others to identify and locate each node through which traffic and communication passes via the internet. For example, every server, printer, computer, laptop, and phone in a single workplace network has its own IP address.

We use domain names for websites to avoid having to remember IP addresses, though our readers who are sysadmins—used to referencing all sorts of nodes deep within their organization's networks—will recall them at the drop of a hat.

But increasingly, since many devices are online and [96.6% of internet users now use a smartphone](https://www.oberlo.com/statistics/how-many-people-have-smartphones), most Internet of Things (IoT) devices that we have in our workplaces and homes _also_ have their own IP address. This includes:

- Computers, laptops, and smartphones
- Database servers, web servers, mail servers, virtual servers (virtual machines), and servers that store software packages for distribution
- Other devices such as network printers, routers, and services running on computer networks
- Domain names for websites, which are mapped to IP addresses using Domain Name Servers (DNS)

IP addresses are centrally overseen by the Internet Assigned Numbers Authority ([IANA](https://www.iana.org/)), with five [Regional Internet Registries](https://www.nro.net/about/rirs/) (RIRs).

## What is the state of the IP landscape right now?

Well, it's all down to numbers.

The previous version of this network-layer communications protocol is known as IPv4. From our informed vantage point—looking back over the rapid growth of ecommerce, business, government, educational, and entertainment services across the internet—it's easy to see how its originators could not possibly have predicted that demand for IPv4 addresses would outstrip supply.

Add in the ubiquity of connected devices that allow us to access and consume those services and you can see the problem.

IP address exhaustion was foreseen in the 1980s, which is why the Internet Engineering Task Force ([IETF](https://www.ietf.org/)) started work on IPv6 in the early 1990s. The very last free /8 block of IPv4 addresses was issued by IANA in January 2011. The first RIR to run out of IPv4 addresses was ARIN (North America) in 2015, followed by RIPE (Europe) in 2019 and LACNIC (Latin America and the Caribbean) in 2020.

The following realities contributed to the depletion of IPv4 addresses:

- IPv4 addresses were designed to use 32 bits and are written with decimal numbers
- This allowed for only about 4.3 billion addresses

The IPv4 address format is written as four decimal numbers (each 0–255), separated by periods.

Even though IPv4 addresses still trade hands, it's actually quite difficult now to buy a completely unused block. What's more, they're expensive for smaller organizations (currently around $39 each), and leasing is cheaper. Unless you can acquire them from those sources, you'll likely now be issued IPv6 ones.

> Interesting historical fact: IPv5 was developed specifically for streaming video and voice, becoming the basis for VoIP, though it was never widely adopted as a standard protocol.

### IPv6 addresses: history and adoption

Development of IPv6 was initiated by the IETF in 1994, and it was published as a draft standard in December 1998. IPv6 went live in June 2012 and was ratified as an internet standard in July 2017.

There is an often-circulated metaphor from J. Wiljakka's IEEE paper, [Transition to IPv6 in GPRS and WCDMA Mobile Networks](https://ieeexplore.ieee.org/document/995863), stating that every grain of sand on every seashore could be allocated its own IPv6 address. Let me illustrate.

- IPv6 addresses were designed to use 128 bits and are written with hexadecimal digits (the numbers 0–9 and the letters A–F).
- So, how many IPv6 addresses are there? In short, about 340 undecillion (2^128, or roughly 3.4 × 10^38)—we will not run out any time soon!

The IPv6 address format is written as 8 groups of 4 hexadecimal digits (each digit representing 4 bits), with the groups separated by colons.
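
To make the size difference concrete, here is a minimal sketch using only Python's standard `ipaddress` module; the two addresses are arbitrary documentation-range examples, not addresses from any real network:

```python
# Compare the two address spaces and formats with the standard library.
# Both example addresses come from the reserved documentation ranges.
import ipaddress

print(f"{2**32:,}")     # IPv4 space: 4,294,967,296 (~4.3 billion)
print(f"{2**128:.3e}")  # IPv6 space: ~3.403e+38 (about 340 undecillion)

v4 = ipaddress.IPv4Address("192.0.2.1")                # four decimal octets
v6 = ipaddress.IPv6Address("2001:db8::8a2e:370:7334")  # hex groups, colon-separated

print(v6.exploded)
# 2001:0db8:0000:0000:0000:8a2e:0370:7334 -> 8 groups of 4 hex digits
```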
> Importantly, the hierarchical structure optimizes global IP routing, keeping routing tables small.

If you plan to make the switch to IPv6, it's worth noting that you'll need to ensure that your devices, router, and ISP all support it.

### Upward trend in worldwide adoption by country

Over 42.9% of Google users worldwide now access search over the IPv6 protocol. It's intriguing to note which countries have majority IPv6 adoption:

- France 74.38%
- Germany 71.52%
- India 70.18%
- Malaysia 62.67%
- Greece 61.43%
- Saudi Arabia 60.93%

And yet China, Indonesia, Pakistan, Nigeria, and Russia lag surprisingly far behind many others (at between 5–15% adoption), given their population sizes. Even many ISPs have been slow to switch.

You can consult Google's [per-country IPv6 adoption statistics](https://www.google.com/intl/en/ipv6/statistics.html#tab=per-country-ipv6-adoption) to see where your location sits in the league table.

## Why we decided on a full IPv6 deployment

The average internet user won't be aware of anything much beyond what an IP address is, if even that. For system administrators, however, IP addresses form a crucial part of an organization's computer network infrastructure.

In our case, the impetus to use IPv6 addresses for authentik came from our own internal Infrastructure Engineer, Marc Schmitt. We initially considered keeping IPv4 for internal traffic and, as an interim measure, providing IPv6 at the edge only (remaining with IPv4 for everything else). However, that would still have required providing IPv6 support for customers who needed it.

In the end, we determined it would be more efficient to adopt IPv6 while we still had time to purchase, deploy, and configure it at our leisure across our existing network. We found it to be a mostly straightforward process. Some applications did not yet fully support IPv6, but we were aided by the fact that we use open source software: we were able to contribute back the changes needed to add IPv6 support to the tools we use. We were thrilled to have close access to responsive communities with some (not all!) of the tool vendors to help with any integration issues. [Plausible](https://plausible.io/), our web analytics tool, was especially helpful and supportive in our shift to IPv6.

### Future-proofing IP addresses on our network and platform

While it seemed like there was no urgent reason to deploy IPv6 across our network, we knew that one day it _would_ suddenly become pressing, once ISPs and larger organizations had completely run out of still-circulating IPv4 addresses.

For those customers who have not yet shifted to IPv6, we still provide IPv4 support at the edge, configuring our load balancers to receive requests over IPv4 and IPv6 and forward them internally over IPv6 to our services (such as our customer portal, for example).

### Limiting ongoing spend

Deploying IPv6 becomes less expensive as time goes on. If we'd opted to remain with IPv4 even temporarily, we knew we would have needed to buy more IPv4 addresses.

In addition, we were paying our cloud provider for the NAT Gateway that translates our IPv4 addresses—all of which are private—to public IP addresses, and on top of that we were charged a few cents per GB of traffic. These costs mount up, particularly when we pull Docker images multiple times per day, and they were ongoing, in addition to our existing cloud provider subscription. With IPv6, since addresses are already public, there is no private-to-public translation to pay for; costs are limited to the amount of data (incoming and outgoing traffic) passing through the network.

### Unlimited pods

When using the IPv4 protocol and pulling IP addresses from the same subnet for both nodes and Kubernetes pods, our cloud provider limits the number of pods (21) that you can attach to a single node. With IPv6, the limit is so much higher that it is insignificant.

### Cluster setup

All of our original clusters were configured for IPv4 only. It seemed like a good time to build in IPv6 while we were already investing time in renewing a cluster.

We'd already been planning to switch out a cluster for several reasons:

- We wanted to build a new cluster using ArgoCD (to replace the existing FluxCD one) for better GitOps, since ArgoCD comes with a built-in UI and provides a test deployment of the changes made in PRs to the application.
- We wanted to change the Container Network Interface (CNI) to select an IP from the same subnet, as further future-proofing for when more clusters are added (a sandbox for Authentik Security and another sandbox for customers, for example). We enhanced our AWS-VPC-CNI with [Cilium](https://cilium.io/) to handle the interconnections between clusters and currently still use it to grab IPs.

## IPv6 ensures everything works out of the box

If you're a system administrator with limited time and resources, you'll be concerned with ensuring that all devices, software, and connections work across your network, and that traffic can flow securely without bottlenecks. So it's reassuring to know that IPv6 works out of the box—reducing the onboarding, expense, and maintenance feared by already overburdened sysadmins.

### Stateless address auto-configuration (SLAAC)

Each device on which IPv6 has been enabled will independently assign itself IP addresses by default. With IPv6, there is no need for static or manual DHCP IP address configuration (though manual configuration is still supported). This is how it works:

1. When a device is switched on, it requests a network prefix.
2. A router (or routers) on the link provides the network prefix to the host.
3. Previously, the subnet prefix was combined with an interface ID generated from the interface's MAC address. However, having a stable IP based on the MAC address raises privacy concerns, so most devices now just generate a random interface ID instead—see the sketch below.
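
For the curious, here is a minimal Python sketch of the legacy EUI-64 derivation mentioned in step 3 (the MAC address and router prefix are invented example values; as noted, modern devices usually randomize the interface ID instead):

```python
# Sketch of the classic (now mostly superseded) EUI-64 scheme: a 64-bit
# interface ID derived from a 48-bit MAC, combined with the router's prefix.
import ipaddress

def eui64_interface_id(mac: str) -> bytes:
    octets = bytearray(int(part, 16) for part in mac.split(":"))
    octets[0] ^= 0x02  # flip the universal/local bit of the first octet
    return bytes(octets[:3]) + b"\xff\xfe" + bytes(octets[3:])  # insert ff:fe

prefix = ipaddress.IPv6Network("2001:db8:1234:5678::/64")  # advertised by the router
iid = eui64_interface_id("52:54:00:12:34:56")              # made-up example MAC
address = ipaddress.IPv6Address(int(prefix.network_address) | int.from_bytes(iid, "big"))
print(address)  # 2001:db8:1234:5678:5054:ff:fe12:3456
```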
### No need to maintain both protocols across your network or convert IPv4 to IPv6

Unless you already have IPv6 deployed right across your network, if your traffic comes in via IPv4 or legacy networks, you'll have to:

- Maintain both protocols
- Route traffic differently, depending on what it is (a quick way to see which address families a host publishes is sketched below)
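
The check below uses only the Python standard library; the hostname is an arbitrary example:

```python
# List the address families (A vs AAAA records) a host resolves to.
# On a dual-stack machine this typically prints both IPv6 and IPv4 entries.
import socket

for family, _, _, _, sockaddr in socket.getaddrinfo("example.com", 443, proto=socket.IPPROTO_TCP):
    label = "IPv6" if family == socket.AF_INET6 else "IPv4"
    print(label, sockaddr[0])
```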
### No IP address sharing

Typically, public IP addresses—particularly in Europe—are shared by multiple individual units in a single apartment building, or by multiple homes on the same street. This is not really a problem for private individuals, because most people have private IP addresses assigned to them by their routers.

However, those in charge of system administration for organizations and workplaces want to avoid sharing IP addresses. We are almost all subject to various country, state, and territory-based data protection and other compliance legislation, which makes it important to reduce the risks posed by improperly configured static IP addresses. And, given the virtually unlimited number of IP addresses now available with the IPv6 protocol, configuring unique IP addresses for every node on a network is possible.

## OK, but are there any compelling reasons for _me_ to adopt IPv6 addresses _now_?

If our positive experience and outcomes, as well as the out-of-the-box nature of IPv6, have not yet persuaded you, these reasons might pique your interest.

### Ubiquitous support for the IPv6 protocol

Consider how off-putting it is for users that some online services still do not offer otherwise ubiquitous identity protection mechanisms, such as Single Sign-on ([SSO](https://goauthentik.io/blog/2023-06-21-demystifying-security)) and Multi-factor Authentication (MFA). And think of systems that do not allow you to switch off or otherwise configure pesky tracking settings that contradict data protection legislation.

Increasingly, and in the same way, professionals will simply assume that our online platforms, network services, smart devices, and tools support the IPv6 protocol—or they might go elsewhere. While not every app supports IPv6 yet, and migration can carry risk, putting it off indefinitely could deter buyers from purchasing your software solution.

### Man-in-the-Middle attack reduction

Man-in-the-Middle (MITM) attacks rely on redirecting or otherwise changing the communication between two parties, using Address Resolution Protocol (ARP) poisoning and other naming-type interceptions. This is how many malicious ecommerce hacks target consumers: via spoofed ecommerce, banking, password reset, or MFA links sent by email or SMS. Experiencing this kind of attack is less likely when you deploy and correctly configure the IPv6 protocol, and connect to other networks and nodes on which it is similarly configured. For example, you should enable IPv6 routing, but also include DNS information and network security policies.

## Are there any challenges with IPv6 that I should be aware of before starting to make the switch?

Great question! Let's address each of the stumbling blocks in turn.

### Long, multipart hexadecimal numbers

Since they are very long, IPv6 addresses are less memorable than IPv4 ones.

However, this is alleviated by a built-in abbreviation standard. Here are the general principles:

- Drop any leading zeros in a group
- Replace a group of all zeros with a single zero
- Replace consecutive groups of zeros with a double colon (used at most once per address)

Though this might take a moment to memorize, familiarity comes through use.
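
You rarely need to apply these rules by hand; Python's standard `ipaddress` module implements them. A small sketch on an arbitrary documentation-range address:

```python
# The abbreviation rules above, demonstrated with the standard library.
import ipaddress

addr = ipaddress.IPv6Address("2001:0db8:0000:0000:0000:0000:0042:0001")

print(addr.compressed)  # 2001:db8::42:1 (leading zeros dropped, :: used once)
print(addr.exploded)    # 2001:0db8:0000:0000:0000:0000:0042:0001
```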
### Handling firewalls in IPv6

With IPv4, the deployment of Network Address Translation (NAT) enables system administrators in larger enterprises—with hundreds or thousands of connected, online devices—to provide a sense of security. Devices with private IP addresses are presented to the public internet via NAT firewalls and routers that mask those private addresses behind a single public one.

- This helps to keep organizations' IP addresses, devices, and networks hidden and secure.
- Hiding the private IP address discourages malicious attacks that would attempt to target an individual IP address.

By removing the need for a huge number of public IPv4 addresses, NAT offers sysadmins additional benefits:

- Helping to manage the central problem of the limited number of available IPv4 addresses
- Allowing flexibility in how you build and configure your network, without having to change the IP addresses of internal nodes
- Limiting the admin burden of assigning and managing IP addresses, particularly if you manage a large number of devices across networks

### Firewall filter rules

It is difficult for some to move away from this secure and familiar setup. With IPv6, however, NAT is not deployed. This might prove to be a concern if you are used to relying on NAT to provide a layer of security across your network.

Instead, while a firewall is still one of the default protective mechanisms, system administrators must deploy filter rules in place of NAT. The evaluation works as sketched after this list:

- In your router, you'll be able to add both IPv4 and IPv6 values—with many device vendors now enabling IPv6 by default.
- Then, if you've also configured filtering rules, packets arriving at the router are checked against your firewall filter rules. Each rule checks whether the packet header matches the rule's filtering condition, including IP information.
    - If it does, the rule's filter action is applied
    - If not, the packet simply proceeds to the next rule

If you configure filtering on your router, don't forget to also enable IPv6 there, on your other devices, and with your ISP.
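
Conceptually, this evaluation is just first-match-wins. A toy Python sketch of that logic (the rule set, networks, and default action are invented examples, not any particular firewall's behavior):

```python
# First-match filter evaluation as described above: check each rule in
# order, apply its action on a match, otherwise fall through to a default.
import ipaddress

RULES = [
    # (source network the rule matches, action to take)
    (ipaddress.ip_network("2001:db8:bad::/48"), "drop"),
    (ipaddress.ip_network("2001:db8::/32"), "accept"),
]
DEFAULT_ACTION = "drop"  # a sensible default when no rule matches

def evaluate(src: str) -> str:
    source = ipaddress.ip_address(src)
    for network, action in RULES:
        if source in network:  # the header matches the rule's condition
            return action      # apply the filter action
    return DEFAULT_ACTION      # no rule matched

print(evaluate("2001:db8:bad::1"))  # drop
print(evaluate("2001:db8:1::42"))   # accept
```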
## Have you deployed IPv6 addresses to tackle address exhaustion?

Yes, it is true that there is still a way to go before IPv6 is adopted worldwide, as we discussed above. However, as the pace of innovative technologies, solutions, and platforms continues, we predict this will simply become one more common instrument in our tool bag.

We'd be very interested to know what you think of the IPv6 protocol, whether you've already converted, and how you found the process. Do you have any ongoing challenges?

Join the Authentik Security community on [Github](https://github.com/goauthentik/authentik) or [Discord](https://discord.com/invite/jg33eMhnj6), or send us an email at hello@goauthentik.io. We look forward to hearing from you.
@@ -71,16 +71,38 @@ To check if your config has been applied correctly, you can run the following command

 ## Redis Settings

-- `AUTHENTIK_REDIS__HOST`: Hostname of your Redis Server
-- `AUTHENTIK_REDIS__PORT`: Redis port, defaults to 6379
-- `AUTHENTIK_REDIS__PASSWORD`: Password for your Redis Server
-- `AUTHENTIK_REDIS__TLS`: Use TLS to connect to Redis, defaults to false
-- `AUTHENTIK_REDIS__TLS_REQS`: Redis TLS requirements, defaults to "none"
-- `AUTHENTIK_REDIS__DB`: Database, defaults to 0
-- `AUTHENTIK_REDIS__CACHE_TIMEOUT`: Timeout for cached data until it expires in seconds, defaults to 300
-- `AUTHENTIK_REDIS__CACHE_TIMEOUT_FLOWS`: Timeout for cached flow plans until they expire in seconds, defaults to 300
-- `AUTHENTIK_REDIS__CACHE_TIMEOUT_POLICIES`: Timeout for cached policies until they expire in seconds, defaults to 300
-- `AUTHENTIK_REDIS__CACHE_TIMEOUT_REPUTATION`: Timeout for cached reputation until they expire in seconds, defaults to 300
+- `AUTHENTIK_REDIS__HOST`: Redis server host when not using configuration URL
+- `AUTHENTIK_REDIS__PORT`: Redis server port when not using configuration URL
+- `AUTHENTIK_REDIS__DB`: Redis server database when not using configuration URL
+- `AUTHENTIK_REDIS__USERNAME`: Redis server username when not using configuration URL
+- `AUTHENTIK_REDIS__PASSWORD`: Redis server password when not using configuration URL
+- `AUTHENTIK_REDIS__TLS`: Redis server connection using TLS when not using configuration URL
+- `AUTHENTIK_REDIS__TLS_REQS`: Redis server TLS connection requirements when not using configuration URL
+
+## Result Backend Settings
+
+- `AUTHENTIK_RESULT_BACKEND__URL`: Result backend configuration URL, uses [the Redis Settings](#redis-settings) by default
+
+## Cache Settings
+
+- `AUTHENTIK_CACHE__URL`: Cache configuration URL, uses [the Redis Settings](#redis-settings) by default
+- `AUTHENTIK_CACHE__TIMEOUT`: Timeout for cached data until it expires in seconds, defaults to 300
+- `AUTHENTIK_CACHE__TIMEOUT_FLOWS`: Timeout for cached flow plans until they expire in seconds, defaults to 300
+- `AUTHENTIK_CACHE__TIMEOUT_POLICIES`: Timeout for cached policies until they expire in seconds, defaults to 300
+- `AUTHENTIK_CACHE__TIMEOUT_REPUTATION`: Timeout for cached reputation until they expire in seconds, defaults to 300
+
+:::info
+`AUTHENTIK_CACHE__TIMEOUT_REPUTATION` only applies to the cache expiry, see [`AUTHENTIK_REPUTATION__EXPIRY`](#authentik_reputation__expiry) to control how long reputation is persisted for.
+:::
+
+## Channel Layer Settings (inter-instance communication)
+
+- `AUTHENTIK_CHANNEL__URL`: Channel layers configuration URL, uses [the Redis Settings](#redis-settings) by default
+
+## Broker Settings
+
+- `AUTHENTIK_BROKER__URL`: Broker configuration URL, defaults to Redis using [the respective settings](#redis-settings)
+- `AUTHENTIK_BROKER__TRANSPORT_OPTIONS`: Base64 encoded broker transport options

 :::info
 `AUTHENTIK_REDIS__CACHE_TIMEOUT_REPUTATION` only applies to the cache expiry, see [`AUTHENTIK_REPUTATION__EXPIRY`](#authentik_reputation__expiry) to control how long reputation is persisted for.
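
Since `AUTHENTIK_BROKER__TRANSPORT_OPTIONS` above expects a base64-encoded value, one way to produce it is to encode a JSON object. A minimal sketch, assuming JSON-serialized options (the `visibility_timeout` key is a Celery Redis transport option, shown purely as an example; consult the documentation for the options authentik actually honors):

```python
# Encode broker transport options for AUTHENTIK_BROKER__TRANSPORT_OPTIONS.
import base64
import json

options = {"visibility_timeout": 7200}  # seconds; example value only
encoded = base64.b64encode(json.dumps(options).encode()).decode()
print(encoded)  # eyJ2aXNpYmlsaXR5X3RpbWVvdXQiOiA3MjAwfQ==
```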
@@ -0,0 +1,52 @@
---
title: Release 2024.1
slug: "/releases/2024.1"
---

## Breaking changes

- Removal of deprecated metrics

    - `authentik_outpost_flow_timing_get` -> `authentik_outpost_flow_timing_get_seconds`
    - `authentik_outpost_flow_timing_post` -> `authentik_outpost_flow_timing_post_seconds`
    - `authentik_outpost_ldap_requests` -> `authentik_outpost_ldap_request_duration_seconds`
    - `authentik_outpost_ldap_requests_rejected` -> `authentik_outpost_ldap_requests_rejected_total`
    - `authentik_outpost_proxy_requests` -> `authentik_outpost_proxy_request_duration_seconds`
    - `authentik_outpost_proxy_upstream_time` -> `authentik_outpost_proxy_upstream_response_duration_seconds`
    - `authentik_outpost_radius_requests` -> `authentik_outpost_radius_request_duration_seconds`
    - `authentik_outpost_radius_requests_rejected` -> `authentik_outpost_radius_requests_rejected_total`
    - `authentik_main_requests` -> `authentik_main_request_duration_seconds`

## New features

## Upgrading

This release does not introduce any new requirements.

### docker-compose

To upgrade, download the new docker-compose file and update the Docker stack with the new version, using these commands:

```shell
wget -O docker-compose.yml https://goauthentik.io/version/2024.1/docker-compose.yml
docker-compose up -d
```

The `-O` flag retains the downloaded file's name, overwriting any existing local file with the same name.

### Kubernetes

Upgrade the Helm Chart to the new version, using the following commands:

```shell
helm repo update
helm upgrade authentik authentik/authentik -f values.yaml --version ^2024.1
```

## Minor changes/fixes

<!-- _Insert the output of `make gen-changelog` here_ -->

## API Changes

<!-- _Insert output of `make gen-diff` here_ -->
@@ -13,3 +13,15 @@ or, for Kubernetes, run

 ```
 kubectl exec -it deployment/authentik-worker -c authentik -- ak ldap_sync *slug of the source*
 ```
+
+Starting with authentik 2023.10, you can also run the command below to explicitly check connectivity to the configured LDAP servers:
+
+```
+docker-compose run --rm worker ldap_check_connection *slug of the source*
+```
+
+or, for Kubernetes, run
+
+```
+kubectl exec -it deployment/authentik-worker -c authentik -- ak ldap_check_connection *slug of the source*
+```
@@ -19,7 +19,7 @@
         "clsx": "^2.0.0",
         "disqus-react": "^1.1.5",
         "postcss": "^8.4.31",
-        "prism-react-renderer": "^2.1.0",
+        "prism-react-renderer": "^2.2.0",
         "rapidoc": "^9.3.4",
         "react": "^18.2.0",
         "react-before-after-slider-component": "^1.1.8",
@@ -34,7 +34,7 @@
         "@docusaurus/tsconfig": "3.0.0",
         "@docusaurus/types": "3.0.0",
         "@types/react": "^18.2.37",
-        "prettier": "3.0.3",
+        "prettier": "3.1.0",
         "typescript": "~5.2.2"
     }
 },
@@ -13582,9 +13582,9 @@
             }
         },
         "node_modules/prettier": {
-            "version": "3.0.3",
-            "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz",
-            "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==",
+            "version": "3.1.0",
+            "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.1.0.tgz",
+            "integrity": "sha512-TQLvXjq5IAibjh8EpBIkNKxO749UEWABoiIZehEPiY4GNpVdhaFKqSTu+QrlU6D2dPAfubRmtJTi4K4YkQ5eXw==",
             "dev": true,
             "bin": {
                 "prettier": "bin/prettier.cjs"
@@ -13614,9 +13614,9 @@
             }
         },
         "node_modules/prism-react-renderer": {
-            "version": "2.1.0",
-            "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.1.0.tgz",
-            "integrity": "sha512-I5cvXHjA1PVGbGm1MsWCpvBCRrYyxEri0MC7/JbfIfYfcXAxHyO5PaUjs3A8H5GW6kJcLhTHxxMaOZZpRZD2iQ==",
+            "version": "2.2.0",
+            "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.2.0.tgz",
+            "integrity": "sha512-j4AN0VkEr72598+47xDvpzeYyeh/wPPRNTt9nJFZqIZUxwGKwYqYgt7RVigZ3ZICJWJWN84KEuMKPNyypyhNIw==",
             "dependencies": {
                 "@types/prismjs": "^1.26.0",
                 "clsx": "^1.2.1"
@@ -26,7 +26,7 @@
         "clsx": "^2.0.0",
         "disqus-react": "^1.1.5",
         "postcss": "^8.4.31",
-        "prism-react-renderer": "^2.1.0",
+        "prism-react-renderer": "^2.2.0",
         "rapidoc": "^9.3.4",
         "react-before-after-slider-component": "^1.1.8",
         "react-dom": "^18.2.0",
@@ -53,7 +53,7 @@
         "@docusaurus/tsconfig": "3.0.0",
         "@docusaurus/types": "3.0.0",
         "@types/react": "^18.2.37",
-        "prettier": "3.0.3",
+        "prettier": "3.1.0",
         "typescript": "~5.2.2"
     }
 }