Revert "Groups implementation (#609)" (#611)

This reverts commit 5f13f83697.
Andrey Borodin committed via GitHub, 2024-05-07 18:55:52 +05:00
parent 5f13f83697
commit 3f50c152a2
26 changed files with 215 additions and 842 deletions

View File

@ -111,6 +111,7 @@ Set up your CLion to build project in container, [manual](https://github.com/shu
* [include](documentation/configuration.md#include-string)
* [daemonize](documentation/configuration.md#daemonize-yesno)
* [sequential\_routing](documentation/configuration.md#sequential_routing-yesno)
* [priority](documentation/configuration.md#priority-integer)
* [pid\_file](documentation/configuration.md#pid_file-string)
* [unix\_socket\_dir](documentation/configuration.md#unix_socket_dir-string)

View File

@ -45,6 +45,7 @@ listen {
compression yes
}
storage "postgres_server" {
type "remote"
host "localhost"

View File

@ -79,7 +79,6 @@ COPY ./docker/ldap /ldap
COPY ./docker/lagpolling /lagpolling
COPY ./docker/shell-test /shell-test
COPY ./docker/tsa /tsa
COPY ./docker/group /group
COPY ./docker/xproto /xproto
COPY ./docker/copy /copy
COPY ./docker/gorm /gorm

View File

@ -49,7 +49,7 @@ sudo -u postgres /usr/bin/pg_basebackup -D /var/lib/postgresql/14/repl -R -h loc
sudo -u postgres /usr/lib/postgresql/14/bin/pg_ctl -D /var/lib/postgresql/14/repl/ -o '-p 5433' start
# Create databases
for database_name in db scram_db ldap_db auth_query_db db1 hba_db tsa_db group_db addr_db xproto_db "spqr-console"; do
for database_name in db scram_db ldap_db auth_query_db db1 hba_db tsa_db addr_db xproto_db "spqr-console"; do
sudo -u postgres createdb $database_name >> "$SETUP_LOG" 2>&1 || {
echo "ERROR: 'createdb $database_name' failed, examine the log"
cat "$SETUP_LOG"
@ -63,14 +63,6 @@ mkdir /var/cores
sudo sysctl -w kernel.core_pattern=/var/cores/core.%p.%e
pgbench -i -h localhost -p 5432 -U postgres postgres
# Create users
psql -h localhost -p 5432 -U postgres -c "create role group1; create role group2; create user group_checker; create user group_user1; create user group_user2; create user group_user3; create user group_user4; create user group_user5; create user group_checker1; create user group_checker2;" -d group_db >> $SETUP_LOG 2>&1 || {
echo "ERROR: users creation failed, examine the log"
cat "$SETUP_LOG"
cat "$PG_LOG"
exit 1
}
# Create users
psql -h localhost -p 5432 -U postgres -c "set password_encryption = 'scram-sha-256'; create user scram_user password 'scram_user_password';" -d scram_db >> $SETUP_LOG 2>&1 || {
echo "ERROR: users creation failed, examine the log"

View File

@ -6,13 +6,6 @@ cd /test_dir/test && /usr/bin/odyssey_test
setup
# group
/group/test_group.sh
if [ $? -eq 1 ]
then
exit 1
fi
# gorm
ody-start
/gorm/test.sh

View File

@ -1,122 +0,0 @@
listen {
host "*"
port 6432
}
storage "postgres_server" {
type "remote"
host "localhost"
port 5432
}
database "group_db" {
user "group_user1" {
authentication "none"
storage "postgres_server"
pool "session"
}
group "group1" {
authentication "md5"
password "password1"
storage "postgres_server"
storage_db "postgres"
storage_user "postgres"
pool_routing "internal"
pool "session"
group_query "SELECT rolname FROM pg_roles WHERE pg_has_role(rolname, 'group1', 'member');"
}
user "group_user2" {
authentication "none"
storage "postgres_server"
pool "session"
}
user "group_user3" {
authentication "none"
storage "postgres_server"
pool "session"
}
group "group2" {
authentication "md5"
password "password2"
storage "postgres_server"
storage_db "postgres"
storage_user "postgres"
pool_routing "internal"
pool "session"
group_query "SELECT rolname FROM pg_roles WHERE pg_has_role(rolname, 'group2', 'member');"
}
user "group_user4" {
authentication "none"
storage "postgres_server"
pool "session"
}
user "group_user5" {
authentication "none"
storage "postgres_server"
pool "session"
}
}
database default {
user default {
authentication "none"
storage "postgres_server"
pool "session"
pool_size 0
pool_timeout 0
pool_ttl 1201
pool_discard no
pool_cancel yes
pool_rollback yes
# seconds
pool_client_idle_timeout 20
# seconds
pool_idle_in_transaction_timeout 20
client_fwd_error yes
application_name_add_host yes
server_lifetime 1901
log_debug no
quantiles "0.99,0.95,0.5"
client_max 107
}
}
unix_socket_dir "/tmp"
unix_socket_mode "0644"
log_file "/var/log/odyssey.log"
log_format "%p %t %l [%i %s] (%c) %m\n"
log_debug no
log_config yes
log_session no
log_query no
log_stats yes
daemonize yes
locks_dir "/tmp/odyssey"
graceful_die_on_errors yes
enable_online_restart yes
bindwith_reuseport yes
stats_interval 60
pid_file "/var/run/odyssey.pid"

View File

@ -1,81 +0,0 @@
#!/bin/bash -x
set -ex
/usr/bin/odyssey /group/config.conf
users=("group_user1" "group_user2" "group_user3" "group_user4" "group_user5")
for user in "${users[@]}"; do
psql -h localhost -p 6432 -U "$user" -c "SELECT 1" group_db >/dev/null 2>&1 || {
echo "ERROR: failed backend auth with correct user auth"
cat /var/log/odyssey.log
echo "
"
cat /var/log/postgresql/postgresql-14-main.log
exit 1
}
done
ody-stop
psql -h localhost -p 5432 -U postgres -c "GRANT group1 TO group_user2;" group_db
psql -h localhost -p 5432 -U postgres -c "GRANT group1 TO group_user4;" group_db
psql -h localhost -p 5432 -U postgres -c "GRANT group2 TO group_user4;" group_db
psql -h localhost -p 5432 -U postgres -c "GRANT group1 TO group_user1;" group_db
/usr/bin/odyssey /group/config.conf
sleep 1
psql -h localhost -p 6432 -U group_user1 -c "SELECT 1" group_db >/dev/null 2>&1 || {
echo "ERROR: group auth apply for over user at config"
cat /var/log/odyssey.log
echo "
"
cat /var/log/postgresql/postgresql-14-main.log
exit 1
}
psql -h localhost -p 6432 -U group_user2 -c "SELECT 1" group_db >/dev/null 2>&1 && {
echo "ERROR: group auth not apply"
cat /var/log/odyssey.log
echo "
"
cat /var/log/postgresql/postgresql-14-main.log
exit 1
}
PGPASSWORD=password1 psql -h localhost -p 6432 -U group_user4 -c "SELECT 1" group_db >/dev/null 2>&1 && {
echo "ERROR: group auth not accepted down group"
cat /var/log/odyssey.log
echo "
"
cat /var/log/postgresql/postgresql-14-main.log
exit 1
}
PGPASSWORD=password2 psql -h localhost -p 6432 -U group_user4 -c "SELECT 1" group_db >/dev/null 2>&1 || {
echo "ERROR: group auth not apply"
cat /var/log/odyssey.log
echo "
"
cat /var/log/postgresql/postgresql-14-main.log
exit 1
}
ody-stop

View File

@ -56,6 +56,18 @@ PGPASSWORD=correct_password psql -h localhost -p 6432 -U user_unknown -c "SELECT
exit 1
}
kill -s HUP $(pgrep odyssey)
PGPASSWORD=correct_password PGCONNECT_TIMEOUT=5 psql -h localhost -p 6432 -U user_allow -c "SELECT 1" hba_db > /dev/null 2>&1 || {
echo "ERROR: unable to connect after SIGHUP"
cat /var/log/odyssey.log
echo "
"
cat /var/log/postgresql/postgresql-14-main.log
exit 1
}
ody-stop
#

View File

@ -16,6 +16,16 @@ By default Odyssey does not run as a daemon. Set to 'yes' to enable.
`daemonize no`
#### sequential\_routing *yes|no*
Try to match routes exactly in config order.
By default, Odyssey tries to match all specific routes first, and then all default ones.
This can be confusing, because an auth-denying default route can be overridden by a more specific auth-permitting route further down in the config.
With this option set, Odyssey matches routes strictly in config order, as in HBA files (see the sketch below).
`sequential_routing no`
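For illustration only — a minimal hypothetical config (the "app_db" and "app_user" names are made up, not part of this commit) showing why the matching order matters:
database default {
    user default {
        authentication "block"
        storage "postgres_server"
        pool "session"
    }
}
database "app_db" {
    user "app_user" {
        authentication "none"
        storage "postgres_server"
        pool "session"
    }
}
With the default matching order, the specific "app_db"/"app_user" route wins even though the blocking default route appears earlier in the file, so app_user can connect. With `sequential_routing yes`, routes are evaluated top-down as in HBA files, so the catch-all blocking route matches first and the connection is rejected.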
#### priority *integer*
Process priority.

View File

@ -1,10 +1,43 @@
#!/usr/bin/env bash
set -ex
set -e
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get -y --no-install-recommends install postgresql-14 postgresql-server-dev-14 libpq5 libpq-dev clang-format-11 libpam0g-dev libldap-dev
sudo pkill -9 postgres || true
echo -n | openssl s_client -connect https://scan.coverity.com:443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | sudo tee -a /etc/ssl/certs/ca-certificates.crt
if ! sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'; then
echo "Error adding PostgreSQL repository."
exit 1
fi
if ! wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -; then
echo "Error adding PostgreSQL repository key."
exit 1
fi
if ! sudo apt-get update; then
echo "Error updating package list."
exit 1
fi
if ! sudo apt-get -y --no-install-recommends install postgresql-14 postgresql-server-dev-14 libpq5 libpq-dev clang-format-11 libpam0g-dev libldap-dev; then
echo "Error installing PostgreSQL and its dependencies."
exit 1
fi
if pgrep "postgres" > /dev/null; then
if ! sudo pkill -9 postgres; then
echo "Error stopping PostgreSQL process."
exit 1
fi
fi
if ! sudo sh -c 'echo -n | openssl s_client -connect https://scan.coverity.com:443 | sed -ne "/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p" >> /etc/ssl/certs/ca-certificates.crt'; then
echo "Error adding SSL certificate."
exit 1
fi
if ! sudo apt-get clean; then
echo "Error cleaning apt-get cache."
exit 1
fi
echo "Script completed successfully."
exit 0

View File

@ -52,8 +52,7 @@ set(od_src
hba.c
hba_reader.c
hba_rule.c
mdb_iamproxy.c
group.c)
mdb_iamproxy.c)
if (PAM_FOUND)
list(APPEND od_src pam.c)

View File

@ -14,6 +14,7 @@ void od_config_init(od_config_t *config)
{
config->daemonize = 0;
config->priority = 0;
config->sequential_routing = 0;
config->log_debug = 0;
config->log_to_stdout = 1;
config->log_config = 0;
@ -245,6 +246,8 @@ void od_config_print(od_config_t *config, od_logger_t *logger)
od_config_yes_no(config->daemonize));
od_log(logger, "config", NULL, NULL, "priority %d",
config->priority);
od_log(logger, "config", NULL, NULL, "sequential_routing %s",
od_config_yes_no(config->sequential_routing));
if (config->pid_file)
od_log(logger, "config", NULL, NULL,
"pid_file %s", config->pid_file);

View File

@ -27,6 +27,7 @@ struct od_config_listen {
struct od_config {
int daemonize;
int priority;
int sequential_routing;
/* logging */
int log_to_stdout;
int log_debug;

View File

@ -16,6 +16,7 @@ typedef enum {
OD_LINCLUDE,
OD_LDAEMONIZE,
OD_LPRIORITY,
OD_LSEQROUTING,
OD_LLOG_TO_STDOUT,
OD_LLOG_DEBUG,
OD_LLOG_CONFIG,
@ -82,7 +83,6 @@ typedef enum {
OD_LDEFAULT,
OD_LDATABASE,
OD_LUSER,
OD_LGROUP,
OD_LPASSWORD,
OD_LROLE,
OD_LPOOL,
@ -146,7 +146,6 @@ typedef enum {
OD_LOPTIONS,
OD_LBACKEND_STARTUP_OPTIONS,
OD_LHBA_FILE,
OD_LGROUP_QUERY,
} od_lexeme_t;
static od_keyword_t od_config_keywords[] = {
@ -156,6 +155,7 @@ static od_keyword_t od_config_keywords[] = {
od_keyword("include", OD_LINCLUDE),
od_keyword("daemonize", OD_LDAEMONIZE),
od_keyword("priority", OD_LPRIORITY),
od_keyword("sequential_routing", OD_LSEQROUTING),
od_keyword("pid_file", OD_LPID_FILE),
od_keyword("unix_socket_dir", OD_LUNIX_SOCKET_DIR),
od_keyword("unix_socket_mode", OD_LUNIX_SOCKET_MODE),
@ -238,7 +238,6 @@ static od_keyword_t od_config_keywords[] = {
/* database */
od_keyword("database", OD_LDATABASE),
od_keyword("group", OD_LGROUP),
od_keyword("user", OD_LUSER),
od_keyword("password", OD_LPASSWORD),
od_keyword("role", OD_LROLE),
@ -269,9 +268,6 @@ static od_keyword_t od_config_keywords[] = {
od_keyword("storage_user", OD_LSTORAGE_USER),
od_keyword("storage_password", OD_LSTORAGE_PASSWORD),
/* group */
od_keyword("group_query", OD_LGROUP_QUERY),
/* auth */
od_keyword("authentication", OD_LAUTHENTICATION),
od_keyword("auth_common_name", OD_LAUTH_COMMON_NAME),
@ -1726,19 +1722,6 @@ static int od_config_reader_rule_settings(od_config_reader_t *reader,
return NOT_OK_RESPONSE;
}
continue;
/* group_query */
case OD_LGROUP_QUERY:
if (rule->group == NULL) {
od_config_reader_error(
reader, NULL,
"group settings specified for non-group route");
return NOT_OK_RESPONSE;
}
if (!od_config_reader_string(
reader, &rule->group->group_query)) {
return NOT_OK_RESPONSE;
}
continue;
default:
return NOT_OK_RESPONSE;
}
@ -1882,80 +1865,6 @@ static int od_config_reader_route(od_config_reader_t *reader, char *db_name,
return od_config_reader_rule_settings(reader, rule, extentions, NULL);
}
static int od_config_reader_group(od_config_reader_t *reader, char *db_name,
od_group_t *group, od_extention_t *extentions)
{
/* group name */
char *group_name = NULL;
if (!od_config_reader_is(reader, OD_PARSER_STRING))
return NOT_OK_RESPONSE;
if (!od_config_reader_string(reader, &group_name))
return NOT_OK_RESPONSE;
// TODO: need to find a way to create internal rules for a specific database
char route_usr[strlen("group_") + strlen(group_name) + 1];
char route_db[strlen("group_") + strlen(group_name) + 1];
snprintf(route_usr, sizeof route_usr, "%s%s", "group_", group_name);
snprintf(route_db, sizeof route_db, "%s%s", "group_", group_name);
od_rule_t *rule;
od_address_range_t default_address_range =
od_address_range_create_default();
rule = od_rules_match(reader->rules, route_db, route_usr,
&default_address_range, 0, 0, 1);
if (rule) {
od_errorf(reader->error, "route '%s.%s': is redefined",
route_usr, route_usr);
return NOT_OK_RESPONSE;
}
rule = od_rules_add(reader->rules);
if (rule == NULL) {
return NOT_OK_RESPONSE;
}
rule->user_is_default = 0;
rule->user_name = strdup(route_usr);
rule->user_name_len = strlen(rule->user_name);
if (rule->user_name == NULL) {
return NOT_OK_RESPONSE;
}
rule->db_is_default = 0;
rule->db_name = strdup(route_db);
rule->db_name_len = strlen(rule->db_name);
if (rule->db_name == NULL)
return NOT_OK_RESPONSE;
rule->address_range = default_address_range;
group->group_name = strdup(group_name);
group->route_usr = strdup(rule->user_name);
group->route_db = strdup(rule->db_name);
rule->group = group;
/* { */
if (!od_config_reader_symbol(reader, '{'))
return NOT_OK_RESPONSE;
/* unreach */
if (od_config_reader_rule_settings(reader, rule, extentions, NULL) ==
NOT_OK_RESPONSE) {
goto error;
}
free(group_name);
// force several settings
group->storage_db = rule->storage_db;
group->storage_user = rule->storage_user;
rule->pool->routing = OD_RULE_POOL_INTERNAL;
return OK_RESPONSE;
error:
free(group_name);
return NOT_OK_RESPONSE;
}
static inline int od_config_reader_watchdog(od_config_reader_t *reader,
od_storage_watchdog_t *watchdog,
od_extention_t *extentions)
@ -2006,7 +1915,7 @@ static inline int od_config_reader_watchdog(od_config_reader_t *reader,
// force several settings
watchdog->storage_db = rule->storage_db;
watchdog->storage_user = rule->storage_user;
rule->pool->routing = OD_RULE_POOL_INTERNAL;
rule->pool->routing = OD_RULE_POOL_INTERVAL;
return OK_RESPONSE;
}
@ -2280,17 +2189,6 @@ static int od_config_reader_database(od_config_reader_t *reader,
if (rc == -1)
goto error;
continue;
case OD_LGROUP:;
od_group_t *group;
group = od_rules_group_allocate(reader->global);
if (group == NULL) {
return NOT_OK_RESPONSE;
}
rc = od_config_reader_group(reader, db_name, group,
extentions);
if (rc == -1)
goto error;
continue;
default:
od_config_reader_error(reader, &token,
"unexpected parameter");
@ -2378,6 +2276,13 @@ static int od_config_reader_parse(od_config_reader_t *reader,
goto error;
}
continue;
/* sequential_routing */
case OD_LSEQROUTING:
if (!od_config_reader_yes_no(
reader, &config->sequential_routing)) {
goto error;
}
continue;
/* pid_file */
case OD_LPID_FILE:
if (!od_config_reader_string(reader,

View File

@ -1,78 +0,0 @@
/*
* Odyssey.
*
* Scalable PostgreSQL connection pooler.
*/
#include <kiwi.h>
#include <machinarium.h>
#include <odyssey.h>
int od_group_free(od_group_t *group)
{
if (group == NULL)
return NOT_OK_RESPONSE;
if (group->route_usr)
free(group->route_usr);
if (group->route_db)
free(group->route_db);
if (group->storage_user)
free(group->storage_user);
if (group->storage_db)
free(group->storage_db);
if (group->group_name)
free(group->group_name);
if (group->group_query)
free(group->group_query);
free(group);
return OK_RESPONSE;
}
int od_group_parse_val_datarow(machine_msg_t *msg, char **group_member)
{
char *pos = (char *)machine_msg_data(msg) + 1;
uint32_t pos_size = machine_msg_size(msg) - 1;
/* size */
uint32_t size;
int rc;
rc = kiwi_read32(&size, &pos, &pos_size);
if (kiwi_unlikely(rc == -1))
goto error;
/* count */
uint16_t count;
rc = kiwi_read16(&count, &pos, &pos_size);
if (kiwi_unlikely(rc == -1))
goto error;
if (count != 1)
goto error;
/* (not used) */
uint32_t val_len;
rc = kiwi_read32(&val_len, &pos, &pos_size);
if (kiwi_unlikely(rc == -1)) {
goto error;
}
*group_member = strdup(pos);
return OK_RESPONSE;
error:
return NOT_OK_RESPONSE;
}
od_group_member_name_item_t *od_group_member_name_item_add(od_list_t *members)
{
od_group_member_name_item_t *item;
item = (od_group_member_name_item_t *)malloc(sizeof(*item));
if (item == NULL)
return NULL;
memset(item, 0, sizeof(*item));
od_list_init(&item->link);
od_list_append(members, &item->link);
return item;
}

View File

@ -1,41 +0,0 @@
/*
* Odyssey.
*
* Scalable PostgreSQL connection pooler.
*/
#ifndef ODYSSEY_GROUP_CHECK_ITER_INTERVAL
#define ODYSSEY_GROUP_CHECK_ITER_INTERVAL 500 // ms
typedef struct od_group od_group_t;
struct od_group {
char *route_usr;
char *route_db;
char *storage_user;
char *storage_db;
char *group_name;
char *group_query;
int check_retry;
int online;
od_global_t *global;
od_list_t link;
};
typedef struct od_group_member_name_item od_group_member_name_item_t;
struct od_group_member_name_item {
char *value;
int is_checked;
od_list_t link;
};
int od_group_free(od_group_t *);
int od_group_parse_val_datarow(machine_msg_t *, char **);
od_group_member_name_item_t *od_group_member_name_item_add(od_list_t *);
#endif /* ODYSSEY_GROUP_CHECK_ITER_INTERVAL */

View File

@ -30,6 +30,7 @@ od_hba_rule_t *od_hba_rule_create()
memset(hba, 0, sizeof(*hba));
od_list_init(&hba->database.values);
od_list_init(&hba->user.values);
hba->address_range = od_address_range_create_default();
return hba;
}

View File

@ -68,7 +68,4 @@ static inline int od_list_empty(od_list_t *list)
for (iterator = (list)->next; \
iterator != list && (safe = iterator->next); iterator = safe)
#define od_list_foreach_with_start(list, iterator) \
for (; iterator != list; iterator = (iterator)->next)
#endif /* ODYSSEY_LIST_H */

View File

@ -55,7 +55,6 @@
#include "sources/address.h"
#include "sources/storage.h"
#include "sources/group.h"
#include "sources/pool.h"
#include "sources/rules.h"
#include "sources/hba_rule.h"

View File

@ -98,7 +98,7 @@ int od_rule_matches_client(od_rule_pool_t *pool, od_pool_client_type_t t)
{
switch (t) {
case OD_POOL_CLIENT_INTERNAL:
return pool->routing == OD_RULE_POOL_INTERNAL;
return pool->routing == OD_RULE_POOL_INTERVAL;
case OD_POOL_CLIENT_EXTERNAL:
return pool->routing == OD_RULE_POOL_CLIENT_VISIBLE;
default:

View File

@ -16,7 +16,7 @@ typedef enum {
} od_rule_pool_type_t;
typedef enum {
OD_RULE_POOL_INTERNAL,
OD_RULE_POOL_INTERVAL,
OD_RULE_POOL_CLIENT_VISIBLE,
} od_rule_routing_type_t;

View File

@ -344,6 +344,19 @@ od_router_status_t od_router_route(od_router_t *router, od_client_t *client)
kiwi_be_startup_t *startup = &client->startup;
od_instance_t *instance = router->global->instance;
struct sockaddr_storage sa;
int salen;
struct sockaddr *saddr;
int rc;
salen = sizeof(sa);
saddr = (struct sockaddr *)&sa;
if (client->type == OD_POOL_CLIENT_EXTERNAL) {
rc = machine_getpeername(client->io.io, saddr, &salen);
if (rc == -1) {
return OD_ROUTER_ERROR;
}
}
/* match route */
assert(startup->database.value_len);
assert(startup->user.value_len);
@ -354,23 +367,17 @@ od_router_status_t od_router_route(od_router_t *router, od_client_t *client)
od_rule_t *rule =
NULL; // initialize rule for (line 365) and flag '-Wmaybe-uninitialized'
struct sockaddr_storage sa;
int salen;
struct sockaddr *saddr;
int rc;
int sequential = instance->config.sequential_routing;
switch (client->type) {
case OD_POOL_CLIENT_INTERNAL:
rule = od_rules_forward(&router->rules, startup->database.value,
startup->user.value, NULL, 1);
startup->user.value, NULL, 1,
sequential);
break;
case OD_POOL_CLIENT_EXTERNAL:
salen = sizeof(sa);
saddr = (struct sockaddr *)&sa;
rc = machine_getpeername(client->io.io, saddr, &salen);
if (rc == -1)
return OD_ROUTER_ERROR;
rule = od_rules_forward(&router->rules, startup->database.value,
startup->user.value, &sa, 0);
startup->user.value, &sa, 0,
sequential);
break;
case OD_POOL_CLIENT_UNDEF: // create that case for correct work of '-Wswitch' flag
break;

View File

@ -125,324 +125,6 @@ static inline od_rule_auth_t *od_rules_auth_find(od_rule_t *rule, char *name)
return NULL;
}
od_group_t *od_rules_group_allocate(od_global_t *global)
{
/* Allocate and force defaults */
od_group_t *group;
group = calloc(1, sizeof(*group));
if (group == NULL)
return NULL;
group->global = global;
group->check_retry = 10;
group->online = 1;
od_list_init(&group->link);
return group;
}
static inline int od_rule_update_auth(od_route_t *route, void **argv)
{
od_rule_t *rule = (od_rule_t *)argv[0];
od_rule_t *group_rule = (od_rule_t *)argv[1];
/* auth */
rule->auth = group_rule->auth;
rule->auth_mode = group_rule->auth_mode;
rule->auth_query = group_rule->auth_query;
rule->auth_query_db = group_rule->auth_query_db;
rule->auth_query_user = group_rule->auth_query_user;
rule->auth_common_name_default = group_rule->auth_common_name_default;
rule->auth_common_names = group_rule->auth_common_names;
rule->auth_common_names_count = group_rule->auth_common_names_count;
#ifdef PAM_FOUND
rule->auth_pam_service = group_rule->auth_pam_service;
rule->auth_pam_data = group_rule->auth_pam_data;
#endif
#ifdef LDAP_FOUND
rule->ldap_endpoint_name = group_rule->ldap_endpoint_name;
rule->ldap_endpoint = group_rule->ldap_endpoint;
rule->ldap_pool_timeout = group_rule->ldap_pool_timeout;
rule->ldap_pool_size = group_rule->ldap_pool_size;
rule->ldap_pool_ttl = group_rule->ldap_pool_ttl;
rule->ldap_storage_creds_list = group_rule->ldap_storage_creds_list;
rule->ldap_storage_credentials_attr =
group_rule->ldap_storage_credentials_attr;
#endif
rule->auth_module = group_rule->auth_module;
/* password */
rule->password = group_rule->password;
rule->password_len = group_rule->password_len;
return 0;
}
void od_rules_group_checker_run(void *arg)
{
od_group_checker_run_args *args = (od_group_checker_run_args *)arg;
od_rule_t *group_rule = args->rule;
od_group_t *group = group_rule->group;
od_rules_t *rules = args->rules;
od_list_t *i_copy = args->i_copy;
od_global_t *global = group->global;
od_router_t *router = global->router;
od_instance_t *instance = global->instance;
od_debug(&instance->logger, "group_checker", NULL, NULL,
"start group checking");
/* create internal auth client */
od_client_t *group_checker_client;
group_checker_client =
od_client_allocate_internal(global, "rule-group-checker");
if (group_checker_client == NULL) {
od_error(&instance->logger, "group_checker", NULL, NULL,
"route rule group_checker failed to allocate client");
return;
}
group_checker_client->global = global;
group_checker_client->type = OD_POOL_CLIENT_INTERNAL;
od_id_generate(&group_checker_client->id, "a");
/* set storage user and database */
kiwi_var_set(&group_checker_client->startup.user, KIWI_VAR_UNDEF,
group->route_usr, strlen(group->route_usr) + 1);
kiwi_var_set(&group_checker_client->startup.database, KIWI_VAR_UNDEF,
group->route_db, strlen(group->route_db) + 1);
machine_msg_t *msg;
char *group_member;
int rc;
/* route */
od_router_status_t status;
status = od_router_route(router, group_checker_client);
od_debug(&instance->logger, "group_checker", group_checker_client, NULL,
"routing to internal group_checker route status: %s",
od_router_status_to_str(status));
if (status != OD_ROUTER_OK) {
od_error(&instance->logger, "group_checker",
group_checker_client, NULL,
"route rule group_checker failed: %s",
od_router_status_to_str(status));
return;
}
for (;;) {
/* attach client to some route */
status = od_router_attach(router, group_checker_client, false);
od_debug(
&instance->logger, "group_checker",
group_checker_client, NULL,
"attaching group_checker client to backend connection status: %s",
od_router_status_to_str(status));
if (status != OD_ROUTER_OK) {
/* 1 second soft interval */
machine_sleep(1000);
continue;
}
od_server_t *server;
server = group_checker_client->server;
od_debug(&instance->logger, "group_checker",
group_checker_client, server,
"attached to server %s%.*s", server->id.id_prefix,
(int)sizeof(server->id.id), server->id.id);
/* connect to server, if necessary */
if (server->io.io == NULL) {
rc = od_backend_connect(server, "group_checker", NULL,
group_checker_client);
if (rc == NOT_OK_RESPONSE) {
od_debug(
&instance->logger, "group_checker",
group_checker_client, server,
"backend connect failed, retry after 1 sec");
od_router_close(router, group_checker_client);
/* 1 second soft interval */
machine_sleep(1000);
continue;
}
}
for (int retry = 0; retry < group->check_retry; ++retry) {
if (od_backend_query_send(
server, "group_checker", group->group_query,
NULL, strlen(group->group_query) + 1) ==
NOT_OK_RESPONSE) {
/* Retry later. TODO: Add logging. */
break;
}
int response_is_read = 0;
od_list_t members;
od_list_init(&members);
od_group_member_name_item_t *member;
while (1) {
msg = od_read(&server->io, UINT32_MAX);
if (msg == NULL) {
if (!machine_timedout()) {
od_error(&instance->logger,
"group_checker",
server->client, server,
"read error: %s",
od_io_error(
&server->io));
}
}
kiwi_be_type_t type;
type = *(char *)machine_msg_data(msg);
od_debug(&instance->logger, "group_checker",
server->client, server, "%s",
kiwi_be_type_to_string(type));
switch (type) {
case KIWI_BE_ERROR_RESPONSE:
od_backend_error(server,
"group_checker",
machine_msg_data(msg),
machine_msg_size(msg));
{
rc = NOT_OK_RESPONSE;
response_is_read = 1;
break;
}
case KIWI_BE_DATA_ROW: {
rc = od_group_parse_val_datarow(
msg, &group_member);
member = od_group_member_name_item_add(
&members);
member->value = group_member;
break;
}
case KIWI_BE_READY_FOR_QUERY:
od_backend_ready(server,
machine_msg_data(msg),
machine_msg_size(msg));
machine_msg_free(msg);
response_is_read = 1;
break;
default:
break;
}
if (response_is_read)
break;
}
od_router_close(router, group_checker_client);
bool have_default = false;
od_list_t *i;
od_list_foreach(&members, i)
{
od_group_member_name_item_t *member_name;
member_name = od_container_of(
i, od_group_member_name_item_t, link);
od_list_t *j = i_copy;
od_list_foreach_with_start(&rules->rules, j)
{
od_rule_t *rule;
rule = od_container_of(j, od_rule_t,
link);
if (rule->obsolete ||
rule->pool->routing ==
OD_RULE_POOL_INTERNAL ||
rule->db_is_default !=
group_rule->db_is_default)
continue;
if (rule->user_is_default) {
have_default = true;
} else if (strcmp(member_name->value,
rule->user_name) ==
0) {
void *argv[] = { rule,
group_rule };
od_router_foreach(
router,
od_rule_update_auth,
argv);
member_name->is_checked = 1;
}
}
}
// TODO: handle members with is_checked = 0. these rules should be inherited from the default one, if there is one
if (rc == OK_RESPONSE) {
od_debug(&instance->logger, "group_checker",
group_checker_client, server,
"group check success");
break;
}
// retry
}
/* detach and unroute */
if (group_checker_client->server) {
od_router_detach(router, group_checker_client);
}
if (group->online == 0) {
od_debug(&instance->logger, "group_checker",
group_checker_client, NULL,
"deallocating obsolete group_checker");
od_client_free(group_checker_client);
od_group_free(group);
return;
}
/* 7 second soft interval */
machine_sleep(7000);
}
}
od_retcode_t od_rules_groups_checkers_run(od_logger_t *logger,
od_rules_t *rules)
{
od_list_t *i;
od_list_foreach(&rules->rules, i)
{
od_rule_t *rule;
rule = od_container_of(i, od_rule_t, link);
if (rule->group) {
od_group_checker_run_args *args =
malloc(sizeof(od_group_checker_run_args));
args->rules = rules;
args->rule = rule;
args->i_copy = i->next;
int64_t coroutine_id;
coroutine_id = machine_coroutine_create(
od_rules_group_checker_run, args);
if (coroutine_id == INVALID_COROUTINE_ID) {
od_error(
logger, "system", NULL, NULL,
"failed to start group_checker coroutine");
return NOT_OK_RESPONSE;
}
machine_sleep(1000);
}
}
return OK_RESPONSE;
}
od_rule_t *od_rules_add(od_rules_t *rules)
{
od_rule_t *rule;
@ -520,8 +202,6 @@ void od_rules_rule_free(od_rule_t *rule)
free(rule->storage_password);
if (rule->pool)
od_rule_pool_free(rule->pool);
if (rule->group)
rule->group->online = 0;
if (rule->mdb_iamproxy_socket_path)
free(rule->mdb_iamproxy_socket_path);
@ -577,7 +257,8 @@ void od_rules_unref(od_rule_t *rule)
od_rules_rule_free(rule);
}
od_rule_t *od_rules_forward(od_rules_t *rules, char *db_name, char *user_name,
static od_rule_t *od_rules_forward_default(od_rules_t *rules, char *db_name,
char *user_name,
struct sockaddr_storage *user_addr,
int pool_internal)
{
@ -598,7 +279,7 @@ od_rule_t *od_rules_forward(od_rules_t *rules, char *db_name, char *user_name,
if (rule->obsolete)
continue;
if (pool_internal) {
if (rule->pool->routing != OD_RULE_POOL_INTERNAL) {
if (rule->pool->routing != OD_RULE_POOL_INTERVAL) {
continue;
}
} else {
@ -666,6 +347,64 @@ od_rule_t *od_rules_forward(od_rules_t *rules, char *db_name, char *user_name,
return rule_default_default_default;
}
static od_rule_t *
od_rules_forward_sequential(od_rules_t *rules, char *db_name, char *user_name,
struct sockaddr_storage *user_addr,
int pool_internal)
{
od_list_t *i;
od_rule_t *rule_matched = NULL;
bool db_matched = false, user_matched = false, addr_matched = false;
od_list_foreach(&rules->rules, i)
{
od_rule_t *rule;
rule = od_container_of(i, od_rule_t, link);
}
od_list_foreach(&rules->rules, i)
{
od_rule_t *rule;
rule = od_container_of(i, od_rule_t, link);
if (rule->obsolete) {
continue;
}
if (pool_internal) {
if (rule->pool->routing != OD_RULE_POOL_INTERVAL) {
continue;
}
} else {
if (rule->pool->routing !=
OD_RULE_POOL_CLIENT_VISIBLE) {
continue;
}
}
db_matched = rule->db_is_default ||
(strcmp(rule->db_name, db_name) == 0);
user_matched = rule->user_is_default ||
(strcmp(rule->user_name, user_name) == 0);
addr_matched =
rule->address_range.is_default ||
od_address_validate(&rule->address_range, user_addr);
if (db_matched && user_matched && addr_matched) {
rule_matched = rule;
break;
}
}
assert(rule_matched);
return rule_matched;
}
od_rule_t *od_rules_forward(od_rules_t *rules, char *db_name, char *user_name,
struct sockaddr_storage *user_addr,
int pool_internal, int sequential)
{
if (sequential) {
return od_rules_forward_sequential(rules, db_name, user_name,
user_addr, pool_internal);
}
return od_rules_forward_default(rules, db_name, user_name, user_addr,
pool_internal);
}
od_rule_t *od_rules_match(od_rules_t *rules, char *db_name, char *user_name,
od_address_range_t *address_range, int db_is_default,
int user_is_default, int pool_internal)
@ -677,7 +416,7 @@ od_rule_t *od_rules_match(od_rules_t *rules, char *db_name, char *user_name,
rule = od_container_of(i, od_rule_t, link);
/* filter out internal or client-visible rules */
if (pool_internal) {
if (rule->pool->routing != OD_RULE_POOL_INTERNAL) {
if (rule->pool->routing != OD_RULE_POOL_INTERVAL) {
continue;
}
} else {
@ -950,16 +689,25 @@ __attribute__((hot)) int od_rules_merge(od_rules_t *rules, od_rules_t *src,
int count_mark = 0;
int count_deleted = 0;
int count_new = 0;
int src_length = 0;
/* set order for new rules */
od_list_t *i;
od_list_foreach(&src->rules, i)
{
od_rule_t *rule;
rule = od_container_of(i, od_rule_t, link);
rule->order = src_length;
src_length++;
}
/* mark all rules for obsoletion */
od_list_t *i;
od_list_foreach(&rules->rules, i)
{
od_rule_t *rule;
rule = od_container_of(i, od_rule_t, link);
rule->mark = 1;
count_mark++;
od_hashmap_empty(rule->storage->acache);
}
@ -1061,6 +809,7 @@ __attribute__((hot)) int od_rules_merge(od_rules_t *rules, od_rules_t *src,
if (od_rules_rule_compare(origin, rule)) {
origin->mark = 0;
count_mark--;
origin->order = rule->order;
continue;
/* select rules with changes what needed disconnect */
} else if (!od_rules_rule_compare_to_drop(origin,
@ -1117,17 +866,37 @@ __attribute__((hot)) int od_rules_merge(od_rules_t *rules, od_rules_t *src,
}
}
/* sort rules according order, leaving obsolete at the end of the list */
od_list_t **sorted = calloc(src_length, sizeof(od_list_t *));
od_list_foreach_safe(&rules->rules, i, n)
{
od_rule_t *rule;
rule = od_container_of(i, od_rule_t, link);
if (rule->obsolete) {
continue;
}
assert(rule->order >= 0 && rule->order < src_length &&
sorted[rule->order] == NULL);
od_list_unlink(&rule->link);
sorted[rule->order] = &rule->link;
}
for (int s = src_length - 1; s >= 0; s--) {
assert(sorted[s] != NULL);
od_list_push(&rules->rules, sorted[s]);
}
free(sorted);
return count_new + count_mark + count_deleted;
}
int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
char *user_name, od_address_range_t *address_range)
char *user_name, char *address_range_string)
{
/* pooling mode */
if (!pool->type) {
od_error(logger, "rules", NULL, NULL,
"rule '%s.%s %s': pooling mode is not set", db_name,
user_name, address_range->string_value);
user_name, address_range_string);
return NOT_OK_RESPONSE;
}
if (strcmp(pool->type, "session") == 0) {
@ -1139,7 +908,7 @@ int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
} else {
od_error(logger, "rules", NULL, NULL,
"rule '%s.%s %s': unknown pooling mode", db_name,
user_name, address_range->string_value);
user_name, address_range_string);
return NOT_OK_RESPONSE;
}
@ -1148,24 +917,15 @@ int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
od_debug(
logger, "rules", NULL, NULL,
"rule '%s.%s %s': pool routing mode is not set, assuming \"client_visible\" by default",
db_name, user_name, address_range->string_value);
db_name, user_name, address_range_string);
} else if (strcmp(pool->routing_type, "internal") == 0) {
pool->routing = OD_RULE_POOL_INTERNAL;
pool->routing = OD_RULE_POOL_INTERVAL;
} else if (strcmp(pool->routing_type, "client_visible") == 0) {
pool->routing = OD_RULE_POOL_CLIENT_VISIBLE;
} else {
od_error(logger, "rules", NULL, NULL,
"rule '%s.%s %s': unknown pool routing mode", db_name,
user_name, address_range->string_value);
return NOT_OK_RESPONSE;
}
if (pool->routing == OD_RULE_POOL_INTERNAL &&
!address_range->is_default) {
od_error(
logger, "rules", NULL, NULL,
"rule '%s.%s %s': internal rules must have default address_range",
db_name, user_name, address_range->string_value);
user_name, address_range_string);
return NOT_OK_RESPONSE;
}
@ -1175,7 +935,7 @@ int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
od_error(
logger, "rules", NULL, NULL,
"rule '%s.%s %s': prepared statements support in session pool makes no sence",
db_name, user_name, address_range->string_value);
db_name, user_name, address_range_string);
return NOT_OK_RESPONSE;
}
@ -1183,7 +943,7 @@ int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
od_error(
logger, "rules", NULL, NULL,
"rule '%s.%s %s': pool discard is forbidden when using prepared statements support",
db_name, user_name, address_range->string_value);
db_name, user_name, address_range_string);
return NOT_OK_RESPONSE;
}
@ -1191,7 +951,7 @@ int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
od_error(
logger, "rules", NULL, NULL,
"rule '%s.%s %s': pool smart discard is forbidden without using prepared statements support",
db_name, user_name, address_range->string_value);
db_name, user_name, address_range_string);
return NOT_OK_RESPONSE;
}
@ -1200,8 +960,7 @@ int od_pool_validate(od_logger_t *logger, od_rule_pool_t *pool, char *db_name,
od_error(
logger, "rules", NULL, NULL,
"rule '%s.%s %s': cannot support prepared statements when 'DEALLOCATE ALL' present in discard string",
db_name, user_name,
address_range->string_value);
db_name, user_name, address_range_string);
return NOT_OK_RESPONSE;
}
}
@ -1284,7 +1043,7 @@ int od_rules_autogenerate_defaults(od_rules_t *rules, od_logger_t *logger)
rule->pool->pool = OD_RULE_POOL_TRANSACTION;
rule->pool->routing_type = strdup("internal");
rule->pool->routing = OD_RULE_POOL_INTERNAL;
rule->pool->routing = OD_RULE_POOL_INTERVAL;
rule->pool->size = OD_DEFAULT_INTERNAL_POLL_SZ;
rule->enable_password_passthrough = true;
@ -1421,7 +1180,8 @@ int od_rules_validate(od_rules_t *rules, od_config_t *config,
if (od_pool_validate(logger, rule->pool, rule->db_name,
rule->user_name,
&rule->address_range) == NOT_OK_RESPONSE) {
rule->address_range.string_value) ==
NOT_OK_RESPONSE) {
return NOT_OK_RESPONSE;
}

View File

@ -21,12 +21,6 @@ typedef enum {
OD_RULE_AUTH_CERT
} od_rule_auth_type_t;
typedef struct {
od_rule_t *rule;
od_rules_t *rules;
od_list_t *i_copy;
} od_group_checker_run_args;
struct od_rule_auth {
char *common_name;
od_list_t link;
@ -66,6 +60,7 @@ struct od_rule {
int mark;
int obsolete;
int refs;
int order;
/* id */
char *db_name;
@ -127,10 +122,6 @@ struct od_rule {
int catchup_timeout;
int catchup_checks;
/* group */
od_group_t *group; // set if rule is group
od_rule_t *group_rule;
/* PostgreSQL options */
kiwi_vars_t vars;
@ -184,16 +175,13 @@ void od_rules_unref(od_rule_t *);
int od_rules_compare(od_rule_t *, od_rule_t *);
od_rule_t *od_rules_forward(od_rules_t *, char *, char *,
struct sockaddr_storage *, int);
struct sockaddr_storage *, int, int);
/* search rule with desired characteristics */
od_rule_t *od_rules_match(od_rules_t *rules, char *db_name, char *user_name,
od_address_range_t *address_range, int db_is_default,
int user_is_default, int pool_internal);
/* group */
od_group_t *od_rules_group_allocate(od_global_t *global);
void od_rules_rule_free(od_rule_t *rule);
/* storage API */
@ -218,7 +206,4 @@ od_rule_auth_t *od_rules_auth_add(od_rule_t *);
void od_rules_auth_free(od_rule_auth_t *);
od_retcode_t od_rules_groups_checkers_run(od_logger_t *logger,
od_rules_t *rules);
#endif /* ODYSSEY_RULES_H */

View File

@ -305,7 +305,7 @@ static int calculate_client_proof(od_scram_state_t *scram_state,
od_scram_ctx_t *ctx = od_scram_HMAC_create();
const char *errstr = NULL;
od_scram_SaltedPassword(prepared_password, salt, strlen(salt),
od_scram_SaltedPassword(prepared_password, salt, SCRAM_DEFAULT_SALT_LEN,
iterations, scram_state->salted_password,
&errstr);

View File

@ -511,7 +511,6 @@ void od_system_config_reload(od_system_t *system)
}
od_config_free(&config);
od_hba_rules_free(&hba_rules);
if (instance->config.log_config)
od_rules_print(&rules, &instance->logger);
@ -582,8 +581,6 @@ static inline void od_system(void *arg)
if (rc == NOT_OK_RESPONSE)
return;
}
od_rules_groups_checkers_run(&instance->logger, &router->rules);
}
void od_system_init(od_system_t *system)