Merge pull request #5817 from FinnStutzenstein/debugging

Performance improvements for OS3+

commit 97950d5baa
@@ -112,6 +112,5 @@ export abstract class BaseComponent {
     protected onLeaveTinyMce(event: any): void {
         console.log('tinyevent:', event.event.type);
         this.saveHint = false;
-        // console.log("event: ", event.event.type);
     }
 }
@@ -76,7 +76,6 @@ export class AutoupdateThrottleService {
 
     public disableUntil(changeId: number): void {
         // Wait for an autoupdate with to_id >= changeId.
-        console.log(this.delay);
         if (!this.isActive) {
             return;
         }
@@ -99,6 +98,7 @@ export class AutoupdateThrottleService {
         }
         this.pendingAutoupdates = [];
 
+        console.log(`Processing ${autoupdates.length} pending autoupdates`);
         const autoupdate = this.mergeAutoupdates(autoupdates);
         this._autoupdatesToInject.next(autoupdate);
     }
@@ -40,9 +40,7 @@ export class ConstantsService {
 
     public constructor(communicationManager: CommunicationManagerService, private http: HttpService) {
         communicationManager.startCommunicationEvent.subscribe(async () => {
-            console.log('start communication');
             this.constants = await this.http.get<Constants>(environment.urlPrefix + '/core/constants/');
-            console.log('constants:', this.constants);
             Object.keys(this.subjects).forEach(key => {
                 this.subjects[key].next(this.constants[key]);
             });
@@ -133,7 +133,7 @@ export class NotifyService {
             );
         } catch (e) {
             if (!(e instanceof OfflineError)) {
-                console.log(e);
+                console.error(e);
             }
         }
     }
@@ -94,7 +94,7 @@ export class ProjectorDataService {
             );
         } catch (e) {
             if (!(e instanceof OfflineError)) {
-                console.log(e);
+                console.error(e);
             }
         }
     }
@@ -215,7 +215,6 @@ export class RelationManagerService {
                 result = descriptor.get.bind(viewModel)();
             } else {
                 result = target[property];
-                // console.log(property, target);
             }
         } else if (property in _model) {
             result = _model[property];
@@ -269,8 +269,6 @@ export class UserRepositoryService extends BaseRepository<ViewUser, User, UserTi
 
     public async update(update: Partial<User>, viewModel: ViewUser): Promise<void> {
         this.preventAlterationOnDemoUsers(viewModel);
-        console.log('update: ', update);
-
         return super.update(update, viewModel);
     }
 
@@ -54,7 +54,6 @@ export class UserImportListComponent extends BaseImportListComponentDirective<Us
                 prop: `newEntry.${property}`,
                 type: this.guessType(property as keyof User)
             };
-            console.log('singleColumnDef ', singleColumnDef);
 
             return singleColumnDef;
         });
@@ -104,7 +104,7 @@ export class SlideManager {
             // Read from the moduleRef injector and locate the dynamic component type
             dynamicComponentType = moduleRef.injector.get(SlideToken.token);
         } catch (e) {
-            console.log(
+            console.error(
                 'The module for Slide "' + slideName + '" is not configured right: Cannot file the slide token.'
             );
             throw e;
@@ -3,22 +3,32 @@ services:
   client:
     image: os3-client-dev
     build:
       context: ../client
       dockerfile: docker/Dockerfile.dev
     volumes:
       - ../client:/app
 
   server:
     image: os3-server-dev
     user: $UID:$GID
     build:
       context: ../server
       dockerfile: docker/Dockerfile.dev
     volumes:
       - ../server:/app
     depends_on:
       - redis
 
+  postgres:
+    image: postgres:11
+    environment:
+      - POSTGRES_USER=openslides
+      - POSTGRES_PASSWORD=openslides
+      - POSTGRES_DB=openslides
+    volumes:
+      - ./postgresql.dev.conf:/etc/postgresql/postgresql.conf
+    command: postgres -c 'config_file=/etc/postgresql/postgresql.conf'
+
   autoupdate:
     image: os3-autoupdate-dev
     environment:
@@ -158,7 +158,8 @@ services:
       - redis
       - server
     environment:
-      MESSAGE_BUS_HOST: redis
+      REDIS_WRITE_HOST: redis
+      MESSAGE_BUS_HOST: redis-slave
       WORKER_HOST: server
     networks:
       - back
691  docker/postgresql.dev.conf  (new file)
@@ -0,0 +1,691 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
[... the bulk of this 691-line file is the stock PostgreSQL 11 sample configuration: every other parameter is listed commented out at its default value under the usual section headers (FILE LOCATIONS, CONNECTIONS AND AUTHENTICATION, RESOURCE USAGE, WRITE-AHEAD LOG, REPLICATION, QUERY TUNING, REPORTING AND LOGGING, PROCESS TITLE, STATISTICS, AUTOVACUUM, CLIENT CONNECTION DEFAULTS, LOCK MANAGEMENT, VERSION AND PLATFORM COMPATIBILITY, ERROR HANDLING, CONFIG FILE INCLUDES, CUSTOMIZED OPTIONS). The only active, uncommented settings are the following ...]
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+listen_addresses = '*'
+#------------------------------------------------------------------------------
+# REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+log_min_messages = info
+log_min_duration_statement = 0          # -1 is disabled, 0 logs all statements
+log_statement = 'all'                   # none, ddl, mod, all
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+# Add settings for extensions here
@@ -10,6 +10,7 @@ RUN apt-get -y update && apt-get install --no-install-recommends -y \
     wait-for-it \
     gcc \
     git \
+    vim \
     libxml2-dev \
     libxmlsec1-dev \
     libxmlsec1-openssl \
@@ -27,4 +28,4 @@ EXPOSE 8000
 COPY docker/entrypoint-dev /usr/local/sbin/
 COPY . .
 ENTRYPOINT ["/usr/local/sbin/entrypoint-dev"]
-CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"]
+CMD ["python", "-u", "manage.py", "runserver", "0.0.0.0:8000"]
@@ -39,7 +39,7 @@ wait-for-it redis:6379
 wait-for-it redis-slave:6379
 
 echo 'running migrations'
-python manage.py migrate
+python -u manage.py migrate
 
 # Admin
 if [[ -f /run/secrets/os_admin ]]; then
@@ -4,6 +4,11 @@ set -e
 
 wait-for-it -t 0 redis:6379
 
+until pg_isready -h postgres -p 5432 -U openslides; do
+    echo "Waiting for Postgres to become available..."
+    sleep 3
+done
+
 if [[ ! -f "/app/personal_data/var/settings.py" ]]; then
     echo "Create settings"
     python manage.py createsettings
@@ -83,6 +83,8 @@ DATABASES = {
         "PASSWORD": get_env("DATABASE_PASSWORD", "openslides"),
         "HOST": get_env("DATABASE_HOST", "db"),
         "PORT": get_env("DATABASE_PORT", "5432"),
+        "USE_TZ": False,  # Requires postgresql to have UTC set as default
+        "DISABLE_SERVER_SIDE_CURSORS": True,
     },
     "mediafiles": {
         "ENGINE": "django.db.backends.postgresql",
@@ -11,6 +11,7 @@ from openslides.utils.cache import element_cache, get_element_id
 from openslides.utils.locking import locking
 from openslides.utils.manager import BaseManager
 from openslides.utils.models import SET_NULL_AND_AUTOUPDATE, RESTModelMixin
+from openslides.utils.postgres import is_postgres
 
 from .access_permissions import (
     ConfigAccessPermissions,
@@ -268,31 +269,60 @@ class HistoryManager(BaseManager):
         """
         Method to add elements to the history. This does not trigger autoupdate.
         """
+        history_time = now()
+        elements = [
+            element for element in elements if not element.get("disable_history", False)
+        ]
+
         with transaction.atomic():
-            instances = []
-            history_time = now()
-            for element in elements:
-                if element.get("disable_history"):
-                    # Do not update history if history is disabled.
-                    continue
-                # HistoryData is not a root rest element so there is no autoupdate and not history saving here.
-                data = HistoryData.objects.create(full_data=element.get("full_data"))
-                instance = self.model(
-                    element_id=get_element_id(
-                        element["collection_string"], element["id"]
-                    ),
-                    now=history_time,
-                    information=element.get("information", []),
-                    user_id=element.get("user_id"),
-                    full_data=data,
-                )
-                instance.save()
-                instances.append(instance)
-            return instances
+            if is_postgres():
+                return self._add_elements_postgres(elements, history_time)
+            else:
+                return self._add_elements_other_dbs(elements, history_time)
+
+    def _add_elements_postgres(self, elements, history_time):
+        """
+        Postgres supports returning ids from bulk requests, so after doing `bulk_create`
+        every HistoryData has an id. This can be used to bulk_create History-Models in a
+        second step.
+        """
+        history_data = [
+            HistoryData(full_data=element.get("full_data")) for element in elements
+        ]
+        HistoryData.objects.bulk_create(history_data)
+
+        history_entries = [
+            self.model(
+                element_id=get_element_id(element["collection_string"], element["id"]),
+                now=history_time,
+                information=element.get("information", []),
+                user_id=element.get("user_id"),
+                full_data_id=hd.id,
+            )
+            for element, hd in zip(elements, history_data)
+        ]
+        self.bulk_create(history_entries)
+        return history_entries
+
+    def _add_elements_other_dbs(self, elements, history_time):
+        history_entries = []
+        for element in elements:
+            # HistoryData is not a root rest element so there is no autoupdate and not history saving here.
+            data = HistoryData.objects.create(full_data=element.get("full_data"))
+            instance = self.model(
+                element_id=get_element_id(element["collection_string"], element["id"]),
+                now=history_time,
+                information=element.get("information", []),
+                user_id=element.get("user_id"),
+                full_data=data,
+            )
+            instance.save()
+            history_entries.append(instance)
+        return history_entries
+
     def build_history(self):
         """
-        Method to add all cachables to the history.
+        Method to add all cacheables to the history.
         """
         async_to_sync(self.async_build_history)()
 
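The docstring of the new _add_elements_postgres method relies on a PostgreSQL-specific behaviour of Django's bulk_create: on PostgreSQL the insert uses RETURNING, so the created objects come back with their primary keys set, which is what makes the second bulk insert of the dependent rows possible. A minimal sketch of that two-step pattern, using hypothetical Parent/Child models rather than the HistoryData/History models from this diff:

    # Sketch only: assumes a Django project with two hypothetical models, e.g.
    #   class Parent(models.Model): payload = models.TextField()
    #   class Child(models.Model):  parent = models.ForeignKey(Parent, on_delete=models.CASCADE)

    def bulk_insert(payloads):
        # Step 1: one INSERT for all Parent rows. On PostgreSQL, bulk_create uses
        # RETURNING, so every object in `parents` has its primary key afterwards.
        parents = [Parent(payload=p) for p in payloads]
        Parent.objects.bulk_create(parents)

        # Step 2: build the dependent rows from the ids obtained above and write
        # them with a second single INSERT instead of one query per row.
        children = [Child(parent_id=parent.id) for parent in parents]
        Child.objects.bulk_create(children)
        return children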
@@ -23,7 +23,7 @@ INSTALLED_APPS = [
     "openslides.mediafiles",
 ]
 
-INSTALLED_PLUGINS = collect_plugins()  # Adds all automaticly collected plugins
+INSTALLED_PLUGINS = collect_plugins()  # Adds all automatically collected plugins
 
 MIDDLEWARE = [
     "django.middleware.security.SecurityMiddleware",
@@ -33,7 +33,7 @@ MIDDLEWARE = [
     "django.middleware.csrf.CsrfViewMiddleware",
     "django.contrib.auth.middleware.AuthenticationMiddleware",
     "django.middleware.clickjacking.XFrameOptionsMiddleware",
-    "openslides.utils.autoupdate.AutoupdateBundleMiddleware",
+    "openslides.utils.autoupdate_bundle.AutoupdateBundleMiddleware",
 ]
 
 ROOT_URLCONF = "openslides.urls"
@@ -1260,15 +1260,18 @@ class MotionPollViewSet(BasePollViewSet):
         self, data, poll, weight_user, vote_user, request_user
     ):
         option = poll.options.get()
-        vote = MotionVote.objects.create(
-            user=vote_user, delegated_user=request_user, option=option
-        )
-        vote.value = data
-        vote.weight = (
+        weight = (
             weight_user.vote_weight
             if config["users_activate_vote_weight"]
             else Decimal(1)
         )
+        vote = MotionVote(
+            user=vote_user,
+            delegated_user=request_user,
+            option=option,
+            value=data,
+            weight=weight,
+        )
         vote.save(no_delete_on_restriction=True)
         inform_changed_data(option)
 
@@ -7,7 +7,7 @@ from django.db.utils import IntegrityError
 from rest_framework import status
 
 from openslides.utils.auth import in_some_groups
-from openslides.utils.autoupdate import inform_changed_data
+from openslides.utils.autoupdate import disable_history, inform_changed_data
 from openslides.utils.rest_api import (
     DecimalField,
     GenericViewSet,
@@ -158,10 +158,14 @@ class BasePollViewSet(ModelViewSet):
         poll.state = BasePoll.STATE_PUBLISHED
         poll.save()
         inform_changed_data(
-            (vote.user for vote in poll.get_votes().all() if vote.user), final_data=True
+            (
+                vote.user
+                for vote in poll.get_votes().prefetch_related("user").all()
+                if vote.user
+            )
         )
-        inform_changed_data(poll.get_votes(), final_data=True)
-        inform_changed_data(poll.get_options(), final_data=True)
+        inform_changed_data(poll.get_votes())
+        inform_changed_data(poll.get_options())
         return Response()
 
     @detail_route(methods=["POST"])
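The prefetch_related("user") added above matters because the generator reads vote.user for every vote; without prefetching each access is a separate SELECT (an N+1 pattern), while prefetching loads all referenced users in one extra query. A minimal illustration with a hypothetical Vote model, not the OpenSlides one:

    # Sketch only: assumes a hypothetical model
    #   class Vote(models.Model): user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)

    def users_who_voted(votes_queryset):
        # prefetch_related("user") fetches all referenced users in a single extra
        # query and attaches them, so vote.user below no longer hits the database
        # once per vote.
        votes = votes_queryset.prefetch_related("user").all()
        return [vote.user for vote in votes if vote.user]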
@@ -194,6 +198,9 @@ class BasePollViewSet(ModelViewSet):
         """
         poll = self.get_object()
 
+        # Disable history for these requests
+        disable_history()
+
         if isinstance(request.user, AnonymousUser):
             self.permission_denied(request)
 
@@ -205,7 +212,11 @@ class BasePollViewSet(ModelViewSet):
         if "data" not in data:
             raise ValidationError({"detail": "No data provided."})
         vote_data = data["data"]
-        if "user_id" in data and poll.type != BasePoll.TYPE_ANALOG:
+        if (
+            "user_id" in data
+            and data["user_id"] != request.user.id
+            and poll.type != BasePoll.TYPE_ANALOG
+        ):
             try:
                 vote_user = get_user_model().objects.get(pk=data["user_id"])
             except get_user_model().DoesNotExist:
@@ -241,9 +252,9 @@ class BasePollViewSet(ModelViewSet):
     @transaction.atomic
     def refresh(self, request, pk):
         poll = self.get_object()
-        inform_changed_data(poll, final_data=True)
-        inform_changed_data(poll.get_options(), final_data=True)
-        inform_changed_data(poll.get_votes(), final_data=True)
+        inform_changed_data(poll)
+        inform_changed_data(poll.get_options())
+        inform_changed_data(poll.get_votes())
         return Response()
 
     def assert_can_vote(self, poll, request, vote_user):
@@ -0,0 +1,28 @@
+# Generated by Django 2.2.17 on 2021-01-20 14:33
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("users", "0015_user_vote_delegated_to"),
+    ]
+
+    operations = [
+        migrations.AlterModelOptions(
+            name="user",
+            options={
+                "default_permissions": (),
+                "permissions": (
+                    ("can_see_name", "Can see names of users"),
+                    (
+                        "can_see_extra_data",
+                        "Can see extra data of users (e.g. email and comment)",
+                    ),
+                    ("can_change_password", "Can change its own password"),
+                    ("can_manage", "Can manage users"),
+                ),
+            },
+        ),
+    ]
@@ -193,7 +193,6 @@ class User(RESTModelMixin, PermissionsMixin, AbstractBaseUser):
             ("can_change_password", "Can change its own password"),
             ("can_manage", "Can manage users"),
         )
-        ordering = ("last_name", "first_name", "username")
 
     def __str__(self):
         # Strip white spaces from the name parts
@@ -0,0 +1,11 @@
+# overwrite the builtin print to always flush
+def flushprint(*args, **kwargs):  # type: ignore
+    if "flush" not in kwargs:
+        kwargs["flush"] = True
+
+    __builtins__["oldprint"](*args, **kwargs)  # type: ignore
+
+
+if "oldprint" not in __builtins__:  # type: ignore
+    __builtins__["oldprint"] = __builtins__["print"]  # type: ignore
+    __builtins__["print"] = flushprint  # type: ignore
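This new module patches the builtin print to flush on every call, so server output shows up in the container logs immediately; the -u flag added to the Dockerfile CMD addresses the same buffering problem at the interpreter level. Purely as an illustration of the effect (not the approach taken in this PR), the per-module equivalent would be:

    import functools

    # Illustration only: rebind print in a single module instead of patching
    # __builtins__ globally as the file above does.
    print = functools.partial(print, flush=True)

    print("this line is flushed immediately")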
@@ -1,17 +1,13 @@
-import json
-import threading
 from collections import defaultdict
 from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
 
-from asgiref.sync import async_to_sync
 from django.db.models import Model
 from mypy_extensions import TypedDict
 
 from .auth import UserDoesNotExist
-from .cache import ChangeIdTooLowError, element_cache, get_element_id
-from .stream import stream
-from .timing import Timing
-from .utils import get_model_from_collection_string, is_iterable, split_element_id
+from .autoupdate_bundle import AutoupdateElement, autoupdate_bundle
+from .cache import ChangeIdTooLowError, element_cache
+from .utils import is_iterable, split_element_id
 
 
 AutoupdateFormat = TypedDict(
@@ -26,131 +22,10 @@ AutoupdateFormat = TypedDict(
 )
 
 
-class AutoupdateElementBase(TypedDict):
-    id: int
-    collection_string: str
-
-
-class AutoupdateElement(AutoupdateElementBase, total=False):
-    """
-    Data container to handle one root rest element for the autoupdate, history
-    and caching process.
-
-    The fields `id` and `collection_string` are required to identify the element. All
-    other fields are optional:
-
-    full_data: If a value is given (dict or None), it won't be loaded from the DB.
-    If otherwise no value is given, the AutoupdateBundle will try to resolve the object
-    from the DB and serialize it into the full_data.
-
-    information and user_id: These fields are for the history indicating what and who
-    made changes.
-
-    disable_history: If this is True, the element (and the containing full_data) won't
-    be saved into the history. Information and user_id is then irrelevant.
-
-    no_delete_on_restriction is a flag, which is saved into the models in the cache
-    as the _no_delete_on_restriction key. If this is true, there should neither be an
-    entry for one specific model in the changed *nor the deleted* part of the
-    autoupdate, if the model was restricted.
-    """
-
-    information: List[str]
-    user_id: Optional[int]
-    disable_history: bool
-    no_delete_on_restriction: bool
-    full_data: Optional[Dict[str, Any]]
-
-
-class AutoupdateBundle:
-    """
-    Collects changed elements via inform*_data. After the collecting-step is finished,
-    the bundle releases all changes to the history and element cache via `.done()`.
-    """
-
-    def __init__(self) -> None:
-        self.autoupdate_elements: Dict[str, Dict[int, AutoupdateElement]] = defaultdict(
-            dict
-        )
-
-    def add(self, elements: Iterable[AutoupdateElement]) -> None:
-        """ Adds the elements to the bundle """
-        for element in elements:
-            self.autoupdate_elements[element["collection_string"]][
-                element["id"]
-            ] = element
-
-    def done(self) -> Optional[int]:
-        """
-        Finishes the bundle by resolving all missing data and passing it to
-        the history and element cache.
-
-        Returns the change id, if there are autoupdate elements. Otherwise none.
-        """
-        if not self.autoupdate_elements:
-            return None
-
-        for collection, elements in self.autoupdate_elements.items():
-            # Get all ids, that do not have a full_data key
-            # (element["full_data"]=None will not be resolved again!)
-            ids = [
-                element["id"]
-                for element in elements.values()
-                if "full_data" not in element
-            ]
-            if ids:
-                # Get all missing models. If e.g. an id could not be found it
-                # means, it was deleted. Since there is not full_data entry
-                # for the element, the data will be interpreted as None, which
-                # is correct for deleted elements.
-                model_class = get_model_from_collection_string(collection)
-                for full_data in model_class.get_elements(ids):
-                    elements[full_data["id"]]["full_data"] = full_data
-
-        # Save histroy here using sync code.
-        save_history(self.element_iterator)
-
-        # Update cache and send autoupdate using async code.
-        return async_to_sync(self.dispatch_autoupdate)()
-
-    @property
-    def element_iterator(self) -> Iterable[AutoupdateElement]:
-        """ Iterator for all elements in this bundle """
-        for elements in self.autoupdate_elements.values():
-            yield from elements.values()
-
-    async def get_data_for_cache(self) -> Dict[str, Optional[Dict[str, Any]]]:
-        """
-        Async helper function to update the cache.
-
-        Returns the change_id
-        """
-        cache_elements: Dict[str, Optional[Dict[str, Any]]] = {}
-        for element in self.element_iterator:
-            element_id = get_element_id(element["collection_string"], element["id"])
-            full_data = element.get("full_data")
-            if full_data:
-                full_data["_no_delete_on_restriction"] = element.get(
-                    "no_delete_on_restriction", False
-                )
-            cache_elements[element_id] = full_data
-        return cache_elements
-
-    async def dispatch_autoupdate(self) -> int:
-        """
-        Async helper function to update cache and send autoupdate.
-
-        Return the change_id
-        """
-        # Update cache
-        cache_elements = await self.get_data_for_cache()
-        change_id = await element_cache.change_elements(cache_elements)
-
-        # Send autoupdate
-        autoupdate_payload = {"elements": cache_elements, "change_id": change_id}
-        await stream.send("autoupdate", autoupdate_payload)
-
-        return change_id
+def disable_history() -> None:
+    """"""
+    with autoupdate_bundle() as bundle:
+        bundle.disable_history()
 
 
 def inform_changed_data(
@@ -159,7 +34,6 @@ def inform_changed_data(
     user_id: Optional[int] = None,
     disable_history: bool = False,
     no_delete_on_restriction: bool = False,
-    final_data: bool = False,
 ) -> None:
     """
     Informs the autoupdate system and the caching system about the creation or
@@ -186,8 +60,6 @@ def inform_changed_data(
             user_id=user_id,
             no_delete_on_restriction=no_delete_on_restriction,
         )
-        if final_data:
-            element["full_data"] = root_instance.get_full_data()
         elements.append(element)
     inform_elements(elements)

@@ -227,62 +99,8 @@ def inform_elements(elements: Iterable[AutoupdateElement]) -> None:
     If you want to save history information, user id or disable history you
     have to put information or flag inside the elements.
     """
-    bundle = autoupdate_bundle.get(threading.get_ident())
-    if bundle is not None:
-        # Put all elements into the autoupdate_bundle.
-        bundle.add(elements)
-    else:
-        # Send autoupdate directly
-        bundle = AutoupdateBundle()
-        bundle.add(elements)
-        bundle.done()
+    with autoupdate_bundle() as bundle:
+        bundle.add(elements)
-
-
-"""
-Global container for autoupdate bundles
-"""
-autoupdate_bundle: Dict[int, AutoupdateBundle] = {}
-
-
-class AutoupdateBundleMiddleware:
-    """
-    Middleware to handle autoupdate bundling.
-    """
-
-    def __init__(self, get_response: Any) -> None:
-        self.get_response = get_response
-        # One-time configuration and initialization.
-
-    def __call__(self, request: Any) -> Any:
-        thread_id = threading.get_ident()
-        autoupdate_bundle[thread_id] = AutoupdateBundle()
-
-        timing = Timing("request")
-
-        response = self.get_response(request)
-
-        timing()
-
-        status_ok = response.status_code >= 200 and response.status_code < 300
-        status_redirect = response.status_code >= 300 and response.status_code < 400
-
-        # rewrite the response by adding the autoupdate on any success-case (2xx status)
-        bundle: AutoupdateBundle = autoupdate_bundle.pop(thread_id)
-        if status_ok or status_redirect:
-            change_id = bundle.done()
-
-            # inject the change id, if there was an autoupdate and the response status is
-            # ok (and not redirect; redirects do not have a useful content)
-            if change_id is not None and status_ok:
-                # Inject the autoupdate in the response.
-                # The complete response body will be overwritten!
-                content = {"change_id": change_id, "data": response.data}
-                # Note: autoupdate may be none on skipped ones (which should not happen
-                # since the user has made the request....)
-                response.content = json.dumps(content)
-
-        timing(True)
-        return response
-
-
 async def get_autoupdate_data(
@@ -337,14 +155,3 @@ async def _get_autoupdate_data(
         all_data=all_data,
     ),
 )
-
-
-def save_history(element_iterator: Iterable[AutoupdateElement]) -> Iterable:
-    """
-    Thin wrapper around the call of history saving manager method.
-
-    This is separated to patch it during tests.
-    """
-    from ..core.models import History
-
-    return History.objects.add_elements(element_iterator)
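For orientation, the sketch below shows how these two entry points are typically driven from synchronous server code. It is an editorial illustration, not part of the commit: the import paths, the first positional argument of inform_changed_data (the changed instance), and the collection string are assumptions.

# Hedged sketch (not part of this diff). Import paths, argument names and
# "motions/motion" are assumed examples.
from openslides.utils.autoupdate import inform_changed_data, inform_elements
from openslides.utils.autoupdate_bundle import AutoupdateElement


def after_motion_save(motion, user_id):
    # High-level path: the instance's full_data is resolved by the bundle.
    inform_changed_data(motion, user_id=user_id)

    # Low-level path: history-related flags travel inside each element.
    inform_elements(
        [
            AutoupdateElement(
                id=motion.pk,
                collection_string="motions/motion",
                user_id=user_id,
            )
        ]
    )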
server/openslides/utils/autoupdate_bundle.py (new file, 219 lines)
@@ -0,0 +1,219 @@
+import json
+import threading
+from collections import defaultdict
+from contextlib import contextmanager
+from typing import Any, Dict, Iterable, Iterator, List, Optional
+
+from asgiref.sync import async_to_sync
+from mypy_extensions import TypedDict
+
+from .cache import element_cache, get_element_id
+from .stream import stream
+from .timing import Timing
+from .utils import get_model_from_collection_string
+
+
+class AutoupdateElementBase(TypedDict):
+    id: int
+    collection_string: str
+
+
+class AutoupdateElement(AutoupdateElementBase, total=False):
+    """
+    Data container to handle one root rest element for the autoupdate, history
+    and caching process.
+
+    The fields `id` and `collection_string` are required to identify the element. All
+    other fields are optional:
+
+    full_data: If a value is given (dict or None), it won't be loaded from the DB.
+    If otherwise no value is given, the AutoupdateBundle will try to resolve the object
+    from the DB and serialize it into the full_data.
+
+    information and user_id: These fields are for the history indicating what and who
+    made changes.
+
+    disable_history: If this is True, the element (and the containing full_data) won't
+    be saved into the history. Information and user_id is then irrelevant.
+
+    no_delete_on_restriction is a flag, which is saved into the models in the cache
+    as the _no_delete_on_restriction key. If this is true, there should neither be an
+    entry for one specific model in the changed *nor the deleted* part of the
+    autoupdate, if the model was restricted.
+    """
+
+    information: List[str]
+    user_id: Optional[int]
+    disable_history: bool
+    no_delete_on_restriction: bool
+    full_data: Optional[Dict[str, Any]]
+
+
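Since AutoupdateElement is a TypedDict, call sites build plain dicts. A minimal sketch of the two shapes the docstring above describes follows; the collection string and ids are illustrative only, not taken from this commit.

# Hedged sketch, not part of the new file. "users/user" and the ids are examples.
updated = AutoupdateElement(
    id=42,
    collection_string="users/user",
    information=["Password changed"],
    user_id=1,
)  # no full_data key: the bundle resolves it from the DB in done()

deleted = AutoupdateElement(
    id=43,
    collection_string="users/user",
    full_data=None,  # explicit None marks the element as deleted
)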
+class AutoupdateBundle:
+    """
+    Collects changed elements via inform*_data. After the collecting-step is finished,
+    the bundle releases all changes to the history and element cache via `.done()`.
+    """
+
+    def __init__(self) -> None:
+        self.autoupdate_elements: Dict[str, Dict[int, AutoupdateElement]] = defaultdict(
+            dict
+        )
+        self._disable_history = False
+
+    def add(self, elements: Iterable[AutoupdateElement]) -> None:
+        """ Adds the elements to the bundle """
+        for element in elements:
+            self.autoupdate_elements[element["collection_string"]][
+                element["id"]
+            ] = element
+
+    def disable_history(self) -> None:
+        self._disable_history = True
+
+    def done(self) -> Optional[int]:
+        """
+        Finishes the bundle by resolving all missing data and passing it to
+        the history and element cache.
+
+        Returns the change id, if there are autoupdate elements. Otherwise none.
+        """
+        if not self.autoupdate_elements:
+            return None
+
+        for collection, elements in self.autoupdate_elements.items():
+            # Get all ids, that do not have a full_data key
+            # (element["full_data"]=None will not be resolved again!)
+            ids = [
+                element["id"]
+                for element in elements.values()
+                if "full_data" not in element
+            ]
+            if ids:
+                # Get all missing models. If e.g. an id could not be found it
+                # means, it was deleted. Since there is not full_data entry
+                # for the element, the data will be interpreted as None, which
+                # is correct for deleted elements.
+                model_class = get_model_from_collection_string(collection)
+                for full_data in model_class.get_elements(ids):
+                    elements[full_data["id"]]["full_data"] = full_data
+
+        # Save histroy here using sync code.
+        if not self._disable_history:
+            save_history(self.element_iterator)
+
+        # Update cache and send autoupdate using async code.
+        change_id = async_to_sync(self.dispatch_autoupdate)()
+
+        return change_id
+
+    @property
+    def element_iterator(self) -> Iterable[AutoupdateElement]:
+        """ Iterator for all elements in this bundle """
+        for elements in self.autoupdate_elements.values():
+            yield from elements.values()
+
+    async def get_data_for_cache(self) -> Dict[str, Optional[Dict[str, Any]]]:
+        """
+        Async helper function to update the cache.
+
+        Returns the change_id
+        """
+        cache_elements: Dict[str, Optional[Dict[str, Any]]] = {}
+        for element in self.element_iterator:
+            element_id = get_element_id(element["collection_string"], element["id"])
+            full_data = element.get("full_data")
+            if full_data:
+                full_data["_no_delete_on_restriction"] = element.get(
+                    "no_delete_on_restriction", False
+                )
+            cache_elements[element_id] = full_data
+        return cache_elements
+
+    async def dispatch_autoupdate(self) -> int:
+        """
+        Async helper function to update cache and send autoupdate.
+
+        Return the change_id
+        """
+        # Update cache
+        cache_elements = await self.get_data_for_cache()
+        change_id = await element_cache.change_elements(cache_elements)
+
+        # Send autoupdate
+        autoupdate_payload = {"elements": cache_elements, "change_id": change_id}
+        await stream.send("autoupdate", autoupdate_payload)
+
+        return change_id
+
+
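Driving a bundle by hand, as the middleware further down does, looks roughly like the sketch below; the element is an illustrative example.

# Hedged sketch, not part of the new file.
bundle = AutoupdateBundle()
bundle.add([AutoupdateElement(id=42, collection_string="users/user")])
change_id = bundle.done()  # resolves full_data, saves history, updates the cache
if change_id is not None:
    print(f"autoupdate dispatched with change_id={change_id}")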
+@contextmanager
+def autoupdate_bundle() -> Iterator[AutoupdateBundle]:
+    bundle = _autoupdate_bundle.get(threading.get_ident())
+    autodone = False
+    if bundle is None:
+        bundle = AutoupdateBundle()
+        autodone = True
+
+    yield bundle
+
+    if autodone:
+        bundle.done()
+
+
+"""
+Global container for autoupdate bundles
+"""
+_autoupdate_bundle: Dict[int, AutoupdateBundle] = {}
+
+
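The context manager reuses the thread's bundle when the middleware has installed one and only calls done() itself when it created the bundle. Below is a sketch of the standalone case, for example in a management command; the elements are illustrative.

# Hedged sketch: no middleware bundle exists for this thread, so the context
# manager creates one and finishes it on exit, merging both adds into a
# single autoupdate.
with autoupdate_bundle() as bundle:
    bundle.add([AutoupdateElement(id=1, collection_string="users/user")])
    bundle.add([AutoupdateElement(id=2, collection_string="users/user")])
# bundle.done() has run here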
+class AutoupdateBundleMiddleware:
+    """
+    Middleware to handle autoupdate bundling.
+    """
+
+    def __init__(self, get_response: Any) -> None:
+        self.get_response = get_response
+        # One-time configuration and initialization.
+
+    def __call__(self, request: Any) -> Any:
+        thread_id = threading.get_ident()
+        _autoupdate_bundle[thread_id] = AutoupdateBundle()
+
+        timing = Timing("request")
+
+        response = self.get_response(request)
+
+        timing()
+
+        status_ok = response.status_code >= 200 and response.status_code < 300
+        status_redirect = response.status_code >= 300 and response.status_code < 400
+
+        # rewrite the response by adding the autoupdate on any success-case (2xx status)
+        bundle: AutoupdateBundle = _autoupdate_bundle.pop(thread_id)
+        if status_ok or status_redirect:
+            change_id = bundle.done()
+
+            # inject the change id, if there was an autoupdate and the response status is
+            # ok (and not redirect; redirects do not have a useful content)
+            if change_id is not None and status_ok:
+                # Inject the autoupdate in the response.
+                # The complete response body will be overwritten!
+                content = {"change_id": change_id, "data": response.data}
+                # Note: autoupdate may be none on skipped ones (which should not happen
+                # since the user has made the request....)
+                response.content = json.dumps(content)
+
+        timing(True)
+        return response
+
+
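The middleware is presumably wired up through Django's MIDDLEWARE setting; the dotted path below follows from the new module location and is an assumption, as are the neighbouring entries.

# Hedged sketch of a settings.py entry; not shown in this commit.
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    # ...
    "openslides.utils.autoupdate_bundle.AutoupdateBundleMiddleware",
]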
+def save_history(elements: Iterable[AutoupdateElement]) -> Iterable:
+    """
+    Thin wrapper around the call of history saving manager method.
+
+    This is separated to patch it during tests.
+    """
+    from ..core.models import History
+
+    return History.objects.add_elements(elements)
@@ -167,7 +167,8 @@ class RESTModelMixin:

         # For logging the progress
         last_time = time.time()
-        instances_length = len(instances)
+        instances_length = len(instances)  # this evaluates the query

         for i, instance in enumerate(instances):
             # Append full data from this instance
             full_data.append(instance.get_full_data())
@@ -177,6 +178,7 @@ class RESTModelMixin:
             if current_time > last_time + 5:
                 last_time = current_time
                 logger.info(f" {i+1}/{instances_length}...")

         return full_data

     @classmethod
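The new comment flags that len(instances) evaluates the queryset up front, so the following loop iterates over the already-fetched rows instead of triggering a second query. A short sketch of the distinction, using Django's user model as a stand-in:

# Hedged sketch illustrating the added comment; any model works the same way.
from django.contrib.auth import get_user_model

instances = get_user_model().objects.all()

count = instances.count()  # issues SELECT COUNT(*); the queryset stays lazy
length = len(instances)    # evaluates the queryset and caches every row

for instance in instances:  # reuses the cached rows, no extra query
    pass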
@@ -1,17 +1,21 @@
 from django.db import connection


+def is_postgres() -> bool:
+    return connection.vendor == "postgresql"
+
+
 def restart_id_sequence(table_name: str) -> None:
     """
     This function resets the id sequence from the given table (the current auto
     increment value for the id field) to the max_id+1. This is needed, when manually
     inserting object id, because Postgresql does not update the id sequence in this case.
     """
-    if connection.vendor == "postgresql":
-        with connection.cursor() as cursor:
-            cursor.execute(f"SELECT max(id) + 1 as max FROM {table_name};")
-            max_id = cursor.fetchone()[0]
-            if max_id is not None:
-                cursor.execute(
-                    f"ALTER SEQUENCE {table_name}_id_seq RESTART WITH {max_id};"
-                )
+    if not is_postgres():
+        return
+
+    with connection.cursor() as cursor:
+        cursor.execute(f"SELECT max(id) + 1 as max FROM {table_name};")
+        max_id = cursor.fetchone()[0]
+        if max_id is not None:
+            cursor.execute(f"ALTER SEQUENCE {table_name}_id_seq RESTART WITH {max_id};")
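The helper above is used after rows have been inserted with explicit primary keys (imports, fixtures) so that the sequence does not hand out an id that already exists. A usage sketch follows; the import path and table name are assumptions.

# Hedged sketch, not part of this diff. Module path and table name assumed.
from openslides.utils.postgres import restart_id_sequence

# after bulk-creating objects with explicit ids:
restart_id_sequence("motions_motion")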
@@ -11,7 +11,7 @@ from . import logging


 logger = logging.getLogger(__name__)
-connection_pool_limit = getattr(settings, "CONNECTION_POOL_LIMIT", 100)
+connection_pool_limit = getattr(settings, "CONNECTION_POOL_LIMIT", 10)
 logger.info(f"CONNECTION_POOL_LIMIT={connection_pool_limit}")

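Because the limit is read with getattr, deployments that need more concurrent database connections can still raise the new, lower default in their settings:

# settings.py override of the default shown above
CONNECTION_POOL_LIMIT = 50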