Verbessertes Logging #64
13
app.py
13
app.py
@ -8,7 +8,6 @@ import os
|
|||||||
from dotenv import load_dotenv, find_dotenv
|
from dotenv import load_dotenv, find_dotenv
|
||||||
from flask import Flask
|
from flask import Flask
|
||||||
from flask_cors import CORS
|
from flask_cors import CORS
|
||||||
from flask.logging import default_handler
|
|
||||||
from flask_migrate import Migrate
|
from flask_migrate import Migrate
|
||||||
from flask_sqlalchemy import SQLAlchemy
|
from flask_sqlalchemy import SQLAlchemy
|
||||||
from ldap3.utils.log import logger as ldap3_logger
|
from ldap3.utils.log import logger as ldap3_logger
|
||||||
@ -17,13 +16,15 @@ from ldap3.utils.log import set_library_log_detail_level, BASIC
|
|||||||
load_dotenv(find_dotenv())
|
load_dotenv(find_dotenv())
|
||||||
|
|
||||||
app = Flask(__name__)
|
app = Flask(__name__)
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
loglevel = os.getenv("KI_LOGLEVEL", logging.WARNING)
|
loglevel = os.getenv("KI_LOGLEVEL", logging.WARNING)
|
||||||
loglevel = int(loglevel)
|
loglevel = int(loglevel)
|
||||||
app.logger.setLevel(loglevel)
|
app.logger.setLevel(loglevel)
|
||||||
logging.basicConfig(level=loglevel)
|
app.logger.propagate = False # do not forward messages to the root logger
|
||||||
|
logging.basicConfig(level=loglevel, format='[%(asctime)s] %(levelname)s [%(name)s] %(message)s') # configure root logger as fallback
|
||||||
set_library_log_detail_level(BASIC)
|
logging.getLogger('werkzeug').propagate = False # werkzeug has its own ColorStreamHandler
|
||||||
ldap3_logger.addHandler(default_handler)
|
set_library_log_detail_level(BASIC) # ldap3 has different verbosity levels internally
|
||||||
|
|||||||
|
|
||||||
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("SQLALCHEMY_DATABASE_URI")
|
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("SQLALCHEMY_DATABASE_URI")
|
||||||
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
|
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
|
||||||
@ -40,6 +41,6 @@ CORS(app)
|
|||||||
db = SQLAlchemy(app)
|
db = SQLAlchemy(app)
|
||||||
migrate = Migrate(app, db, compare_type=True)
|
migrate = Migrate(app, db, compare_type=True)
|
||||||
|
|
||||||
logging.debug("Hello from KI")
|
app.logger.info("Hello from KI")
|
||||||
|
|
||||||
from ki import module # noqa
|
from ki import module # noqa
|
||||||
|
@ -3,7 +3,6 @@
|
|||||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||||
|
|
||||||
import csv
|
import csv
|
||||||
import logging
|
|
||||||
|
|
||||||
from app import app, db
|
from app import app, db
|
||||||
from ki.models import Address, Contact, ContactType, Language, Skill, Profile, ProfileLanguage, ProfileSearchtopic, \
|
from ki.models import Address, Contact, ContactType, Language, Skill, Profile, ProfileLanguage, ProfileSearchtopic, \
|
||||||
@ -13,7 +12,7 @@ from ki.models import Address, Contact, ContactType, Language, Skill, Profile, P
|
|||||||
def seed_contacttypes():
|
def seed_contacttypes():
|
||||||
contacttypes_seed_file_path = app.config["KI_DATA_DIR"] + "/seed_data/contacttypes.csv"
|
contacttypes_seed_file_path = app.config["KI_DATA_DIR"] + "/seed_data/contacttypes.csv"
|
||||||
|
|
||||||
logging.info("importing contacttypes")
|
app.logger.info("importing contacttypes")
|
||||||
|
|
||||||
with open(contacttypes_seed_file_path) as file:
|
with open(contacttypes_seed_file_path) as file:
|
||||||
csv_reader = csv.DictReader(file)
|
csv_reader = csv.DictReader(file)
|
||||||
|
@ -1,50 +0,0 @@
|
|||||||
# A generic, single database configuration.
|
|
||||||
|
|
||||||
[alembic]
|
|
||||||
# template used to generate migration files
|
|
||||||
# file_template = %%(rev)s_%%(slug)s
|
|
||||||
|
|
||||||
# set to 'true' to run the environment during
|
|
||||||
# the 'revision' command, regardless of autogenerate
|
|
||||||
# revision_environment = false
|
|
||||||
|
|
||||||
|
|
||||||
# Logging configuration
|
|
||||||
[loggers]
|
|
||||||
keys = root,sqlalchemy,alembic,flask_migrate
|
|
||||||
|
|
||||||
[handlers]
|
|
||||||
keys = console
|
|
||||||
|
|
||||||
[formatters]
|
|
||||||
keys = generic
|
|
||||||
|
|
||||||
[logger_root]
|
|
||||||
level = WARN
|
|
||||||
handlers = console
|
|
||||||
qualname =
|
|
||||||
|
|
||||||
[logger_sqlalchemy]
|
|
||||||
level = WARN
|
|
||||||
handlers =
|
|
||||||
qualname = sqlalchemy.engine
|
|
||||||
|
|
||||||
[logger_alembic]
|
|
||||||
level = INFO
|
|
||||||
handlers =
|
|
||||||
qualname = alembic
|
|
||||||
|
|
||||||
[logger_flask_migrate]
|
|
||||||
level = INFO
|
|
||||||
handlers =
|
|
||||||
qualname = flask_migrate
|
|
||||||
|
|
||||||
[handler_console]
|
|
||||||
class = StreamHandler
|
|
||||||
args = (sys.stderr,)
|
|
||||||
level = NOTSET
|
|
||||||
formatter = generic
|
|
||||||
|
|
||||||
[formatter_generic]
|
|
||||||
format = %(levelname)-5.5s [%(name)s] %(message)s
|
|
||||||
datefmt = %H:%M:%S
|
|
@ -11,9 +11,6 @@ from alembic import context
|
|||||||
# access to the values within the .ini file in use.
|
# access to the values within the .ini file in use.
|
||||||
config = context.config
|
config = context.config
|
||||||
|
|
||||||
# Interpret the config file for Python logging.
|
|
||||||
# This line sets up loggers basically.
|
|
||||||
fileConfig(config.config_file_name)
|
|
||||||
logger = logging.getLogger('alembic.env')
|
logger = logging.getLogger('alembic.env')
|
||||||
|
|
||||||
# add your model's MetaData object here
|
# add your model's MetaData object here
|
||||||
|
Loading…
Reference in New Issue
Block a user
Wäre es eine Idee, das hier auf das höchste Level zu setzen, wenn der
`KI_LOGLEVEL`
z. B. `<= DEBUG`
ist? Also kein abgefahrenes Mapping, sondern nur eine Stufe und fertig.
Wir hatten ja schon Probleme mit dem LDAP.
Gewissermaßen passiert das schon. ldap3 selbst loggt im Standard nur, wenn der Root-Logger auf
`DEBUG`
steht. Gleichermaßen bedeutet das, dass ldap3 in Production nie selbst loggen wird. Das könnte ich aber noch einbauen: Also wenn Production, dann soll ldap3 loggen, aber auf Level
`ERROR`
statt `BASIC`.
`BASIC`
ist schon einigermaßen noisy. Die detaillierteren Level gehen dann auf Netzwerk- und Protokollebene, was für den normalen Entwicklungsalltag zu viel sein dürfte. Die sollten vielleicht nur konkret bei der Fehlersuche eingesetzt werden.