Refactor logging setup across multiple modules to use loguru with consistent configuration

- Updated logging initialization in MessageCalendar, admin_edit_prof, elsa_main, graph, iconLine, searchPage, and richtext modules to use loguru.
- Changed log rotation and retention settings for the log files (rotation at 1 MB, retention for 10 days) to improve log management; the shared setup is sketched below.
- Replaced logger.debug/info calls with log.debug/info for consistency.
- Fixed a typo in the searchPage UI and updated related references in the UI files.
- Removed unused imports and cleaned up code for better readability.
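
For reference, a minimal sketch of the shared module-level loguru setup the refactored modules now converge on, assembled from the added lines in the diffs below (the final log.info call is only illustrative and not part of the commit):

import sys

import loguru

# Drop loguru's default handler, then log to stdout and to a rotating file sink
# (rotate at 1 MB, keep 10 days of history), as in the refactored modules.
log = loguru.logger
log.remove()
log.add(sys.stdout)
log.add("logs/application.log", rotation="1 MB", retention="10 days")

log.info("Initialized logging")  # illustrative usage only
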
2025-05-13 15:49:52 +02:00
parent 4a3a95623a
commit f7c499ea6e
32 changed files with 412 additions and 491 deletions


@@ -1,4 +1,4 @@
 import re
 from dataclasses import dataclass, field
 from enum import Enum


@@ -4,25 +4,20 @@ from bs4 import BeautifulSoup
 # import sleep_and_retry decorator to retry requests
 from ratelimit import limits, sleep_and_retry
-from typing import Union, Any, Literal, Optional
+from typing import Union, Any, Optional
 from src.logic.dataclass import BookData
 from src.transformers import ARRAYData, BibTeXData, COinSData, RDSData, RISData
 from src.transformers.transformers import RDS_AVAIL_DATA, RDS_GENERIC_DATA
+import loguru
 import sys
-from loguru import logger as log
-logger = log
-logger.remove()
-logger.add("logs/application.log", rotation="1 week", retention="1 month", enqueue=True)
-log.add(
-    f"logs/webrequest.log",
-    rotation="1 day",
-    compression="zip",
-)
+log = loguru.logger
+log.remove()
+log.add(sys.stdout)
+log.add("logs/application.log", rotation="1 MB", retention="10 days")
 # logger.add(sys.stderr, format="{time} {level} {message}", level="INFO")
-logger.add(sys.stdout)

 API_URL = "https://rds.ibs-bw.de/phfreiburg/opac/RDSIndexrecord/{}/"
@@ -53,20 +48,20 @@ class WebRequest:
         self.ppn = None
         self.data = None
         self.timeout = 5
-        logger.info("Initialized WebRequest")
+        log.info("Initialized WebRequest")

     @property
     def use_any_book(self):
         """use any book that matches the search term"""
         self.use_any = True
-        logger.info("Using any book")
+        log.info("Using any book")
         return self

     def set_apparat(self, apparat: int):
         self.apparat = apparat
         if int(self.apparat) < 10:
             self.apparat = f"0{self.apparat}"
-        logger.info(f"Set apparat to {self.apparat}")
+        log.info(f"Set apparat to {self.apparat}")
         return self

     def get_ppn(self, signature: str):
@@ -100,12 +95,12 @@ class WebRequest:
             response = requests.get(link, timeout=self.timeout)
             return response.text
         except requests.exceptions.RequestException as e:
-            logger.error(f"Request failed: {e}")
+            log.error(f"Request failed: {e}")
             return None

     def get_data(self) -> Union[list[str], None]:
         links = self.get_book_links(self.ppn)
-        logger.debug(f"Links: {links}")
+        log.debug(f"Links: {links}")
         return_data: list[str] = []
         for link in links:
             result: str = self.search(link)  # type:ignore
@@ -118,7 +113,7 @@ class WebRequest:
                 item_location = location.find(
                     "div", class_="col-xs-12 col-md-7 col-lg-8 rds-dl-panel"
                 ).text.strip()
-                logger.debug(f"Item location: {item_location}")
+                log.debug(f"Item location: {item_location}")
                 if self.use_any:
                     pre_tag = soup.find_all("pre")
                     if pre_tag:
@@ -127,7 +122,7 @@ class WebRequest:
                         return_data.append(data)
                         return return_data
                     else:
-                        logger.error("No <pre> tag found")
+                        log.error("No <pre> tag found")
                         raise ValueError("No <pre> tag found")
                 elif f"Semesterapparat-{self.apparat}" in item_location:
                     pre_tag = soup.find_all("pre")
@@ -138,10 +133,10 @@ class WebRequest:
                         return_data.append(data)
                         return return_data
                     else:
-                        logger.error("No <pre> tag found")
+                        log.error("No <pre> tag found")
                         return return_data
                 else:
-                    logger.error(
+                    log.error(
                         f"Signature {self.signature} not found in {item_location}"
                     )
                     # return_data = []
@@ -166,7 +161,7 @@ class WebRequest:
                 return_data.append(data)
                 return return_data
             else:
-                logger.error("No <pre> tag found")
+                log.error("No <pre> tag found")
                 return return_data
@@ -184,7 +179,7 @@ class BibTextTransformer:
         self.field = None
         self.signature = None
         if mode not in self.valid_modes:
-            logger.error(f"Mode {mode} not valid")
+            log.error(f"Mode {mode} not valid")
             raise ValueError(f"Mode {mode} not valid")
         self.data = None
         # self.bookdata = BookData(**self.data)
@@ -274,7 +269,7 @@ class BibTextTransformer:
 def cover(isbn):
     test_url = f"https://www.buchhandel.de/cover/{isbn}/{isbn}-cover-m.jpg"
-    # logger.debug(test_url)
+    # log.debug(test_url)
     data = requests.get(test_url, stream=True)
     return data.content
@@ -284,8 +279,8 @@ def get_content(soup, css_class):
 if __name__ == "__main__":
-    # logger.debug("main")
+    # log.debug("main")
     link = "CU 8500 K64"
     data = WebRequest(71).get_ppn(link).get_data()
     bib = BibTextTransformer("ARRAY").get_data().return_data()
-    logger.debug(bib)
+    log.debug(bib)


@@ -1,23 +1,17 @@
 import pandas as pd
 from docx import Document
 from dataclasses import dataclass
-import sys
-from loguru import logger as log
 from src.backend import Semester
 from typing import Union, Any
-logger = log
-logger.remove()
-logger.add("logs/wordparser.log", rotation="1 week", retention="1 month", enqueue=True)
-log.add(
-    f"logs/application.log",
-    rotation="1 day",
-    compression="zip",
-    enqueue=True,
-)
+import loguru
+import sys
+log = loguru.logger
+log.remove()
+log.add(sys.stdout)
+log.add("logs/application.log", rotation="1 MB", retention="10 days")
 # logger.add(sys.stderr, format="{time} {level} {message}", level="INFO")
-logger.add(sys.stdout)

 letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
@@ -218,12 +212,12 @@ def elsa_word_to_csv(path: str):
     data = [
         row for row in df.itertuples(index=False, name=None) if row != tuples[doctype]
     ]
-    # logger.debug(data)
+    # log.debug(data)
     return tuple_to_dict(data, doctype), doctype


 def word_to_semap(word_path: str) -> SemapDocument:
-    logger.info("Parsing Word Document {}", word_path)
+    log.info("Parsing Word Document {}", word_path)
     semap = SemapDocument()
     df = word_docx_to_csv(word_path)
     apparatdata = df[0]
@@ -258,7 +252,7 @@ def word_to_semap(word_path: str) -> SemapDocument:
             continue
         else:
             booklist.append(book)
-    logger.info("Found {} books", len(booklist))
+    log.info("Found {} books", len(booklist))
     semap.books = booklist
     return semap