Mirror of https://github.com/Xevion/linkpulse.git, synced 2025-12-06 11:15:34 -06:00
Setup structlog, delete randomized IPs on startup
- minor formatting, type fixes
@@ -7,17 +7,21 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from typing import AsyncIterator
 
+import structlog
+import human_readable
+from apscheduler.schedulers.background import BackgroundScheduler  # type: ignore
+from apscheduler.triggers.interval import IntervalTrigger  # type: ignore
 from dotenv import load_dotenv
 from fastapi import FastAPI, Request, Response, status
 from fastapi_cache import FastAPICache
 from fastapi_cache.backends.inmemory import InMemoryBackend
 from fastapi_cache.decorator import cache
-import human_readable
 from linkpulse.utilities import get_ip, hide_ip, pluralize
 from peewee import PostgresqlDatabase
 from psycopg2.extras import execute_values
-from apscheduler.schedulers.background import BackgroundScheduler
-from apscheduler.triggers.interval import IntervalTrigger
+
+if not structlog.is_configured():
+    import linkpulse.logging
 
 load_dotenv(dotenv_path=".env")
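
The `if not structlog.is_configured():` block configures logging through an import side effect: the body of `linkpulse.logging` (shown at the bottom of this diff) calls `structlog.configure(...)` when the module is first imported, and the guard skips that import if something else (tests, a REPL session) has already configured structlog. A minimal sketch of the same pattern, with hypothetical module names:

# mylib/logging_setup.py -- hypothetical module; its body configures
# structlog as a side effect of being imported, as linkpulse.logging does.
import structlog

structlog.configure(processors=[structlog.processors.JSONRenderer()])


# mylib/app.py
import structlog

if not structlog.is_configured():
    # Import purely for the side effect; the guard leaves any existing
    # configuration (e.g. one installed by a test harness) untouched.
    import mylib.logging_setup  # noqa: F401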
@@ -27,6 +31,8 @@ from linkpulse import models, responses  # type: ignore
 is_development = os.getenv("ENVIRONMENT") == "development"
 db: PostgresqlDatabase = models.BaseModel._meta.database  # type: ignore
 
+logger = structlog.get_logger(__name__)
+
 
 def flush_ips():
     if len(app.state.buffered_updates) == 0:
@@ -51,10 +57,10 @@ def flush_ips():
         cur = db.cursor()
         execute_values(cur, sql, rows)
     except:
-        print("Failed to flush IPs to the database.")
+        logging.error("Failed to flush IPs to the database.")
 
     i = len(app.state.buffered_updates)
-    print("Flushed {} IP{} to the database.".format(i, pluralize(i)))
+    logging.debug("Flushed {} IP{} to the database.".format(i, pluralize(i)))
 
     # Finish up
     app.state.buffered_updates.clear()
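
`flush_ips` batches its writes with psycopg2's `execute_values`, which expands a single `VALUES %s` placeholder into every buffered row in one statement and one network round trip. The SQL string built by `flush_ips` is outside this hunk, so the sketch below uses a hypothetical table and upsert; only the call shape is the point:

import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("dbname=linkpulse")  # assumed DSN
sql = """
    INSERT INTO ipaddress (ip, last_seen) VALUES %s
    ON CONFLICT (ip) DO UPDATE SET last_seen = EXCLUDED.last_seen
"""
rows = [
    ("1.2.3.4", "2024-01-01T00:00:00"),
    ("5.6.7.8", "2024-01-01T00:00:05"),
]
with conn.cursor() as cur:
    # One multi-row INSERT instead of len(rows) separate statements.
    execute_values(cur, sql, rows)
conn.commit()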
@@ -77,6 +83,19 @@ async def lifespan(_: FastAPI) -> AsyncIterator[None]:
     # Connect to database, ensure specific tables exist
     db.connect()
     db.create_tables([models.IPAddress])
+
+    # Delete all randomly generated IP addresses
+    with db.atomic():
+        logging.info(
+            "Deleting Randomized IP Addresses",
+            {"ip_pool_count": len(app.state.ip_pool)},
+        )
+        query = models.IPAddress.delete().where(
+            models.IPAddress.ip << app.state.ip_pool
+        )
+        rowcount = query.execute()
+        logger.info("Randomized IP Addresses deleted", {"rowcount": rowcount})
+
     FastAPICache.init(
         backend=InMemoryBackend(), prefix="fastapi-cache", cache_status_header="X-Cache"
     )
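
The deletion relies on peewee's `<<` operator, which compiles to SQL `IN`; wrapped in `db.atomic()`, the whole cleanup commits or rolls back as one transaction. A standalone sketch (the IP strings stand in for `app.state.ip_pool`):

from linkpulse import models

# `Field << sequence` is peewee's IN operator; this issues roughly:
#   DELETE FROM ipaddress WHERE ip IN ('10.0.0.1', '10.0.0.2')
fake_ips = ["10.0.0.1", "10.0.0.2"]  # stand-in for app.state.ip_pool
query = models.IPAddress.delete().where(models.IPAddress.ip << fake_ips)
rowcount = query.execute()  # number of rows deleted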
@@ -140,10 +159,6 @@ async def get_migration():
     return {"name": name, "migrated_at": migrated_at}
 
 
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
 @app.get("/api/ips")
 async def get_ips(request: Request, response: Response):
     """
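
With the module-level `logging.getLogger(__name__)` / `setLevel(logging.DEBUG)` pair gone, the effective level comes from the single `logging.basicConfig(handlers=[handler], level=logging.INFO)` call in the new linkpulse/logging.py. If one module still needs DEBUG output, a local override on a named logger suffices; the logger name below is hypothetical:

import logging

# Central config pins the root level to INFO; one noisy module can
# still opt into DEBUG without touching the root logger.
logging.getLogger("linkpulse.flush").setLevel(logging.DEBUG)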
@@ -152,13 +167,11 @@ async def get_ips(request: Request, response: Response):
     now = datetime.now()
 
     # Get the user's IP address
-    user_ip = (
-        get_ip(request) if not is_development else random.choice(app.state.ip_pool)
-    )
+    user_ip = get_ip(request)
 
     # If the IP address is not found, return an error
     if user_ip is None:
-        print("No IP found!")
+        logging.info("No IP found!")
         response.status_code = status.HTTP_403_FORBIDDEN
         return {"error": "Unable to handle request."}
 
backend/linkpulse/logging.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+import logging
+import sys
+from typing import List
+
+import structlog
+from structlog.stdlib import ProcessorFormatter
+from structlog.types import Processor
+
+shared_processors: List[Processor] = [
+    structlog.stdlib.add_log_level,
+    structlog.processors.CallsiteParameterAdder(
+        {
+            structlog.processors.CallsiteParameter.MODULE,
+            structlog.processors.CallsiteParameter.FILENAME,
+            structlog.processors.CallsiteParameter.LINENO,
+        }
+    ),
+    structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f"),
+]
+
+structlog_processors = shared_processors + []
+# Remove _record & _from_structlog.
+logging_processors: List[Processor] = [ProcessorFormatter.remove_processors_meta]
+
+if sys.stderr.isatty():
+    console_renderer = structlog.dev.ConsoleRenderer()
+    logging_processors.append(console_renderer)
+    structlog_processors.append(console_renderer)
+else:
+    json_renderer = structlog.processors.JSONRenderer(indent=1, sort_keys=True)
+    structlog_processors.append(json_renderer)
+    logging_processors.append(json_renderer)
+
+structlog.configure(
+    processors=structlog_processors,
+    wrapper_class=structlog.stdlib.BoundLogger,
+    # logger_factory=structlog.stdlib.LoggerFactory(),
+    logger_factory=structlog.PrintLoggerFactory(sys.stderr),
+    context_class=dict,
+    cache_logger_on_first_use=True,
+)
+
+
+formatter = ProcessorFormatter(
+    # These run ONLY on `logging` entries that do NOT originate within
+    # structlog.
+    foreign_pre_chain=shared_processors,
+    # These run on ALL entries after the pre_chain is done.
+    processors=logging_processors,
+)
+
+handler = logging.StreamHandler(sys.stderr)
+# Use OUR `ProcessorFormatter` to format all `logging` entries.
+handler.setFormatter(formatter)
+logging.basicConfig(handlers=[handler], level=logging.INFO)
+
+external_loggers = ["uvicorn.error", "uvicorn.access"]
+for logger_name in external_loggers:
+    logger = logging.getLogger(logger_name)
+    logger.handlers = [handler]
+    logger.propagate = False
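
Once this module has been imported, both logging paths share the same processor chain: structlog events are rendered directly through `PrintLoggerFactory(sys.stderr)`, while records from plain `logging` (including the redirected uvicorn loggers) pass through `ProcessorFormatter` with `shared_processors` as the `foreign_pre_chain`, so they gain the same level, timestamp, and callsite fields. A quick smoke test, assuming the package layout above; note that structlog's native API takes context as keyword arguments:

import logging

import structlog

import linkpulse.logging  # noqa: F401  (configures both pipelines on import)

# structlog path: rendered by ConsoleRenderer or JSONRenderer depending
# on whether stderr is a TTY.
structlog.get_logger("demo").info("cache_flush", rows=42)

# stdlib path: the record is formatted by the shared ProcessorFormatter,
# so it carries the same fields as the structlog output.
logging.getLogger("uvicorn.error").warning("upstream timeout")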