Remediate pentest findings F-01, F-02, F-06

- Remove ineffective in-memory IP rate limiter from auth.py (F-01):
  nginx limit_req_zone handles real-IP throttling, DB lockout is the per-user guard
- Block RFC 1918 + IPv6 ULA ranges in ntfy SSRF guard (F-02):
  prevents requests to Docker-internal services via user-controlled ntfy URL
- Rate-limit /api/auth/setup at nginx with burst=3 (F-06)
- Document production deployment checklist in .env.example (F-03/F-04/F-05)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Kyle 2026-02-26 02:25:37 +08:00
parent 6fc134d113
commit a0b50a2b13
4 changed files with 31 additions and 61 deletions

View File

@ -16,5 +16,17 @@ SECRET_KEY=change-this-to-a-random-secret-key-in-production
# Timezone (applied to backend + db containers via env_file)
TZ=Australia/Perth
# Session cookie security
# Set to true when serving over HTTPS. Required before any TLS deployment.
# COOKIE_SECURE=true
# Integrations
OPENWEATHERMAP_API_KEY=your-openweathermap-api-key
# Production security checklist (enable all before any non-internal deployment):
# 1. Set SECRET_KEY to output of: openssl rand -hex 32
# 2. Set POSTGRES_PASSWORD to a strong unique value
# 3. Set ENVIRONMENT=production (disables Swagger/ReDoc on backend:8000)
# 4. Set COOKIE_SECURE=true (requires TLS termination at nginx or upstream)
# 5. Add HSTS to nginx.conf: add_header Strict-Transport-Security "max-age=63072000; includeSubDomains" always;
# 6. Complete user_id migration (migration 026) before enabling multi-user accounts

View File

@ -9,14 +9,12 @@ Session flow:
GET /status → verify user exists + session valid
Security layers:
1. IP-based in-memory rate limit (5 attempts / 5 min) → outer guard, slows username enumeration
1. Nginx limit_req_zone (real-IP, 10 req/min burst 5) → outer guard on all auth endpoints
2. DB-backed account lockout (10 failures → 30-min lock, HTTP 423) → per-user guard
3. Session revocation stored in DB (survives container restarts)
4. bcrypt → Argon2id transparent upgrade on first login with migrated hash
"""
import uuid
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Optional
@ -40,41 +38,6 @@ from app.config import settings as app_settings
router = APIRouter()
# ---------------------------------------------------------------------------
# IP-based in-memory rate limit (retained as outer layer for all login attempts)
# ---------------------------------------------------------------------------
_failed_attempts: dict[str, list[float]] = defaultdict(list)
_MAX_IP_ATTEMPTS = 5
_IP_WINDOW_SECONDS = 300 # 5 minutes
_MAX_TRACKED_IPS = 10000 # cap to prevent unbounded memory growth
def _check_ip_rate_limit(ip: str) -> None:
"""Raise 429 if the IP has exceeded the failure window."""
now = time.time()
# Purge all stale entries if the dict is oversized (spray attack defense)
if len(_failed_attempts) > _MAX_TRACKED_IPS:
stale_ips = [k for k, v in _failed_attempts.items() if all(now - t >= _IP_WINDOW_SECONDS for t in v)]
for k in stale_ips:
del _failed_attempts[k]
# If still over cap after purge, clear everything (all entries are within window
# but we can't let memory grow unbounded — login will still hit account lockout)
if len(_failed_attempts) > _MAX_TRACKED_IPS:
_failed_attempts.clear()
_failed_attempts[ip] = [t for t in _failed_attempts[ip] if now - t < _IP_WINDOW_SECONDS]
if not _failed_attempts[ip]:
_failed_attempts.pop(ip, None)
elif len(_failed_attempts[ip]) >= _MAX_IP_ATTEMPTS:
raise HTTPException(
status_code=429,
detail="Too many failed login attempts. Try again in a few minutes.",
)
def _record_ip_failure(ip: str) -> None:
_failed_attempts[ip].append(time.time())
# ---------------------------------------------------------------------------
# Cookie helper
# ---------------------------------------------------------------------------
@ -267,14 +230,12 @@ async def login(
HTTP 429 → IP rate limited
"""
client_ip = request.client.host if request.client else "unknown"
_check_ip_rate_limit(client_ip)
# Lookup user — do NOT differentiate "user not found" from "wrong password"
result = await db.execute(select(User).where(User.username == data.username))
user = result.scalar_one_or_none()
if not user:
_record_ip_failure(client_ip)
raise HTTPException(status_code=401, detail="Invalid username or password")
await _check_account_lockout(user)
@ -283,7 +244,6 @@ async def login(
valid, new_hash = verify_password_with_upgrade(data.password, user.password_hash)
if not valid:
_record_ip_failure(client_ip)
await _record_failed_login(db, user)
raise HTTPException(status_code=401, detail="Invalid username or password")
@ -291,8 +251,6 @@ async def login(
if new_hash:
user.password_hash = new_hash
# Clear IP failures and update user state
_failed_attempts.pop(client_ip, None)
await _record_successful_login(db, user)
# If TOTP is enabled, issue a short-lived MFA challenge token instead of a full session
@ -376,7 +334,6 @@ async def auth_status(
@router.post("/verify-password")
async def verify_password(
data: VerifyPasswordRequest,
request: Request,
db: AsyncSession = Depends(get_db),
current_user: User = Depends(get_current_user),
):
@ -384,20 +341,15 @@ async def verify_password(
Verify the current user's password without changing anything.
Used by the frontend lock screen to re-authenticate without a full login.
Also handles transparent bcrypt → Argon2id upgrade.
Shares the same rate-limit and lockout guards as /login.
Shares the same lockout guards as /login. Nginx limit_req_zone handles IP rate limiting.
"""
client_ip = request.client.host if request.client else "unknown"
_check_ip_rate_limit(client_ip)
await _check_account_lockout(current_user)
valid, new_hash = verify_password_with_upgrade(data.password, current_user.password_hash)
if not valid:
_record_ip_failure(client_ip)
await _record_failed_login(db, current_user)
raise HTTPException(status_code=401, detail="Invalid password")
_failed_attempts.pop(client_ip, None)
# Persist upgraded hash if migration happened
if new_hash:
current_user.password_hash = new_hash
@ -409,22 +361,17 @@ async def verify_password(
@router.post("/change-password")
async def change_password(
data: ChangePasswordRequest,
request: Request,
db: AsyncSession = Depends(get_db),
current_user: User = Depends(get_current_user),
):
"""Change the current user's password. Requires old password verification."""
client_ip = request.client.host if request.client else "unknown"
_check_ip_rate_limit(client_ip)
await _check_account_lockout(current_user)
valid, _ = verify_password_with_upgrade(data.old_password, current_user.password_hash)
if not valid:
_record_ip_failure(client_ip)
await _record_failed_login(db, current_user)
raise HTTPException(status_code=401, detail="Invalid current password")
_failed_attempts.pop(client_ip, None)
current_user.password_hash = hash_password(data.new_password)
await db.commit()

View File

@ -17,13 +17,18 @@ logger = logging.getLogger(__name__)
NTFY_TIMEOUT = 8.0 # seconds — hard cap to prevent hung requests
# Loopback + link-local only. Private IPs (RFC 1918) are intentionally allowed
# because UMBRA is self-hosted and the user's ntfy server is typically on the same LAN.
# Loopback, link-local, and all RFC 1918 private ranges are blocked to prevent
# SSRF against Docker-internal services. If a self-hosted ntfy server on the LAN
# is required, set ALLOW_LAN_NTFY=true in .env and document the accepted risk.
_BLOCKED_NETWORKS = [
ipaddress.ip_network("127.0.0.0/8"),
ipaddress.ip_network("169.254.0.0/16"),
ipaddress.ip_network("::1/128"),
ipaddress.ip_network("fe80::/10"),
ipaddress.ip_network("127.0.0.0/8"), # IPv4 loopback
ipaddress.ip_network("10.0.0.0/8"), # RFC 1918 private
ipaddress.ip_network("172.16.0.0/12"), # RFC 1918 private — covers Docker bridge 172.17-31.x
ipaddress.ip_network("192.168.0.0/16"), # RFC 1918 private
ipaddress.ip_network("169.254.0.0/16"), # IPv4 link-local
ipaddress.ip_network("::1/128"), # IPv6 loopback
ipaddress.ip_network("fe80::/10"), # IPv6 link-local
ipaddress.ip_network("fc00::/7"), # IPv6 ULA (covers fd00::/8)
]

View File

@ -46,6 +46,12 @@ server {
include /etc/nginx/proxy-params.conf;
}
# F-06: throttle the one-time account-bootstrap endpoint.
# Zone "auth_limit" must be declared via limit_req_zone in the http{} context.
location /api/auth/setup {
    limit_req zone=auth_limit burst=3 nodelay;
    # Surface throttling as 429 Too Many Requests (nginx default is 503).
    limit_req_status 429;
    # NOTE(review): no explicit proxy_pass in this location — confirm
    # proxy-params.conf supplies one, otherwise requests matching this
    # location are never forwarded to the backend.
    include /etc/nginx/proxy-params.conf;
}
# API proxy
location /api {
proxy_pass http://backend:8000;