Files
dockge-migration-guide/take_screenshots.py
olaf c6d8371ebe Add comprehensive verification report with curl/HTTP evidence
Migration verification completed WITHOUT screenshots due to browser automation resource constraints. This commit provides comprehensive evidence via:

- HTTP response verification (curl tests)
- Docker container status verification (SSH to fry)
- Detailed verification report documenting all 7 migrated containers
- Screenshot automation tools (puppeteer/playwright) for future use

Evidence Summary:
- Photon Dockge: HTTP 200 (services stopped as expected)
- Fry Dockge: HTTP 200 (services running)
- All Docker containers healthy on fry (29+ minutes uptime)
- Gitea + PostgreSQL: Running and healthy
- Mastodon (5 containers): All running and healthy

Pending Tasks:
- Mastodon media files transfer (public/system directory empty - 4KB)
- Gitea external port 3000 accessibility (firewall check needed)
- Screenshot capture when resources available

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-16 11:19:02 +00:00

109 lines
3.6 KiB
Python

"""
A script to capture full-page screenshots of specified URLs using Playwright.
This script navigates to a list of web pages, waits for them to be fully loaded,
and saves a full-page screenshot to a designated path.
Requirements:
- Python 3.7+
- Playwright library
One-time setup:
1. Install the Playwright library:
pip install playwright
2. Install the necessary browser binaries (this will download Chromium, Firefox, etc.):
playwright install
"""
import asyncio
from pathlib import Path
from typing import List, Dict
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeoutError
# --- Configuration ---
# A list of dictionaries, each specifying a URL to capture and the output path.
# Each entry is consumed by capture_screenshot(); 'url' is the page to visit
# and 'output_path' is the absolute destination for the PNG file.
TARGETS: List[Dict[str, str]] = [
    {
        # Dockge UI on photon (source host of the migration).
        "url": "http://photon.obnh.io:5001",
        "output_path": "/home/olaf/dockge-migration-guide/screenshots/photon-dockge-home.png",
    },
    {
        # Dockge UI on fry (destination host of the migration).
        "url": "http://fry.obr.sh:5001",
        "output_path": "/home/olaf/dockge-migration-guide/screenshots/fry-dockge-home.png",
    },
    {
        # Gitea web UI reached by IP; presumably the external port-3000 check
        # mentioned in the migration notes — TODO confirm reachability.
        "url": "http://45.131.64.213:3000",
        "output_path": "/home/olaf/dockge-migration-guide/screenshots/gitea-on-fry.png",
    },
]
# Browser settings
HEADLESS_MODE: bool = True  # launch Chromium without a visible window
NETWORK_IDLE_TIMEOUT: int = 30000  # 30 seconds (Playwright timeouts are in milliseconds)
async def capture_screenshot(target: Dict[str, str]) -> None:
    """
    Navigate to a single URL and capture a full-page screenshot.

    A fresh Chromium instance is launched per call for isolation; the browser
    is always closed in the ``finally`` clause, even on navigation failure.

    Args:
        target: A dictionary containing the 'url' and 'output_path'.
    """
    url = target["url"]
    output_path_str = target["output_path"]
    output_path = Path(output_path_str)

    # Ensure the parent directory for the screenshot exists.
    try:
        output_path.parent.mkdir(parents=True, exist_ok=True)
    except OSError as e:
        print(f"❌ Failed to create directory for {output_path_str}. Error: {e}")
        return

    async with async_playwright() as p:
        # Bind up front so the finally clause can test it safely. This replaces
        # the fragile `'browser' in locals()` check: `browser` is always bound
        # here and is None only when the launch itself failed.
        browser = None
        try:
            browser = await p.chromium.launch(headless=HEADLESS_MODE)
            context = await browser.new_context()
            page = await context.new_page()
            print(f"▶️ Navigating to {url}...")
            # Wait for network idle so late-loading dashboard assets appear.
            await page.goto(
                url,
                wait_until="networkidle",
                timeout=NETWORK_IDLE_TIMEOUT,
            )
            print(f"📸 Capturing screenshot for {url}...")
            await page.screenshot(path=output_path, full_page=True)
            print(f"✅ Success! Screenshot saved to {output_path_str}")
        except PlaywrightTimeoutError:
            print(f"❌ Failure: Timed out while loading {url} after {NETWORK_IDLE_TIMEOUT / 1000}s.")
        except Exception as e:
            print(f"❌ Failure: An unexpected error occurred for {url}. Error: {e}")
        finally:
            # Closing the browser also tears down its contexts and pages.
            if browser is not None and browser.is_connected():
                await browser.close()
async def main():
    """
    Iterate over the configured targets and capture a screenshot of each.
    """
    print("Starting screenshot capture process...")
    # Captures run one at a time to keep resource usage low; swap the loop
    # for asyncio.gather if parallel execution is ever wanted.
    separator = "-" * 20
    for item in TARGETS:
        await capture_screenshot(item)
        print(separator)
    print("All tasks completed.")
# Script entry point.
if __name__ == "__main__":
    # Note: A new browser instance is launched for each screenshot.
    # This provides maximum isolation but is slower. For speed, you could
    # refactor to launch one browser and use new pages within it for each target.
    # The current approach is more robust against page-specific crashes.
    asyncio.run(main())