mirror of
https://git.photon.obnh.io/olaf/recover-server.git
synced 2025-12-11 11:19:15 +00:00
Initial commit: Disaster recovery CLI tool
A Go-based CLI tool for recovering servers from backups to new cloud VMs. Features: - Multi-cloud support: Exoscale, Cloudscale, Hetzner Cloud - Backup sources: Local filesystem, Hetzner Storage Box - 6-stage restore pipeline with /etc whitelist protection - DNS migration with safety checks and auto-rollback - Dry-run by default, requires --yes to execute - Cloud-init for SSH key injection Packages: - cmd/recover-server: CLI commands (recover, migrate-dns, list, cleanup) - internal/providers: Cloud provider implementations - internal/backup: Backup source implementations - internal/restore: 6-stage restore pipeline - internal/dns: Exoscale DNS management - internal/ui: Prompts, progress, dry-run display - internal/config: Environment and host configuration 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
16
.env.example
Normal file
16
.env.example
Normal file
@@ -0,0 +1,16 @@
|
||||
# Exoscale credentials
|
||||
EXOSCALE_API_KEY=your-exoscale-api-key
|
||||
EXOSCALE_API_SECRET=your-exoscale-api-secret
|
||||
|
||||
# Cloudscale.ch credentials
|
||||
CLOUDSCALE_API_TOKEN=your-cloudscale-token
|
||||
|
||||
# Hetzner Cloud credentials
|
||||
HETZNER_API_KEY=your-hetzner-api-key
|
||||
|
||||
# Hetzner Storage Box (for backups)
|
||||
HETZNER_STORAGEBOX_USER=u480813
|
||||
HETZNER_STORAGEBOX_HOST=u480813.your-storagebox.de
|
||||
|
||||
# Local backup path
|
||||
LOCAL_BACKUP_PATH=/srv/backups
|
||||
11
.gitignore
vendored
Normal file
11
.gitignore
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Build artifacts
|
||||
recover-server
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
|
||||
774
cmd/recover-server/main.go
Normal file
774
cmd/recover-server/main.go
Normal file
@@ -0,0 +1,774 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"recover-server/internal/backup"
|
||||
"recover-server/internal/config"
|
||||
"recover-server/internal/dns"
|
||||
"recover-server/internal/providers"
|
||||
"recover-server/internal/restore"
|
||||
"recover-server/internal/ui"
|
||||
)
|
||||
|
||||
// Global CLI flags, bound as persistent flags on rootCmd in init().
var (
	cfgFile string // --env-file: path to the .env configuration file loaded in PersistentPreRunE
	dryRun  bool   // --dry-run: show what would happen without executing
	verbose bool   // --verbose/-v: verbose output, forwarded to the restore pipeline and DNS migrator
	yesFlag bool   // --yes/-y: skip confirmation prompts; without it commands only print their plan
	logFile string // --log-file: write logs to file (registered but not referenced in this file — TODO confirm where it is consumed)
)
|
||||
|
||||
func main() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// rootCmd is the top-level command. Its PersistentPreRunE loads the
// environment configuration from --env-file before any subcommand runs.
var rootCmd = &cobra.Command{
	Use:   "recover-server",
	Short: "Disaster recovery CLI tool for server restoration",
	Long: `A Go-based CLI tool to recover servers from backups to new cloud VMs.

Supports:
- Backup sources: local (/srv/backups/) or Hetzner Storage Box
- Target clouds: Exoscale, Cloudscale, Hetzner
- DNS migration via Exoscale API (separate step with safety checks)

Example workflow:
recover-server recover --host proton --source hetzner --target exoscale
recover-server migrate-dns --host proton --new-ip 1.2.3.4`,
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		return config.Load(cfgFile)
	},
}

// recoverCmd creates a new VM and restores a host's backup onto it.
// It never touches DNS; that is migrateDNSCmd's job.
var recoverCmd = &cobra.Command{
	Use:   "recover",
	Short: "Recover a host to a new VM",
	Long: `Create a new VM on the target cloud provider and restore from backup.

This command:
1. Creates a new VM with cloud-init SSH key injection
2. Waits for SSH availability
3. Rsyncs backup data from source
4. Restores services (WireGuard, Docker, etc.)
5. Verifies health

DNS is NOT modified - use 'migrate-dns' separately after verification.`,
	RunE: runRecover,
}

// migrateDNSCmd repoints DNS records at a recovered VM, with safety
// checks and confirmation. Run only after verifying the recovery.
var migrateDNSCmd = &cobra.Command{
	Use:   "migrate-dns",
	Short: "Migrate DNS records after recovery verification",
	Long: `Update DNS A/AAAA records to point to the new VM.

SAFETY CHECKS (mandatory):
1. Verify new VM responds on port 22 (SSH)
2. Verify new VM responds on port 443 (HTTPS)
3. Verify current DNS matches expected old IP
4. Require typing hostname to confirm
5. Auto-rollback if post-migration health fails

This command should ONLY be run after manually verifying the recovered server.`,
	RunE: runMigrateDNS,
}

// listCmd is a parent for the read-only listing subcommands below.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List available resources",
	Long:  `List backups, providers, flavors, images, or DNS zones.`,
}

// listBackupsCmd enumerates host backups on the selected source.
var listBackupsCmd = &cobra.Command{
	Use:   "backups",
	Short: "List available host backups",
	RunE:  runListBackups,
}

// listProvidersCmd shows which cloud providers have credentials configured.
var listProvidersCmd = &cobra.Command{
	Use:   "providers",
	Short: "List configured cloud providers",
	RunE:  runListProviders,
}

// listFlavorsCmd lists VM sizes offered by a given provider.
var listFlavorsCmd = &cobra.Command{
	Use:   "flavors",
	Short: "List VM flavors for a provider",
	RunE:  runListFlavors,
}

// listImagesCmd lists base OS images offered by a given provider.
var listImagesCmd = &cobra.Command{
	Use:   "images",
	Short: "List available images for a provider",
	RunE:  runListImages,
}

// listDNSZonesCmd lists the DNS zones reachable via the Exoscale DNS API.
var listDNSZonesCmd = &cobra.Command{
	Use:   "dns-zones",
	Short: "List DNS zones managed via Exoscale",
	RunE:  runListDNSZones,
}

// cleanupCmd deletes a VM left over from a failed recovery attempt.
var cleanupCmd = &cobra.Command{
	Use:   "cleanup",
	Short: "Delete a failed recovery VM",
	Long: `Clean up a failed recovery attempt by deleting the VM.

Use this when a recovery fails mid-way and you want to start fresh.`,
	RunE: runCleanup,
}
|
||||
|
||||
// Recover command flags
|
||||
// Recover command flags (bound in init()).
var (
	recoverHost            string // --host: host to recover (e.g. proton, elektron)
	recoverSource          string // --source: backup source ("local" or "hetzner")
	recoverTarget          string // --target: target cloud ("exoscale", "cloudscale", "hetzner")
	recoverFlavor          string // --flavor: VM flavor/size
	recoverZone            string // --zone: target zone
	recoverImage           string // --image: base image (default ubuntu-24.04)
	recoverSSHKey          string // --ssh-key: path to SSH private key (not consulted by runRecover — TODO confirm)
	recoverRestoreHostKeys bool   // --restore-host-keys (not consulted by runRecover — TODO confirm)
	recoverSkipDocker      bool   // --skip-docker (not consulted by runRecover — TODO confirm)
	recoverSkipWireGuard   bool   // --skip-wireguard (not consulted by runRecover — TODO confirm)
)

// Migrate DNS command flags (bound in init()).
var (
	migrateDNSHost            string // --host: hostname whose records are updated
	migrateDNSNewIP           string // --new-ip: new A record content
	migrateDNSOldIP           string // --old-ip: expected current IP (safety check)
	migrateDNSTTL             int    // --ttl: displayed in the plan; not passed into MigrationRequest — TODO confirm
	migrateDNSSkipHealthCheck bool   // --skip-health-check (not consulted in this file — TODO confirm)
	migrateDNSRollbackOnFail  bool   // --rollback-on-fail (not consulted in this file — TODO confirm)
)

// List command flags, shared by the list subcommands (bound in init()).
var (
	listSource   string // --source for "list backups"
	listProvider string // --provider for "list flavors" and "list images" (shared variable)
)

// Cleanup command flags (bound in init()).
var (
	cleanupVMID     string // --vm-id: VM to delete
	cleanupProvider string // --provider: cloud hosting the VM
)
|
||||
|
||||
// init registers all flags and assembles the cobra command tree.
func init() {
	// Global flags
	rootCmd.PersistentFlags().StringVar(&cfgFile, "env-file", ".env", "Path to .env file")
	rootCmd.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "Show what would happen without executing")
	rootCmd.PersistentFlags().BoolVarP(&yesFlag, "yes", "y", false, "Skip confirmation prompts")
	rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output")
	rootCmd.PersistentFlags().StringVar(&logFile, "log-file", "", "Write logs to file")

	// Recover command flags
	recoverCmd.Flags().StringVar(&recoverHost, "host", "", "Host to recover (e.g., proton, elektron)")
	recoverCmd.Flags().StringVar(&recoverSource, "source", "", "Backup source: local, hetzner")
	recoverCmd.Flags().StringVar(&recoverTarget, "target", "", "Target cloud: exoscale, cloudscale, hetzner")
	recoverCmd.Flags().StringVar(&recoverFlavor, "flavor", "", "VM flavor/size (default: auto-detect)")
	recoverCmd.Flags().StringVar(&recoverZone, "zone", "", "Target zone (default: provider default)")
	recoverCmd.Flags().StringVar(&recoverImage, "image", "ubuntu-24.04", "Base image")
	recoverCmd.Flags().StringVar(&recoverSSHKey, "ssh-key", "", "Path to SSH private key (default: generate ephemeral)")
	recoverCmd.Flags().BoolVar(&recoverRestoreHostKeys, "restore-host-keys", false, "Restore original SSH host keys (security warning)")
	recoverCmd.Flags().BoolVar(&recoverSkipDocker, "skip-docker", false, "Skip Docker service restoration")
	recoverCmd.Flags().BoolVar(&recoverSkipWireGuard, "skip-wireguard", false, "Skip WireGuard restoration")
	// MarkFlagRequired error returns are ignored: the named flags are
	// registered immediately above, so lookup cannot fail.
	recoverCmd.MarkFlagRequired("host")
	recoverCmd.MarkFlagRequired("source")
	recoverCmd.MarkFlagRequired("target")

	// Migrate DNS command flags
	migrateDNSCmd.Flags().StringVar(&migrateDNSHost, "host", "", "Host whose DNS to migrate")
	migrateDNSCmd.Flags().StringVar(&migrateDNSNewIP, "new-ip", "", "New IP address for A record")
	migrateDNSCmd.Flags().StringVar(&migrateDNSOldIP, "old-ip", "", "Expected current IP (safety check)")
	migrateDNSCmd.Flags().IntVar(&migrateDNSTTL, "ttl", 300, "TTL for new records")
	migrateDNSCmd.Flags().BoolVar(&migrateDNSSkipHealthCheck, "skip-health-check", false, "Skip health checks (DANGEROUS!)")
	migrateDNSCmd.Flags().BoolVar(&migrateDNSRollbackOnFail, "rollback-on-fail", true, "Auto-rollback if post-migration checks fail")
	migrateDNSCmd.MarkFlagRequired("host")
	migrateDNSCmd.MarkFlagRequired("new-ip")

	// List command flags. Note: listProvider is shared between the
	// flavors and images subcommands.
	listBackupsCmd.Flags().StringVar(&listSource, "source", "local", "Backup source: local, hetzner")
	listFlavorsCmd.Flags().StringVar(&listProvider, "provider", "", "Provider name")
	listFlavorsCmd.MarkFlagRequired("provider")
	listImagesCmd.Flags().StringVar(&listProvider, "provider", "", "Provider name")
	listImagesCmd.MarkFlagRequired("provider")

	// Cleanup command flags
	cleanupCmd.Flags().StringVar(&cleanupVMID, "vm-id", "", "VM ID to delete")
	cleanupCmd.Flags().StringVar(&cleanupProvider, "provider", "", "Provider name")
	cleanupCmd.MarkFlagRequired("vm-id")
	cleanupCmd.MarkFlagRequired("provider")

	// Build command tree
	listCmd.AddCommand(listBackupsCmd)
	listCmd.AddCommand(listProvidersCmd)
	listCmd.AddCommand(listFlavorsCmd)
	listCmd.AddCommand(listImagesCmd)
	listCmd.AddCommand(listDNSZonesCmd)

	rootCmd.AddCommand(recoverCmd)
	rootCmd.AddCommand(migrateDNSCmd)
	rootCmd.AddCommand(listCmd)
	rootCmd.AddCommand(cleanupCmd)
}
|
||||
|
||||
// Command implementations
|
||||
|
||||
func runRecover(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
cfg := config.Get()
|
||||
dryRunTracker := ui.NewDryRun(!yesFlag)
|
||||
|
||||
// Show recovery plan
|
||||
ui.PrintInfo("Recovery Plan:")
|
||||
fmt.Printf(" Host: %s\n", recoverHost)
|
||||
fmt.Printf(" Source: %s\n", recoverSource)
|
||||
fmt.Printf(" Target: %s\n", recoverTarget)
|
||||
fmt.Printf(" Flavor: %s\n", recoverFlavor)
|
||||
fmt.Printf(" Image: %s\n", recoverImage)
|
||||
fmt.Printf(" Zone: %s\n", recoverZone)
|
||||
|
||||
// Check if this is dry-run
|
||||
if !yesFlag {
|
||||
dryRunTracker.AddOperation("VM", "Create", fmt.Sprintf("Create VM on %s", recoverTarget))
|
||||
dryRunTracker.AddOperation("SSH", "Wait", "Wait for SSH availability")
|
||||
dryRunTracker.AddOperation("Backup", "Sync", fmt.Sprintf("Rsync data from %s", recoverSource))
|
||||
dryRunTracker.AddOperation("Restore", "Execute", "Run restore pipeline")
|
||||
dryRunTracker.AddOperation("Health", "Check", "Verify service health")
|
||||
dryRunTracker.Print()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ask for confirmation
|
||||
if !ui.ConfirmRecovery(recoverHost, recoverSource, recoverTarget) {
|
||||
ui.PrintWarning("Recovery cancelled")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Step 1: Create backup source
|
||||
ui.PrintInfo("Creating backup source...")
|
||||
var backupSrc backup.BackupSource
|
||||
var err error
|
||||
|
||||
switch recoverSource {
|
||||
case "local":
|
||||
backupSrc = backup.NewLocalSource(cfg.LocalBackupPath)
|
||||
case "hetzner":
|
||||
backupSrc = backup.NewHetznerStorageSource(
|
||||
cfg.HetznerStorageBoxUser,
|
||||
cfg.HetznerStorageBoxHost,
|
||||
)
|
||||
default:
|
||||
return fmt.Errorf("unknown backup source: %s", recoverSource)
|
||||
}
|
||||
|
||||
if err := backupSrc.Validate(ctx); err != nil {
|
||||
return fmt.Errorf("backup source validation failed: %w", err)
|
||||
}
|
||||
ui.PrintSuccess("Backup source ready: %s", backupSrc.Name())
|
||||
|
||||
// Step 2: Create cloud provider
|
||||
ui.PrintInfo("Initializing cloud provider...")
|
||||
var provider providers.CloudProvider
|
||||
|
||||
switch recoverTarget {
|
||||
case "exoscale":
|
||||
provider, err = providers.NewExoscaleProvider(cfg.ExoscaleAPIKey, cfg.ExoscaleAPISecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Exoscale provider: %w", err)
|
||||
}
|
||||
case "cloudscale":
|
||||
provider, err = providers.NewCloudscaleProvider(cfg.CloudscaleAPIToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Cloudscale provider: %w", err)
|
||||
}
|
||||
case "hetzner":
|
||||
provider, err = providers.NewHetznerProvider(cfg.HetznerAPIKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Hetzner provider: %w", err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported target cloud: %s", recoverTarget)
|
||||
}
|
||||
ui.PrintSuccess("Cloud provider ready: %s", provider.Name())
|
||||
|
||||
// Step 3: Generate ephemeral SSH key
|
||||
ui.PrintInfo("Generating ephemeral SSH key...")
|
||||
keyPair, err := restore.GenerateEphemeralKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate SSH key: %w", err)
|
||||
}
|
||||
defer keyPair.Cleanup()
|
||||
ui.PrintSuccess("SSH key generated: %s", keyPair.PrivateKeyPath)
|
||||
|
||||
// Step 4: Create VM
|
||||
ui.PrintInfo("Creating VM...")
|
||||
spinner := ui.NewSpinner("Creating VM on " + recoverTarget)
|
||||
spinner.Start()
|
||||
|
||||
vmOpts := providers.VMOptions{
|
||||
Name: recoverHost + "-recovery",
|
||||
Zone: recoverZone,
|
||||
Flavor: recoverFlavor,
|
||||
Image: recoverImage,
|
||||
SSHPublicKey: keyPair.PublicKey,
|
||||
UserData: providers.GenerateCloudInit(keyPair.PublicKey),
|
||||
DiskSizeGB: 50,
|
||||
Tags: map[string]string{
|
||||
"recovery": "true",
|
||||
"host": recoverHost,
|
||||
},
|
||||
}
|
||||
|
||||
vm, err := provider.CreateVM(ctx, vmOpts)
|
||||
if err != nil {
|
||||
spinner.Stop()
|
||||
return fmt.Errorf("failed to create VM: %w", err)
|
||||
}
|
||||
spinner.StopWithMessage(fmt.Sprintf("✓ VM created: %s (IP: %s)", vm.ID, vm.PublicIP))
|
||||
|
||||
// Step 5: Wait for SSH
|
||||
ui.PrintInfo("Waiting for SSH to become available...")
|
||||
spinner = ui.NewSpinner("Connecting to " + vm.PublicIP)
|
||||
spinner.Start()
|
||||
|
||||
sshCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
if err := provider.WaitForSSH(sshCtx, vm.PublicIP, 22, 5*time.Minute); err != nil {
|
||||
spinner.Stop()
|
||||
ui.PrintError("SSH timeout. VM ID: %s", vm.ID)
|
||||
return fmt.Errorf("SSH connection failed: %w", err)
|
||||
}
|
||||
spinner.StopWithMessage("✓ SSH ready")
|
||||
|
||||
// Step 6: Run restore pipeline
|
||||
ui.PrintInfo("Starting restore pipeline...")
|
||||
pipeline := restore.NewPipeline(vm, backupSrc, recoverHost, keyPair.PrivateKeyPath)
|
||||
pipeline.Verbose = verbose
|
||||
|
||||
if err := pipeline.Run(ctx); err != nil {
|
||||
ui.PrintError("Restore pipeline failed: %v", err)
|
||||
ui.PrintInfo("VM ID: %s (IP: %s)", vm.ID, vm.PublicIP)
|
||||
ui.PrintInfo("SSH: ssh -i %s root@%s", keyPair.PrivateKeyPath, vm.PublicIP)
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 7: Output results
|
||||
ui.PrintSuccess("Recovery complete!")
|
||||
fmt.Printf("\n=== Recovery Summary ===\n")
|
||||
fmt.Printf("VM ID: %s\n", vm.ID)
|
||||
fmt.Printf("Public IP: %s\n", vm.PublicIP)
|
||||
fmt.Printf("SSH Key: %s\n", keyPair.PrivateKeyPath)
|
||||
fmt.Printf("SSH: ssh -i %s root@%s\n", keyPair.PrivateKeyPath, vm.PublicIP)
|
||||
fmt.Printf("\nNext steps:\n")
|
||||
fmt.Printf(" 1. Verify services: ssh -i %s root@%s\n", keyPair.PrivateKeyPath, vm.PublicIP)
|
||||
fmt.Printf(" 2. Test connectivity and functionality\n")
|
||||
fmt.Printf(" 3. Migrate DNS: %s migrate-dns --host %s --new-ip %s --old-ip <current-ip>\n",
|
||||
os.Args[0], recoverHost, vm.PublicIP)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runMigrateDNS(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
cfg := config.Get()
|
||||
dryRunTracker := ui.NewDryRun(!yesFlag)
|
||||
|
||||
// Show migration plan
|
||||
ui.PrintInfo("DNS Migration Plan:")
|
||||
fmt.Printf(" Host: %s\n", migrateDNSHost)
|
||||
fmt.Printf(" New IP: %s\n", migrateDNSNewIP)
|
||||
if migrateDNSOldIP != "" {
|
||||
fmt.Printf(" Old IP: %s\n", migrateDNSOldIP)
|
||||
}
|
||||
fmt.Printf(" TTL: %d\n", migrateDNSTTL)
|
||||
|
||||
// Check if this is dry-run
|
||||
if !yesFlag {
|
||||
dryRunTracker.AddOperation("Health", "Check", "Verify new VM is accessible on port 22 and 443")
|
||||
dryRunTracker.AddOperation("DNS", "Verify", "Check current DNS record")
|
||||
dryRunTracker.AddOperation("DNS", "Update", fmt.Sprintf("Update %s to point to %s", migrateDNSHost, migrateDNSNewIP))
|
||||
dryRunTracker.AddOperation("DNS", "Verify", "Verify DNS update succeeded")
|
||||
dryRunTracker.Print()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ask for hostname confirmation
|
||||
if !ui.ConfirmHostname(migrateDNSHost) {
|
||||
ui.PrintWarning("DNS migration cancelled")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create DNS client
|
||||
dnsClient, err := dns.NewExoscaleDNS(cfg.ExoscaleAPIKey, cfg.ExoscaleAPISecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create DNS client: %w", err)
|
||||
}
|
||||
migrator := dns.NewMigrator(dnsClient, verbose)
|
||||
|
||||
// Parse zone from hostname
|
||||
_, zone := parseHostnameForDNS(migrateDNSHost)
|
||||
|
||||
// Prepare migration request
|
||||
req := dns.MigrationRequest{
|
||||
Hostname: migrateDNSHost,
|
||||
OldIP: migrateDNSOldIP,
|
||||
NewIP: migrateDNSNewIP,
|
||||
Zone: zone,
|
||||
DryRun: false,
|
||||
}
|
||||
|
||||
// Execute migration
|
||||
ui.PrintInfo("Performing DNS migration...")
|
||||
result, err := migrator.Migrate(ctx, req)
|
||||
if err != nil {
|
||||
if result != nil && result.RolledBack {
|
||||
ui.PrintError("Migration failed and was rolled back: %v", err)
|
||||
} else {
|
||||
ui.PrintError("Migration failed: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Success
|
||||
ui.PrintSuccess(result.Message)
|
||||
fmt.Printf("\nDNS Record Updated:\n")
|
||||
fmt.Printf(" Type: A\n")
|
||||
fmt.Printf(" Name: %s\n", migrateDNSHost)
|
||||
fmt.Printf(" Old IP: %s\n", result.OldRecord.Content)
|
||||
fmt.Printf(" New IP: %s\n", result.NewRecord.Content)
|
||||
fmt.Printf(" TTL: %d\n", result.NewRecord.TTL)
|
||||
fmt.Printf("\nNote: DNS propagation may take up to %d seconds\n", result.NewRecord.TTL)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseHostnameForDNS splits a fully-qualified hostname into its subdomain
// and DNS zone. Known zones are matched by suffix first; otherwise the zone
// is assumed to be the last two labels (e.g. "a.b.example.com" ->
// ("a.b", "example.com")). A hostname with fewer than two dots is returned
// whole as the zone with an empty subdomain.
func parseHostnameForDNS(hostname string) (subdomain, zone string) {
	knownZones := []string{
		"obr.sh", "obnh.io", "obnh.network", "obnh.org",
		"obr.digital", "obr.im", "s-n-r.net", "as60284.net", "baumert.cc",
	}

	// Exact known-zone suffix match ("."+zone) takes priority.
	for _, z := range knownZones {
		if len(hostname) > len(z) && hostname[len(hostname)-len(z)-1:] == "."+z {
			return hostname[:len(hostname)-len(z)-1], z
		}
	}

	// Default: treat the last two labels as the zone. Walk dots from the
	// right; the second dot encountered separates subdomain from zone.
	// (Bug fix: the previous code split at the FIRST dot from the right,
	// yielding a bare TLD as the zone, contrary to its own comment.)
	// Byte-wise scan is safe: '.' cannot appear inside a UTF-8 multi-byte
	// sequence.
	dotCount := 0
	for i := len(hostname) - 1; i >= 0; i-- {
		if hostname[i] == '.' {
			dotCount++
			if dotCount == 2 {
				return hostname[:i], hostname[i+1:]
			}
		}
	}

	// Zero or one dot: no subdomain to split off.
	return "", hostname
}
|
||||
|
||||
// runListBackups implements "list backups": it enumerates all host backups
// on the source selected by --source and prints them as a table showing
// which of the root/opt/etc directories each backup contains.
func runListBackups(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	cfg := config.Get()

	// Create backup source from the --source flag.
	var backupSrc backup.BackupSource
	switch listSource {
	case "local":
		backupSrc = backup.NewLocalSource(cfg.LocalBackupPath)
	case "hetzner":
		backupSrc = backup.NewHetznerStorageSource(
			cfg.HetznerStorageBoxUser,
			cfg.HetznerStorageBoxHost,
		)
	default:
		return fmt.Errorf("unknown backup source: %s", listSource)
	}

	// List all backups (empty host filter = every host).
	backups, err := backupSrc.List(ctx, "")
	if err != nil {
		return fmt.Errorf("failed to list backups: %w", err)
	}

	if len(backups) == 0 {
		ui.PrintWarning("No backups found on %s", listSource)
		return nil
	}

	// Format as table
	headers := []string{"Host", "Source", "Timestamp", "Size", "Directories"}
	rows := make([][]string, 0)

	for _, b := range backups {
		// Summarize which backed-up directory trees are present.
		dirs := ""
		if b.HasRoot {
			dirs += "root "
		}
		if b.HasOpt {
			dirs += "opt "
		}
		if b.HasEtc {
			dirs += "etc "
		}

		size := formatSize(b.SizeBytes)
		timestamp := b.Timestamp.Format("2006-01-02 15:04")

		rows = append(rows, []string{
			b.Host,
			b.Source,
			timestamp,
			size,
			dirs,
		})
	}

	ui.PrintInfo("Available Backups:")
	ui.TablePrint(headers, rows)

	return nil
}
|
||||
|
||||
// formatSize renders a byte count as a human-readable string using
// binary (1024-based) units: 512 -> "512 B", 1536 -> "1.5 KB".
func formatSize(bytes int64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	// Repeatedly scale down until one more division would land in [1, 1024).
	value := float64(bytes)
	suffixes := "KMGTPE"
	idx := 0
	for value >= unit*unit {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value/unit, suffixes[idx])
}
|
||||
|
||||
func runListProviders(cmd *cobra.Command, args []string) error {
|
||||
cfg := config.Get()
|
||||
fmt.Println("Configured providers:")
|
||||
if cfg.ExoscaleAPIKey != "" {
|
||||
fmt.Println(" - exoscale (configured)")
|
||||
}
|
||||
if cfg.CloudscaleAPIToken != "" {
|
||||
fmt.Println(" - cloudscale (configured)")
|
||||
}
|
||||
if cfg.HetznerAPIKey != "" {
|
||||
fmt.Println(" - hetzner (configured)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// runListFlavors implements "list flavors": it queries the provider named
// by --provider for its VM flavors and prints them as a table.
func runListFlavors(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	cfg := config.Get()

	// Create provider from the --provider flag.
	var provider providers.CloudProvider
	var err error

	switch listProvider {
	case "exoscale":
		provider, err = providers.NewExoscaleProvider(cfg.ExoscaleAPIKey, cfg.ExoscaleAPISecret)
		if err != nil {
			return fmt.Errorf("failed to create Exoscale provider: %w", err)
		}
	case "cloudscale":
		provider, err = providers.NewCloudscaleProvider(cfg.CloudscaleAPIToken)
		if err != nil {
			return fmt.Errorf("failed to create Cloudscale provider: %w", err)
		}
	case "hetzner":
		provider, err = providers.NewHetznerProvider(cfg.HetznerAPIKey)
		if err != nil {
			return fmt.Errorf("failed to create Hetzner provider: %w", err)
		}
	default:
		return fmt.Errorf("unsupported provider: %s", listProvider)
	}

	// List flavors
	flavors, err := provider.ListFlavors(ctx)
	if err != nil {
		return fmt.Errorf("failed to list flavors: %w", err)
	}

	if len(flavors) == 0 {
		ui.PrintWarning("No flavors found for %s", listProvider)
		return nil
	}

	// Format as table
	headers := []string{"ID", "Name", "CPUs", "Memory", "Disk"}
	rows := make([][]string, 0)

	for _, f := range flavors {
		// Some providers report no disk size; render a dash instead of "0 GB".
		disk := "-"
		if f.Disk > 0 {
			disk = fmt.Sprintf("%d GB", f.Disk)
		}
		rows = append(rows, []string{
			f.ID,
			f.Name,
			fmt.Sprintf("%d", f.CPUs),
			fmt.Sprintf("%d MB", f.Memory),
			disk,
		})
	}

	ui.PrintInfo("Available Flavors for %s:", listProvider)
	ui.TablePrint(headers, rows)

	return nil
}
|
||||
|
||||
// runListImages implements "list images": it queries the provider named by
// --provider for available base images (filtered to "ubuntu") and prints
// them as a table.
func runListImages(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	cfg := config.Get()

	// Create provider from the --provider flag.
	var provider providers.CloudProvider
	var err error

	switch listProvider {
	case "exoscale":
		provider, err = providers.NewExoscaleProvider(cfg.ExoscaleAPIKey, cfg.ExoscaleAPISecret)
		if err != nil {
			return fmt.Errorf("failed to create Exoscale provider: %w", err)
		}
	case "cloudscale":
		provider, err = providers.NewCloudscaleProvider(cfg.CloudscaleAPIToken)
		if err != nil {
			return fmt.Errorf("failed to create Cloudscale provider: %w", err)
		}
	case "hetzner":
		provider, err = providers.NewHetznerProvider(cfg.HetznerAPIKey)
		if err != nil {
			return fmt.Errorf("failed to create Hetzner provider: %w", err)
		}
	default:
		return fmt.Errorf("unsupported provider: %s", listProvider)
	}

	// List images. Filter is hard-coded to "ubuntu" — presumably because
	// recover only targets Ubuntu bases; confirm before generalizing.
	images, err := provider.ListImages(ctx, "ubuntu")
	if err != nil {
		return fmt.Errorf("failed to list images: %w", err)
	}

	if len(images) == 0 {
		ui.PrintWarning("No images found for %s", listProvider)
		return nil
	}

	// Format as table
	headers := []string{"ID", "Name"}
	rows := make([][]string, 0)

	for _, img := range images {
		rows = append(rows, []string{
			img.ID,
			img.Name,
		})
	}

	ui.PrintInfo("Available Images for %s:", listProvider)
	ui.TablePrint(headers, rows)

	return nil
}
|
||||
|
||||
// runListDNSZones implements "list dns-zones": it prints every DNS zone
// reachable with the configured Exoscale credentials.
func runListDNSZones(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	cfg := config.Get()

	// Create DNS client
	dnsClient, err := dns.NewExoscaleDNS(cfg.ExoscaleAPIKey, cfg.ExoscaleAPISecret)
	if err != nil {
		return fmt.Errorf("failed to create DNS client: %w", err)
	}

	// List zones
	zones, err := dnsClient.ListZones(ctx)
	if err != nil {
		return fmt.Errorf("failed to list DNS zones: %w", err)
	}

	if len(zones) == 0 {
		ui.PrintWarning("No DNS zones found")
		return nil
	}

	ui.PrintInfo("DNS Zones managed via Exoscale:")
	for _, zone := range zones {
		fmt.Printf(" - %s\n", zone)
	}

	return nil
}
|
||||
|
||||
func runCleanup(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
cfg := config.Get()
|
||||
|
||||
ui.PrintInfo("Cleanup Plan:")
|
||||
fmt.Printf(" Provider: %s\n", cleanupProvider)
|
||||
fmt.Printf(" VM ID: %s\n", cleanupVMID)
|
||||
|
||||
// Confirm deletion
|
||||
if !yesFlag {
|
||||
if !ui.ConfirmAction(fmt.Sprintf("Delete VM %s on %s?", cleanupVMID, cleanupProvider)) {
|
||||
ui.PrintWarning("Cleanup cancelled")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Create provider
|
||||
var provider providers.CloudProvider
|
||||
var err error
|
||||
|
||||
switch cleanupProvider {
|
||||
case "exoscale":
|
||||
provider, err = providers.NewExoscaleProvider(cfg.ExoscaleAPIKey, cfg.ExoscaleAPISecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Exoscale provider: %w", err)
|
||||
}
|
||||
case "cloudscale":
|
||||
provider, err = providers.NewCloudscaleProvider(cfg.CloudscaleAPIToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Cloudscale provider: %w", err)
|
||||
}
|
||||
case "hetzner":
|
||||
provider, err = providers.NewHetznerProvider(cfg.HetznerAPIKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Hetzner provider: %w", err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported provider: %s", cleanupProvider)
|
||||
}
|
||||
|
||||
// Delete VM
|
||||
ui.PrintInfo("Deleting VM %s...", cleanupVMID)
|
||||
if err := provider.DeleteVM(ctx, cleanupVMID); err != nil {
|
||||
return fmt.Errorf("failed to delete VM: %w", err)
|
||||
}
|
||||
|
||||
ui.PrintSuccess("VM %s deleted successfully", cleanupVMID)
|
||||
return nil
|
||||
}
|
||||
56
go.mod
Normal file
56
go.mod
Normal file
@@ -0,0 +1,56 @@
|
||||
module recover-server
|
||||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.11
|
||||
|
||||
require (
|
||||
github.com/cloudscale-ch/cloudscale-go-sdk/v6 v6.0.0
|
||||
github.com/exoscale/egoscale/v3 v3.1.31
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.32.0
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/spf13/cobra v1.8.0
|
||||
golang.org/x/crypto v0.45.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-playground/locales v0.14.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.9.0 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.18.2 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
163
go.sum
Normal file
163
go.sum
Normal file
@@ -0,0 +1,163 @@
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudscale-ch/cloudscale-go-sdk/v6 v6.0.0 h1:lIVkmacVa4GogQ17dtrTEh/ph+k8gH2bsQcfJu/Tk0s=
|
||||
github.com/cloudscale-ch/cloudscale-go-sdk/v6 v6.0.0/go.mod h1:agOnbZIZJUfW4V/4s5wYX7IoXoixGxXzQhW30+fpGPU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/exoscale/egoscale/v3 v3.1.31 h1:/dySEUSAxU+hlAS/eLxAoY8ZYmtOtaoL1P+lDwH7ojY=
|
||||
github.com/exoscale/egoscale/v3 v3.1.31/go.mod h1:0iY8OxgHJCS5TKqDNhwOW95JBKCnBZl3YGU4Yt+NqkU=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/validator/v10 v10.9.0 h1:NgTtmN58D0m8+UuxtYmGztBJB7VnPgjj221I1QHci2A=
|
||||
github.com/go-playground/validator/v10 v10.9.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
|
||||
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.32.0 h1:BRe+k7ESdYv3xQLBGdKUfk+XBFRJNGKzq70nJI24ciM=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.32.0/go.mod h1:hAanyyfn9M0cMmZ68CXzPCF54KRb9EXd8eiE2FHKGIE=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
||||
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
|
||||
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
200
internal/backup/hetzner_storage.go
Normal file
200
internal/backup/hetzner_storage.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HetznerStorageSource implements BackupSource for a Hetzner Storage Box,
// accessed non-interactively via sftp (listing/validation) and rsync-over-ssh
// (data transfer).
type HetznerStorageSource struct {
	User     string // Storage Box account, e.g. u480813
	Host     string // Storage Box hostname, e.g. u480813.your-storagebox.de
	BasePath string // Remote base directory holding per-host backups, e.g. /backups
	Port     int    // SSH port (default 23 for sftp; plain ssh would be 22)
}
|
||||
|
||||
// NewHetznerStorageSource creates a new Hetzner Storage Box source
|
||||
func NewHetznerStorageSource(user, host string) *HetznerStorageSource {
|
||||
if user == "" {
|
||||
user = "u480813"
|
||||
}
|
||||
if host == "" {
|
||||
host = "u480813.your-storagebox.de"
|
||||
}
|
||||
return &HetznerStorageSource{
|
||||
User: user,
|
||||
Host: host,
|
||||
BasePath: "/backups",
|
||||
Port: 23, // Hetzner uses port 23 for SFTP
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) Name() string {
|
||||
return "hetzner"
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) sshAddress() string {
|
||||
return fmt.Sprintf("%s@%s", s.User, s.Host)
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) List(ctx context.Context, host string) ([]BackupInfo, error) {
|
||||
var backups []BackupInfo
|
||||
|
||||
// Use sftp to list directories
|
||||
path := s.BasePath
|
||||
if host != "" {
|
||||
path = fmt.Sprintf("%s/%s", s.BasePath, host)
|
||||
}
|
||||
|
||||
// Run sftp ls command
|
||||
cmd := exec.CommandContext(ctx, "sftp", "-P", fmt.Sprintf("%d", s.Port), "-o", "BatchMode=yes", s.sshAddress())
|
||||
cmd.Stdin = strings.NewReader(fmt.Sprintf("ls -la %s\nquit\n", path))
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("sftp failed: %w: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
// Parse output
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
for _, line := range lines {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 9 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip . and ..
|
||||
name := fields[len(fields)-1]
|
||||
if name == "." || name == ".." {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if it's a directory
|
||||
if !strings.HasPrefix(fields[0], "d") {
|
||||
continue
|
||||
}
|
||||
|
||||
hostName := name
|
||||
if host != "" {
|
||||
// We're looking at subdirs of a specific host
|
||||
continue
|
||||
}
|
||||
|
||||
info := &BackupInfo{
|
||||
Host: hostName,
|
||||
Source: "hetzner",
|
||||
Path: fmt.Sprintf("%s/%s", s.BasePath, hostName),
|
||||
Timestamp: time.Now(), // Would need additional sftp commands for real timestamp
|
||||
}
|
||||
|
||||
// Check for subdirectories (simplified - assume all exist)
|
||||
info.HasRoot = true
|
||||
info.HasOpt = true
|
||||
info.HasEtc = true
|
||||
|
||||
backups = append(backups, *info)
|
||||
}
|
||||
|
||||
return backups, nil
|
||||
}
|
||||
|
||||
// SyncTo copies the requested backup directories for host from the Storage
// Box to the target VM, using a two-hop rsync: Storage Box -> local staging
// directory -> target. A failed pull is treated as "directory absent" and
// skipped; a failed push aborts with an error. The staging directory is
// removed when the function returns.
func (s *HetznerStorageSource) SyncTo(ctx context.Context, host string, targetSSH string, sshKeyPath string, dirs []string) error {
	// For Hetzner Storage Box, we need to rsync from storage box to target
	// This requires the target VM to pull from storage box
	// OR we rsync storage->local->target (two-hop)

	// Option 1: Direct rsync from storage box (requires storage box SSH access from target)
	// Option 2: Two-hop: storage -> local staging -> target

	// We'll use Option 2 for reliability (target may not have storage box access)
	// NOTE(review): the staging path is predictable under /tmp; consider
	// os.MkdirTemp if the machine is multi-user — TODO confirm threat model.
	stagingDir := fmt.Sprintf("/tmp/restore-staging-%s", host)
	if err := os.MkdirAll(stagingDir, 0700); err != nil {
		return fmt.Errorf("failed to create staging dir: %w", err)
	}
	defer os.RemoveAll(stagingDir)

	// rsync remote source spec: user@host:/backups/<host>/
	srcPath := fmt.Sprintf("%s:%s/%s/", s.sshAddress(), s.BasePath, host)

	for _, dir := range dirs {
		// Step 1: Rsync from Hetzner to local staging
		localStaging := fmt.Sprintf("%s/%s/", stagingDir, dir)
		if err := os.MkdirAll(localStaging, 0700); err != nil {
			return err
		}

		// Host-key checking is disabled because the target VM is freshly
		// provisioned and not yet in known_hosts.
		pullArgs := []string{
			"-avz",
			"--progress",
			"-e", fmt.Sprintf("ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", s.Port),
			fmt.Sprintf("%s%s/", srcPath, dir),
			localStaging,
		}

		pullCmd := exec.CommandContext(ctx, "rsync", pullArgs...)
		pullCmd.Stdout = os.Stdout
		pullCmd.Stderr = os.Stderr

		if err := pullCmd.Run(); err != nil {
			// Directory might not exist, continue
			// NOTE(review): this also swallows genuine transfer errors —
			// TODO distinguish "missing source" from other rsync failures.
			continue
		}

		// Step 2: Rsync from local staging to target
		var targetPath string
		switch dir {
		case "root":
			targetPath = "/root/"
		case "opt":
			targetPath = "/opt/"
		case "etc":
			// /etc is staged on the target rather than overwritten directly.
			targetPath = "/srv/restore/etc/"
		default:
			targetPath = "/" + dir + "/"
		}

		pushArgs := []string{
			"-avz",
			"--progress",
			"-e", fmt.Sprintf("ssh -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", sshKeyPath),
			localStaging,
			fmt.Sprintf("%s:%s", targetSSH, targetPath),
		}

		pushCmd := exec.CommandContext(ctx, "rsync", pushArgs...)
		pushCmd.Stdout = os.Stdout
		pushCmd.Stderr = os.Stderr

		if err := pushCmd.Run(); err != nil {
			return fmt.Errorf("rsync to target failed for %s: %w", dir, err)
		}
	}

	return nil
}
|
||||
|
||||
func (s *HetznerStorageSource) GetPath(host string) string {
|
||||
return fmt.Sprintf("%s@%s:%s/%s", s.User, s.Host, s.BasePath, host)
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) Validate(ctx context.Context) error {
|
||||
// Test SFTP connection
|
||||
cmd := exec.CommandContext(ctx, "sftp", "-P", fmt.Sprintf("%d", s.Port), "-o", "BatchMode=yes", "-o", "ConnectTimeout=10", s.sshAddress())
|
||||
cmd.Stdin = strings.NewReader("ls\nquit\n")
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("cannot connect to Hetzner Storage Box: %w: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
169
internal/backup/local.go
Normal file
169
internal/backup/local.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// LocalSource implements BackupSource for backups stored on the local
// filesystem, one subdirectory per host under BasePath.
type LocalSource struct {
	BasePath string // Root of the backup tree, e.g. /srv/backups
}
|
||||
|
||||
// NewLocalSource creates a new local backup source
|
||||
func NewLocalSource(basePath string) *LocalSource {
|
||||
if basePath == "" {
|
||||
basePath = "/srv/backups"
|
||||
}
|
||||
return &LocalSource{BasePath: basePath}
|
||||
}
|
||||
|
||||
func (s *LocalSource) Name() string {
|
||||
return "local"
|
||||
}
|
||||
|
||||
func (s *LocalSource) List(ctx context.Context, host string) ([]BackupInfo, error) {
|
||||
var backups []BackupInfo
|
||||
|
||||
if host != "" {
|
||||
// List specific host
|
||||
info, err := s.getBackupInfo(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info != nil {
|
||||
backups = append(backups, *info)
|
||||
}
|
||||
} else {
|
||||
// List all hosts
|
||||
entries, err := os.ReadDir(s.BasePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read backup directory: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
info, err := s.getBackupInfo(entry.Name())
|
||||
if err != nil {
|
||||
continue // Skip invalid backups
|
||||
}
|
||||
if info != nil {
|
||||
backups = append(backups, *info)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return backups, nil
|
||||
}
|
||||
|
||||
func (s *LocalSource) getBackupInfo(host string) (*BackupInfo, error) {
|
||||
hostPath := filepath.Join(s.BasePath, host)
|
||||
|
||||
stat, err := os.Stat(hostPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := &BackupInfo{
|
||||
Host: host,
|
||||
Source: "local",
|
||||
Path: hostPath,
|
||||
Timestamp: stat.ModTime(),
|
||||
}
|
||||
|
||||
// Check for subdirectories
|
||||
for _, dir := range SupportedDirs {
|
||||
dirPath := filepath.Join(hostPath, dir)
|
||||
if _, err := os.Stat(dirPath); err == nil {
|
||||
switch dir {
|
||||
case "root":
|
||||
info.HasRoot = true
|
||||
case "opt":
|
||||
info.HasOpt = true
|
||||
case "etc":
|
||||
info.HasEtc = true
|
||||
}
|
||||
|
||||
// Add directory size
|
||||
size, _ := dirSize(dirPath)
|
||||
info.SizeBytes += size
|
||||
}
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (s *LocalSource) SyncTo(ctx context.Context, host string, targetSSH string, sshKeyPath string, dirs []string) error {
|
||||
hostPath := filepath.Join(s.BasePath, host)
|
||||
|
||||
for _, dir := range dirs {
|
||||
srcPath := filepath.Join(hostPath, dir) + "/"
|
||||
|
||||
// Check source exists
|
||||
if _, err := os.Stat(srcPath); os.IsNotExist(err) {
|
||||
continue // Skip missing directories
|
||||
}
|
||||
|
||||
// Determine target path
|
||||
var targetPath string
|
||||
switch dir {
|
||||
case "root":
|
||||
targetPath = "/root/"
|
||||
case "opt":
|
||||
targetPath = "/opt/"
|
||||
case "etc":
|
||||
targetPath = "/srv/restore/etc/" // Stage /etc, don't overwrite directly
|
||||
default:
|
||||
targetPath = "/" + dir + "/"
|
||||
}
|
||||
|
||||
// Build rsync command
|
||||
args := []string{
|
||||
"-avz",
|
||||
"--progress",
|
||||
"-e", fmt.Sprintf("ssh -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", sshKeyPath),
|
||||
srcPath,
|
||||
fmt.Sprintf("%s:%s", targetSSH, targetPath),
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rsync", args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("rsync failed for %s: %w", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LocalSource) GetPath(host string) string {
|
||||
return filepath.Join(s.BasePath, host)
|
||||
}
|
||||
|
||||
func (s *LocalSource) Validate(ctx context.Context) error {
|
||||
if _, err := os.Stat(s.BasePath); err != nil {
|
||||
return fmt.Errorf("backup path not accessible: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirSize calculates the total size of a directory
|
||||
func dirSize(path string) (int64, error) {
|
||||
var size int64
|
||||
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
size += info.Size()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return size, err
|
||||
}
|
||||
40
internal/backup/source.go
Normal file
40
internal/backup/source.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackupInfo contains metadata about a single host's backup.
type BackupInfo struct {
	Host      string    // Short host name the backup belongs to
	Source    string    // Originating source: "local" or "hetzner"
	Path      string    // Full path to the backup (local dir or remote spec)
	Timestamp time.Time // Last modified time of the backup
	SizeBytes int64     // Total size in bytes
	HasRoot   bool      // Has a /root backup subdirectory
	HasOpt    bool      // Has an /opt backup subdirectory
	HasEtc    bool      // Has an /etc backup subdirectory
}
|
||||
|
||||
// BackupSource defines the interface for backup sources
// (implemented by the local-filesystem and Hetzner Storage Box sources).
type BackupSource interface {
	// Name returns the short source name (e.g. "local", "hetzner").
	Name() string

	// List returns available backups for a host (or all hosts if empty).
	List(ctx context.Context, host string) ([]BackupInfo, error)

	// SyncTo syncs backup data to the target VM via rsync over SSH.
	// targetSSH is user@host; sshKeyPath is the path to the private key;
	// dirs names the backup subdirectories to transfer.
	SyncTo(ctx context.Context, host string, targetSSH string, sshKeyPath string, dirs []string) error

	// GetPath returns the base path for a host's backup.
	GetPath(host string) string

	// Validate checks that the backup source is accessible.
	Validate(ctx context.Context) error
}
|
||||
|
||||
// SupportedDirs lists the backup subdirectories that can be restored.
var SupportedDirs = []string{"root", "opt", "etc"}
|
||||
88
internal/config/config.go
Normal file
88
internal/config/config.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
// Config holds all configuration values read from the environment,
// optionally seeded from a .env file via Load.
type Config struct {
	// Exoscale credentials (EXOSCALE_API_KEY / EXOSCALE_API_SECRET)
	ExoscaleAPIKey    string
	ExoscaleAPISecret string

	// Cloudscale API token (CLOUDSCALE_API_TOKEN)
	CloudscaleAPIToken string

	// Hetzner Cloud API key (HETZNER_API_KEY)
	HetznerAPIKey string

	// Hetzner Storage Box account used for backups
	// (HETZNER_STORAGEBOX_USER / HETZNER_STORAGEBOX_HOST)
	HetznerStorageBoxUser string
	HetznerStorageBoxHost string

	// Local backup path (LOCAL_BACKUP_PATH, default /srv/backups)
	LocalBackupPath string
}
|
||||
|
||||
// cfg is the process-wide configuration populated by Load (nil until then).
var cfg *Config
|
||||
|
||||
// Load reads configuration from .env file
|
||||
func Load(envFile string) error {
|
||||
if envFile != "" {
|
||||
if err := godotenv.Load(envFile); err != nil {
|
||||
// Not fatal - env vars might be set directly
|
||||
fmt.Fprintf(os.Stderr, "Warning: could not load %s: %v\n", envFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
cfg = &Config{
|
||||
ExoscaleAPIKey: os.Getenv("EXOSCALE_API_KEY"),
|
||||
ExoscaleAPISecret: os.Getenv("EXOSCALE_API_SECRET"),
|
||||
CloudscaleAPIToken: os.Getenv("CLOUDSCALE_API_TOKEN"),
|
||||
HetznerAPIKey: os.Getenv("HETZNER_API_KEY"),
|
||||
HetznerStorageBoxUser: getEnvDefault("HETZNER_STORAGEBOX_USER", "u480813"),
|
||||
HetznerStorageBoxHost: getEnvDefault("HETZNER_STORAGEBOX_HOST", "u480813.your-storagebox.de"),
|
||||
LocalBackupPath: getEnvDefault("LOCAL_BACKUP_PATH", "/srv/backups"),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the current configuration
|
||||
func Get() *Config {
|
||||
if cfg == nil {
|
||||
cfg = &Config{}
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Validate checks required credentials for a provider
|
||||
func (c *Config) ValidateProvider(provider string) error {
|
||||
switch provider {
|
||||
case "exoscale":
|
||||
if c.ExoscaleAPIKey == "" || c.ExoscaleAPISecret == "" {
|
||||
return fmt.Errorf("EXOSCALE_API_KEY and EXOSCALE_API_SECRET required")
|
||||
}
|
||||
case "cloudscale":
|
||||
if c.CloudscaleAPIToken == "" {
|
||||
return fmt.Errorf("CLOUDSCALE_API_TOKEN required")
|
||||
}
|
||||
case "hetzner":
|
||||
if c.HetznerAPIKey == "" {
|
||||
return fmt.Errorf("HETZNER_API_KEY required")
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown provider: %s", provider)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEnvDefault(key, defaultVal string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
60
internal/config/hosts.go
Normal file
60
internal/config/hosts.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package config
|
||||
|
||||
// HostConfig defines a recoverable host.
type HostConfig struct {
	Name      string   // Short name (proton, elektron, etc.)
	FQDN      string   // Full domain name
	DNSZone   string   // DNS zone holding the host's A/AAAA records
	Services  []string // Docker services expected to be restored on the host
	BackupDir string   // Subdirectory name in the backup source
}
|
||||
|
||||
// KnownHosts contains all configured hosts, keyed by short name.
var KnownHosts = map[string]HostConfig{
	"proton": {
		Name:      "proton",
		FQDN:      "proton.obr.sh",
		DNSZone:   "obr.sh",
		Services:  []string{"gitea", "traefik", "portainer"},
		BackupDir: "proton",
	},
	"photon": {
		Name:      "photon",
		FQDN:      "photon.obnh.io",
		DNSZone:   "obnh.io",
		Services:  []string{"gitea", "nginx"},
		BackupDir: "photon",
	},
	"elektron": {
		Name:      "elektron",
		FQDN:      "elektron.obr.sh",
		DNSZone:   "obr.sh",
		Services:  []string{"gitea", "dns", "monitoring"},
		BackupDir: "elektron",
	},
	"fry": {
		Name:      "fry",
		FQDN:      "fry.obr.sh",
		DNSZone:   "obr.sh",
		Services:  []string{"mastodon", "gitea", "traefik"},
		BackupDir: "fry",
	},
}
|
||||
|
||||
// GetHost returns host config by name
|
||||
func GetHost(name string) (*HostConfig, bool) {
|
||||
h, ok := KnownHosts[name]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return &h, true
|
||||
}
|
||||
|
||||
// ListHosts returns all known host names
|
||||
func ListHosts() []string {
|
||||
hosts := make([]string, 0, len(KnownHosts))
|
||||
for name := range KnownHosts {
|
||||
hosts = append(hosts, name)
|
||||
}
|
||||
return hosts
|
||||
}
|
||||
178
internal/dns/exoscale.go
Normal file
178
internal/dns/exoscale.go
Normal file
@@ -0,0 +1,178 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
v3 "github.com/exoscale/egoscale/v3"
|
||||
"github.com/exoscale/egoscale/v3/credentials"
|
||||
)
|
||||
|
||||
// ExoscaleDNS manages DNS records via the Exoscale API, wrapping an
// egoscale v3 client.
type ExoscaleDNS struct {
	client *v3.Client // authenticated Exoscale API client
}

// DNSRecord represents a DNS record as exposed to the rest of this tool.
type DNSRecord struct {
	ID      string // Exoscale record UUID, stringified
	Type    string // A, AAAA, CNAME, etc.
	Name    string // subdomain or @ for apex
	Content string // IP or target
	TTL     int64  // record TTL — presumably seconds, per DNS convention
}
|
||||
|
||||
// NewExoscaleDNS creates a new Exoscale DNS client
|
||||
func NewExoscaleDNS(apiKey, apiSecret string) (*ExoscaleDNS, error) {
|
||||
creds := credentials.NewStaticCredentials(apiKey, apiSecret)
|
||||
client, err := v3.NewClient(creds)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Exoscale client: %w", err)
|
||||
}
|
||||
|
||||
return &ExoscaleDNS{client: client}, nil
|
||||
}
|
||||
|
||||
// GetRecord gets a specific DNS record
|
||||
func (d *ExoscaleDNS) GetRecord(ctx context.Context, zone, recordType, name string) (*DNSRecord, error) {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var domainID v3.UUID
|
||||
for _, domain := range domains.DNSDomains {
|
||||
if domain.UnicodeName == zone {
|
||||
domainID = domain.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if domainID == "" {
|
||||
return nil, fmt.Errorf("zone %s not found", zone)
|
||||
}
|
||||
|
||||
records, err := d.client.ListDNSDomainRecords(ctx, domainID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list records: %w", err)
|
||||
}
|
||||
|
||||
for _, rec := range records.DNSDomainRecords {
|
||||
if rec.Type == v3.DNSDomainRecordType(recordType) && rec.Name == name {
|
||||
return &DNSRecord{
|
||||
ID: string(rec.ID),
|
||||
Type: string(rec.Type),
|
||||
Name: rec.Name,
|
||||
Content: rec.Content,
|
||||
TTL: rec.Ttl,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("record %s.%s not found", name, zone)
|
||||
}
|
||||
|
||||
// UpdateRecord updates a DNS record
|
||||
func (d *ExoscaleDNS) UpdateRecord(ctx context.Context, zone string, record *DNSRecord) error {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var domainID v3.UUID
|
||||
for _, domain := range domains.DNSDomains {
|
||||
if domain.UnicodeName == zone {
|
||||
domainID = domain.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if domainID == "" {
|
||||
return fmt.Errorf("zone %s not found", zone)
|
||||
}
|
||||
|
||||
op, err := d.client.UpdateDNSDomainRecord(ctx, domainID, v3.UUID(record.ID), v3.UpdateDNSDomainRecordRequest{
|
||||
Content: record.Content,
|
||||
Ttl: record.TTL,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update record: %w", err)
|
||||
}
|
||||
|
||||
_, err = d.client.Wait(ctx, op, v3.OperationStateSuccess)
|
||||
if err != nil {
|
||||
return fmt.Errorf("update operation failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListRecords lists all records in a zone
|
||||
func (d *ExoscaleDNS) ListRecords(ctx context.Context, zone string) ([]DNSRecord, error) {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var domainID v3.UUID
|
||||
for _, domain := range domains.DNSDomains {
|
||||
if domain.UnicodeName == zone {
|
||||
domainID = domain.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if domainID == "" {
|
||||
return nil, fmt.Errorf("zone %s not found", zone)
|
||||
}
|
||||
|
||||
records, err := d.client.ListDNSDomainRecords(ctx, domainID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list records: %w", err)
|
||||
}
|
||||
|
||||
var result []DNSRecord
|
||||
for _, rec := range records.DNSDomainRecords {
|
||||
result = append(result, DNSRecord{
|
||||
ID: string(rec.ID),
|
||||
Type: string(rec.Type),
|
||||
Name: rec.Name,
|
||||
Content: rec.Content,
|
||||
TTL: rec.Ttl,
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ListZones lists all DNS zones managed in Exoscale
|
||||
func (d *ExoscaleDNS) ListZones(ctx context.Context) ([]string, error) {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var zones []string
|
||||
for _, domain := range domains.DNSDomains {
|
||||
zones = append(zones, domain.UnicodeName)
|
||||
}
|
||||
|
||||
return zones, nil
|
||||
}
|
||||
|
||||
// ResolveCurrentIP resolves hostname through the system resolver and
// returns its first IPv4 address, or an error when the lookup fails or
// yields no IPv4 result.
func ResolveCurrentIP(hostname string) (string, error) {
	addrs, err := net.LookupIP(hostname)
	if err != nil {
		return "", err
	}

	for _, addr := range addrs {
		v4 := addr.To4()
		if v4 == nil {
			continue
		}
		return v4.String(), nil
	}

	return "", fmt.Errorf("no IPv4 address found for %s", hostname)
}
|
||||
135
internal/dns/health.go
Normal file
135
internal/dns/health.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HealthResult contains the result of health checks against one VM:
// per-protocol readiness flags plus the probe error, if any.
type HealthResult struct {
	SSHReady   bool  // true when a TCP connection to the SSH port succeeded
	HTTPSReady bool  // true when the HTTPS port answered (or accepted TCP)
	SSHError   error // nil when SSHReady is true
	HTTPSError error // nil when HTTPSReady is true
}

// HealthChecker performs health checks on VMs with configurable
// per-protocol timeouts.
type HealthChecker struct {
	SSHTimeout   time.Duration // dial timeout for the SSH probe
	HTTPSTimeout time.Duration // request timeout for the HTTPS probe
}
|
||||
|
||||
// NewHealthChecker creates a new health checker
|
||||
func NewHealthChecker() *HealthChecker {
|
||||
return &HealthChecker{
|
||||
SSHTimeout: 10 * time.Second,
|
||||
HTTPSTimeout: 10 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// CheckSSH checks if SSH is accessible
|
||||
func (h *HealthChecker) CheckSSH(ctx context.Context, ip string, port int) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
dialer := &net.Dialer{
|
||||
Timeout: h.SSHTimeout,
|
||||
}
|
||||
|
||||
conn, err := dialer.DialContext(ctx, "tcp", address)
|
||||
if err != nil {
|
||||
return fmt.Errorf("SSH not accessible: %w", err)
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckHTTPS checks if HTTPS is accessible on ip:port (0 means 443).
// "Accessible" is deliberately lenient: a successful TLS request, a TLS
// error on an open port, or even a plain TCP connect all count as healthy,
// because during recovery the service behind the port may not yet have a
// valid certificate. Only connect failures and timeouts are reported.
func (h *HealthChecker) CheckHTTPS(ctx context.Context, ip string, port int) error {
	if port == 0 {
		port = 443
	}

	// Use IP directly with insecure TLS (we just want to verify the port is open)
	client := &http.Client{
		Timeout: h.HTTPSTimeout,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}

	url := fmt.Sprintf("https://%s:%d/", ip, port)
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return err
	}

	resp, err := client.Do(req)
	if err != nil {
		// Connection refused or timeout is a failure
		// But TLS errors mean the port is open (which is what we want)
		// NOTE(review): with InsecureSkipVerify set above, verification
		// errors should not normally occur — this branch looks defensive;
		// confirm it is reachable.
		if _, ok := err.(*tls.CertificateVerificationError); ok {
			return nil // Port is open, TLS is working
		}

		// For other TLS errors, the port is still open
		if netErr, ok := err.(net.Error); ok && !netErr.Timeout() {
			// Non-timeout network error might still mean port is open
			// Check if we can at least connect
			conn, connErr := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, port), h.HTTPSTimeout)
			if connErr == nil {
				conn.Close()
				return nil
			}
		}

		return fmt.Errorf("HTTPS not accessible: %w", err)
	}
	defer resp.Body.Close()

	return nil
}
|
||||
|
||||
// CheckAll performs all health checks
|
||||
func (h *HealthChecker) CheckAll(ctx context.Context, ip string) *HealthResult {
|
||||
result := &HealthResult{}
|
||||
|
||||
result.SSHError = h.CheckSSH(ctx, ip, 22)
|
||||
result.SSHReady = result.SSHError == nil
|
||||
|
||||
result.HTTPSError = h.CheckHTTPS(ctx, ip, 443)
|
||||
result.HTTPSReady = result.HTTPSError == nil
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// WaitForReady waits for all health checks to pass
|
||||
func (h *HealthChecker) WaitForReady(ctx context.Context, ip string, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
result := h.CheckAll(ctx, ip)
|
||||
if result.SSHReady {
|
||||
return nil // SSH is the minimum requirement
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
// Continue checking
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for VM to be ready")
|
||||
}
|
||||
189
internal/dns/migration.go
Normal file
189
internal/dns/migration.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MigrationRequest contains the parameters for moving one hostname's
// A record from OldIP to NewIP.
type MigrationRequest struct {
	Hostname string // Full hostname (e.g., proton.obr.sh)
	OldIP    string // Expected current IP
	NewIP    string // New IP to point to
	Zone     string // DNS zone (e.g., obr.sh); empty lets parseHostname detect it
	DryRun   bool   // when true, run all safety checks but write nothing
}

// MigrationResult contains the result of a DNS migration.
type MigrationResult struct {
	Success    bool       // migration (or dry run) completed
	Message    string     // human-readable summary
	OldRecord  *DNSRecord // record state before the update
	NewRecord  *DNSRecord // record as written (nil on dry run)
	RolledBack bool       // true when verification failed and the old IP was restored
}

// Migrator handles DNS migration with safety checks: a pre-flight SSH
// probe of the new VM, expected-IP verification, and post-update
// verification with automatic rollback.
type Migrator struct {
	dns     *ExoscaleDNS   // DNS API client used for reads and writes
	health  *HealthChecker // probes the new VM before cutover
	verbose bool           // print progress to stdout
}
|
||||
|
||||
// NewMigrator creates a new DNS migrator
|
||||
func NewMigrator(dns *ExoscaleDNS, verbose bool) *Migrator {
|
||||
return &Migrator{
|
||||
dns: dns,
|
||||
health: NewHealthChecker(),
|
||||
verbose: verbose,
|
||||
}
|
||||
}
|
||||
|
||||
// Migrate performs DNS migration with safety checks. The pipeline:
// (1) verify the new VM answers on SSH, (2) read the current A record,
// (3) confirm it matches req.OldIP, (4) cross-check via live DNS
// resolution, (5) write req.NewIP (or stop here on DryRun), (6) re-read
// the record and roll back to req.OldIP if it does not match.
// Any failed check returns an error without touching DNS (except the
// rollback path in step 6).
func (m *Migrator) Migrate(ctx context.Context, req MigrationRequest) (*MigrationResult, error) {
	result := &MigrationResult{}

	// Parse hostname to get subdomain and zone
	subdomain, zone := parseHostname(req.Hostname, req.Zone)

	if m.verbose {
		fmt.Printf("DNS Migration: %s -> %s\n", req.OldIP, req.NewIP)
		fmt.Printf("  Zone: %s, Subdomain: %s\n", zone, subdomain)
	}

	// Step 1: Verify new VM is accessible
	if m.verbose {
		fmt.Println("\n=== Pre-flight checks ===")
	}

	healthResult := m.health.CheckAll(ctx, req.NewIP)
	if !healthResult.SSHReady {
		return nil, fmt.Errorf("new VM not accessible on SSH (port 22): %s", req.NewIP)
	}
	if m.verbose {
		fmt.Printf("  ✓ SSH accessible on %s\n", req.NewIP)
	}

	// Step 2: Get current DNS record
	record, err := m.dns.GetRecord(ctx, zone, "A", subdomain)
	if err != nil {
		return nil, fmt.Errorf("failed to get current DNS record: %w", err)
	}
	result.OldRecord = record

	// Step 3: Verify current DNS matches expected old IP
	if record.Content != req.OldIP {
		return nil, fmt.Errorf("DNS mismatch: expected %s, found %s", req.OldIP, record.Content)
	}
	if m.verbose {
		fmt.Printf("  ✓ Current DNS points to expected IP: %s\n", req.OldIP)
	}

	// Step 4: Verify via live DNS resolution
	// A resolution failure is only a warning (resolvers may be flaky),
	// but a successful resolution to the WRONG IP aborts the migration.
	resolvedIP, err := ResolveCurrentIP(req.Hostname)
	if err != nil {
		if m.verbose {
			fmt.Printf("  ⚠ Could not verify live DNS: %v\n", err)
		}
	} else if resolvedIP != req.OldIP {
		return nil, fmt.Errorf("live DNS mismatch: expected %s, resolved %s", req.OldIP, resolvedIP)
	} else if m.verbose {
		fmt.Printf("  ✓ Live DNS resolution verified: %s\n", resolvedIP)
	}

	// Step 5: Perform migration (or dry-run)
	if req.DryRun {
		result.Success = true
		result.Message = fmt.Sprintf("[DRY RUN] Would update %s.%s: %s -> %s",
			subdomain, zone, req.OldIP, req.NewIP)
		return result, nil
	}

	if m.verbose {
		fmt.Println("\n=== Updating DNS ===")
	}

	// Update the record (same ID/type/name/TTL, new content)
	newRecord := &DNSRecord{
		ID:      record.ID,
		Type:    record.Type,
		Name:    record.Name,
		Content: req.NewIP,
		TTL:     record.TTL,
	}

	if err := m.dns.UpdateRecord(ctx, zone, newRecord); err != nil {
		return nil, fmt.Errorf("failed to update DNS: %w", err)
	}

	result.NewRecord = newRecord

	if m.verbose {
		fmt.Printf("  ✓ DNS updated: %s -> %s\n", req.OldIP, req.NewIP)
	}

	// Step 6: Verify the update
	verifyRecord, err := m.dns.GetRecord(ctx, zone, "A", subdomain)
	if err != nil || verifyRecord.Content != req.NewIP {
		// Rollback
		if m.verbose {
			fmt.Println("  ✗ Verification failed, rolling back...")
		}

		rollbackRecord := &DNSRecord{
			ID:      record.ID,
			Type:    record.Type,
			Name:    record.Name,
			Content: req.OldIP,
			TTL:     record.TTL,
		}

		if rollbackErr := m.dns.UpdateRecord(ctx, zone, rollbackRecord); rollbackErr != nil {
			return nil, fmt.Errorf("CRITICAL: rollback failed: %w (original error: %v)", rollbackErr, err)
		}

		// NOTE(review): RolledBack is set on result but result is
		// discarded (nil returned with the error) — callers only see
		// the error text. Confirm this is intended.
		result.RolledBack = true
		return nil, fmt.Errorf("DNS update verification failed, rolled back: %w", err)
	}

	result.Success = true
	result.Message = fmt.Sprintf("DNS migration complete: %s now points to %s", req.Hostname, req.NewIP)

	return result, nil
}
|
||||
|
||||
// parseHostname splits a fully-qualified hostname into subdomain and DNS
// zone. Resolution order: the caller-supplied defaultZone, then a
// built-in list of known zones, then a heuristic that treats the final
// two labels as the zone ("@" marks the apex). A hostname with fewer
// than two labels yields two empty strings.
func parseHostname(hostname, defaultZone string) (subdomain, zone string) {
	trim := func(z string) (string, bool) {
		suffix := "." + z
		if !strings.HasSuffix(hostname, suffix) {
			return "", false
		}
		return strings.TrimSuffix(hostname, suffix), true
	}

	if defaultZone != "" {
		if sub, ok := trim(defaultZone); ok {
			return sub, defaultZone
		}
	}

	// Known zones
	for _, z := range []string{
		"obr.sh", "obnh.io", "obnh.network", "obnh.org",
		"obr.digital", "obr.im", "s-n-r.net", "as60284.net", "baumert.cc",
	} {
		if sub, ok := trim(z); ok {
			return sub, z
		}
	}

	// Fallback: assume last two parts are zone
	labels := strings.Split(hostname, ".")
	if len(labels) < 2 {
		return "", ""
	}
	zone = strings.Join(labels[len(labels)-2:], ".")
	subdomain = strings.Join(labels[:len(labels)-2], ".")
	if subdomain == "" {
		subdomain = "@"
	}
	return subdomain, zone
}
|
||||
226
internal/providers/cloudscale.go
Normal file
226
internal/providers/cloudscale.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/cloudscale-ch/cloudscale-go-sdk/v6"
|
||||
)
|
||||
|
||||
// CloudscaleProvider implements CloudProvider for Cloudscale.ch using
// the official Go SDK.
type CloudscaleProvider struct {
	client *cloudscale.Client // authenticated Cloudscale API client
}
|
||||
|
||||
// NewCloudscaleProvider creates a new Cloudscale provider
|
||||
func NewCloudscaleProvider(token string) (*CloudscaleProvider, error) {
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("cloudscale API token required")
|
||||
}
|
||||
|
||||
client := cloudscale.NewClient(http.DefaultClient)
|
||||
client.AuthToken = token
|
||||
|
||||
return &CloudscaleProvider{client: client}, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) Name() string {
|
||||
return "cloudscale"
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) CreateVM(ctx context.Context, opts VMOptions) (*VM, error) {
|
||||
zone := opts.Zone
|
||||
if zone == "" {
|
||||
zone = "lpg1" // Default: Lupfig
|
||||
}
|
||||
|
||||
image := opts.Image
|
||||
if image == "" {
|
||||
image = "ubuntu-24.04"
|
||||
}
|
||||
|
||||
flavor := opts.Flavor
|
||||
if flavor == "" {
|
||||
flavor = "flex-4-2" // 4 vCPU, 2GB RAM
|
||||
}
|
||||
|
||||
req := &cloudscale.ServerRequest{
|
||||
Name: opts.Name,
|
||||
Flavor: flavor,
|
||||
Image: image,
|
||||
Zone: zone,
|
||||
SSHKeys: []string{opts.SSHPublicKey},
|
||||
VolumeSizeGB: int(opts.DiskSizeGB),
|
||||
}
|
||||
|
||||
if opts.UserData != "" {
|
||||
req.UserData = opts.UserData
|
||||
}
|
||||
|
||||
server, err := p.client.Servers.Create(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create server: %w", err)
|
||||
}
|
||||
|
||||
// Wait for server to be running
|
||||
for i := 0; i < 60; i++ {
|
||||
server, err = p.client.Servers.Get(ctx, server.UUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get server status: %w", err)
|
||||
}
|
||||
if server.Status == "running" {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
// Get public IP
|
||||
var publicIP string
|
||||
for _, iface := range server.Interfaces {
|
||||
if iface.Type == "public" {
|
||||
for _, addr := range iface.Addresses {
|
||||
if addr.Version == 4 {
|
||||
publicIP = addr.Address
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: server.UUID,
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: server.Status,
|
||||
Provider: "cloudscale",
|
||||
Zone: server.Zone.Slug,
|
||||
CreatedAt: server.CreatedAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteVM deletes the Cloudscale server identified by its UUID.
func (p *CloudscaleProvider) DeleteVM(ctx context.Context, vmID string) error {
	return p.client.Servers.Delete(ctx, vmID)
}
|
||||
|
||||
func (p *CloudscaleProvider) GetVM(ctx context.Context, vmID string) (*VM, error) {
|
||||
server, err := p.client.Servers.Get(ctx, vmID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
for _, iface := range server.Interfaces {
|
||||
if iface.Type == "public" {
|
||||
for _, addr := range iface.Addresses {
|
||||
if addr.Version == 4 {
|
||||
publicIP = addr.Address
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: server.UUID,
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: server.Status,
|
||||
Provider: "cloudscale",
|
||||
Zone: server.Zone.Slug,
|
||||
CreatedAt: server.CreatedAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("SSH not available at %s after %v", address, timeout)
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) ListFlavors(ctx context.Context) ([]Flavor, error) {
|
||||
// Make raw API request to /v1/flavors
|
||||
req, err := p.client.NewRequest(ctx, "GET", "v1/flavors", nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var flavors []cloudscale.Flavor
|
||||
if err := p.client.Do(ctx, req, &flavors); err != nil {
|
||||
return nil, fmt.Errorf("failed to list flavors: %w", err)
|
||||
}
|
||||
|
||||
var result []Flavor
|
||||
for _, f := range flavors {
|
||||
result = append(result, Flavor{
|
||||
ID: f.Slug,
|
||||
Name: f.Name,
|
||||
CPUs: f.VCPUCount,
|
||||
Memory: f.MemoryGB * 1024, // Convert to MB
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) ListImages(ctx context.Context, filter string) ([]Image, error) {
|
||||
// Make raw API request to /v1/images
|
||||
req, err := p.client.NewRequest(ctx, "GET", "v1/images", nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var images []cloudscale.Image
|
||||
if err := p.client.Do(ctx, req, &images); err != nil {
|
||||
return nil, fmt.Errorf("failed to list images: %w", err)
|
||||
}
|
||||
|
||||
var result []Image
|
||||
for _, img := range images {
|
||||
if filter == "" || contains(img.Slug, filter) || contains(img.Name, filter) {
|
||||
result = append(result, Image{
|
||||
ID: img.Slug,
|
||||
Name: img.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) ListZones(ctx context.Context) ([]string, error) {
|
||||
regions, err := p.client.Regions.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var zones []string
|
||||
for _, r := range regions {
|
||||
for _, z := range r.Zones {
|
||||
zones = append(zones, z.Slug)
|
||||
}
|
||||
}
|
||||
|
||||
return zones, nil
|
||||
}
|
||||
319
internal/providers/exoscale.go
Normal file
319
internal/providers/exoscale.go
Normal file
@@ -0,0 +1,319 @@
|
||||
package providers
|
||||
|
||||
import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"

	v3 "github.com/exoscale/egoscale/v3"
	"github.com/exoscale/egoscale/v3/credentials"
)
|
||||
|
||||
// ExoscaleProvider implements CloudProvider for Exoscale. Only the
// credentials are stored; zone-bound API clients are built per call
// because each Exoscale zone has its own endpoint (see getClientForZone).
type ExoscaleProvider struct {
	creds *credentials.Credentials // static API key/secret pair
}
|
||||
|
||||
// NewExoscaleProvider creates a new Exoscale provider
|
||||
func NewExoscaleProvider(apiKey, apiSecret string) (*ExoscaleProvider, error) {
|
||||
creds := credentials.NewStaticCredentials(apiKey, apiSecret)
|
||||
return &ExoscaleProvider{creds: creds}, nil
|
||||
}
|
||||
|
||||
// Name identifies this provider implementation ("exoscale").
func (p *ExoscaleProvider) Name() string {
	return "exoscale"
}
|
||||
|
||||
// getClientForZone creates a zone-specific client
|
||||
func (p *ExoscaleProvider) getClientForZone(zone string) (*v3.Client, error) {
|
||||
endpoint := p.getEndpointForZone(zone)
|
||||
return v3.NewClient(p.creds, v3.ClientOptWithEndpoint(endpoint))
|
||||
}
|
||||
|
||||
// getEndpointForZone maps zone names to API endpoints
|
||||
func (p *ExoscaleProvider) getEndpointForZone(zone string) v3.Endpoint {
|
||||
endpoints := map[string]v3.Endpoint{
|
||||
"ch-gva-2": v3.CHGva2,
|
||||
"ch-dk-2": v3.CHDk2,
|
||||
"de-fra-1": v3.DEFra1,
|
||||
"de-muc-1": v3.DEMuc1,
|
||||
"at-vie-1": v3.ATVie1,
|
||||
"at-vie-2": v3.ATVie2,
|
||||
"bg-sof-1": v3.BGSof1,
|
||||
}
|
||||
|
||||
if endpoint, ok := endpoints[zone]; ok {
|
||||
return endpoint
|
||||
}
|
||||
return v3.CHGva2 // Default
|
||||
}
|
||||
|
||||
// CreateVM provisions an Exoscale instance in opts.Zone (default
// ch-gva-2): it resolves the template and instance type, registers the
// SSH public key under a timestamped "recovery-*" name, creates the
// instance (waiting for the async operation), then re-fetches it to
// report its public IP.
// NOTE(review): the comment below says the SSH key is registered
// "temporarily", but nothing here deletes it afterwards — confirm
// whether cleanup is handled elsewhere or keys accumulate.
func (p *ExoscaleProvider) CreateVM(ctx context.Context, opts VMOptions) (*VM, error) {
	// Get zone endpoint
	zone := opts.Zone
	if zone == "" {
		zone = "ch-gva-2" // Default zone
	}

	// Create client for specific zone
	client, err := p.getClientForZone(zone)
	if err != nil {
		return nil, fmt.Errorf("failed to create zone client: %w", err)
	}

	// Find template (image)
	templates, err := client.ListTemplates(ctx, v3.ListTemplatesWithVisibility("public"))
	if err != nil {
		return nil, fmt.Errorf("failed to list templates: %w", err)
	}

	selectedTemplate, err := templates.FindTemplate(opts.Image)
	if err != nil {
		return nil, fmt.Errorf("template not found: %s (%w)", opts.Image, err)
	}

	// Find instance type
	instanceTypes, err := client.ListInstanceTypes(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list instance types: %w", err)
	}

	selectedType, err := instanceTypes.FindInstanceTypeByIdOrFamilyAndSize(opts.Flavor)
	if err != nil {
		return nil, fmt.Errorf("instance type not found: %s (%w)", opts.Flavor, err)
	}

	// Determine disk size
	diskSize := opts.DiskSizeGB
	if diskSize == 0 {
		diskSize = 50 // Default 50GB
	}

	// Register the SSH key temporarily
	// (name is timestamped to avoid collisions across recovery runs)
	sshKeyName := fmt.Sprintf("recovery-%s-%d", opts.Name, time.Now().Unix())
	sshKeyOp, err := client.RegisterSSHKey(ctx, v3.RegisterSSHKeyRequest{
		Name:      sshKeyName,
		PublicKey: opts.SSHPublicKey,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to register SSH key: %w", err)
	}

	// Wait for SSH key registration
	sshKeyOp, err = client.Wait(ctx, sshKeyOp, v3.OperationStateSuccess)
	if err != nil {
		return nil, fmt.Errorf("SSH key registration failed: %w", err)
	}

	// Create the instance
	createReq := v3.CreateInstanceRequest{
		Name:         opts.Name,
		InstanceType: &selectedType,
		Template:     &selectedTemplate,
		DiskSize:     diskSize,
		SSHKey:       &v3.SSHKey{Name: sshKeyName},
	}

	// Add user data if provided
	if opts.UserData != "" {
		createReq.UserData = opts.UserData
	}

	op, err := client.CreateInstance(ctx, createReq)
	if err != nil {
		return nil, fmt.Errorf("failed to create instance: %w", err)
	}

	// Wait for operation to complete
	op, err = client.Wait(ctx, op, v3.OperationStateSuccess)
	if err != nil {
		return nil, fmt.Errorf("instance creation failed: %w", err)
	}

	// Get the created instance
	if op.Reference == nil {
		return nil, fmt.Errorf("operation completed but no reference returned")
	}

	instance, err := client.GetInstance(ctx, op.Reference.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get created instance: %w", err)
	}

	// Extract public IP
	var publicIP string
	if instance.PublicIP != nil {
		publicIP = instance.PublicIP.String()
	}

	return &VM{
		ID:        string(instance.ID),
		Name:      instance.Name,
		PublicIP:  publicIP,
		Status:    string(instance.State),
		Provider:  "exoscale",
		Zone:      zone,
		CreatedAt: instance.CreatedAT,
	}, nil
}
|
||||
|
||||
func (p *ExoscaleProvider) DeleteVM(ctx context.Context, vmID string) error {
|
||||
// We need to find which zone the VM is in
|
||||
zones := []string{"ch-gva-2", "ch-dk-2", "de-fra-1", "de-muc-1", "at-vie-1", "at-vie-2", "bg-sof-1"}
|
||||
|
||||
for _, zone := range zones {
|
||||
client, err := p.getClientForZone(zone)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
op, err := client.DeleteInstance(ctx, v3.UUID(vmID))
|
||||
if err != nil {
|
||||
continue // Try next zone
|
||||
}
|
||||
|
||||
_, err = client.Wait(ctx, op, v3.OperationStateSuccess)
|
||||
if err != nil {
|
||||
return fmt.Errorf("delete failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("VM %s not found in any zone", vmID)
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) GetVM(ctx context.Context, vmID string) (*VM, error) {
|
||||
zones := []string{"ch-gva-2", "ch-dk-2", "de-fra-1", "de-muc-1", "at-vie-1", "at-vie-2", "bg-sof-1"}
|
||||
|
||||
for _, zone := range zones {
|
||||
client, err := p.getClientForZone(zone)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
instance, err := client.GetInstance(ctx, v3.UUID(vmID))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
if instance.PublicIP != nil {
|
||||
publicIP = instance.PublicIP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: string(instance.ID),
|
||||
Name: instance.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(instance.State),
|
||||
Provider: "exoscale",
|
||||
Zone: zone,
|
||||
CreatedAt: instance.CreatedAT,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("VM %s not found", vmID)
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
// Continue trying
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("SSH not available at %s after %v", address, timeout)
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) ListFlavors(ctx context.Context) ([]Flavor, error) {
|
||||
// Use default zone for listing
|
||||
client, err := p.getClientForZone("ch-gva-2")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
types, err := client.ListInstanceTypes(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list instance types: %w", err)
|
||||
}
|
||||
|
||||
var flavors []Flavor
|
||||
for _, it := range types.InstanceTypes {
|
||||
flavors = append(flavors, Flavor{
|
||||
ID: string(it.ID),
|
||||
Name: string(it.Size),
|
||||
CPUs: int(it.Cpus),
|
||||
Memory: int(it.Memory),
|
||||
})
|
||||
}
|
||||
|
||||
return flavors, nil
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) ListImages(ctx context.Context, filter string) ([]Image, error) {
|
||||
client, err := p.getClientForZone("ch-gva-2")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
templates, err := client.ListTemplates(ctx, v3.ListTemplatesWithVisibility("public"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list templates: %w", err)
|
||||
}
|
||||
|
||||
var images []Image
|
||||
for _, tmpl := range templates.Templates {
|
||||
if filter == "" || contains(tmpl.Name, filter) {
|
||||
images = append(images, Image{
|
||||
ID: string(tmpl.ID),
|
||||
Name: tmpl.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return images, nil
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) ListZones(ctx context.Context) ([]string, error) {
|
||||
return []string{
|
||||
"ch-gva-2", // Geneva
|
||||
"ch-dk-2", // Zurich
|
||||
"de-fra-1", // Frankfurt
|
||||
"de-muc-1", // Munich
|
||||
"at-vie-1", // Vienna 1
|
||||
"at-vie-2", // Vienna 2
|
||||
"bg-sof-1", // Sofia
|
||||
}, nil
|
||||
}
|
||||
|
||||
// contains reports whether substr occurs within s (case-sensitive).
// The original wrapped findSubstring in redundant length/equality guards;
// a single scan covers every case, including the empty substring (true)
// and substr longer than s (false).
// NOTE(review): this re-implements strings.Contains; kept local because the
// file's import block is outside this change.
func contains(s, substr string) bool {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
|
||||
|
||||
// findSubstring reports whether needle occurs anywhere in haystack using a
// naive sliding-window comparison.
func findSubstring(s, substr string) bool {
	if len(substr) > len(s) {
		return false
	}
	last := len(s) - len(substr)
	for start := 0; start <= last; start++ {
		if s[start:start+len(substr)] == substr {
			return true
		}
	}
	return false
}
|
||||
40
internal/providers/factory.go
Normal file
40
internal/providers/factory.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// NewProvider creates a cloud provider by name
|
||||
func NewProvider(name string, config map[string]string) (CloudProvider, error) {
|
||||
switch name {
|
||||
case "exoscale":
|
||||
apiKey := config["api_key"]
|
||||
apiSecret := config["api_secret"]
|
||||
if apiKey == "" || apiSecret == "" {
|
||||
return nil, fmt.Errorf("exoscale requires api_key and api_secret")
|
||||
}
|
||||
return NewExoscaleProvider(apiKey, apiSecret)
|
||||
|
||||
case "cloudscale":
|
||||
token := config["token"]
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("cloudscale requires token")
|
||||
}
|
||||
return NewCloudscaleProvider(token)
|
||||
|
||||
case "hetzner":
|
||||
token := config["token"]
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("hetzner requires token")
|
||||
}
|
||||
return NewHetznerProvider(token)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown provider: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
// SupportedProviders returns the provider names accepted by NewProvider.
func SupportedProviders() []string {
	names := []string{"exoscale", "cloudscale", "hetzner"}
	return names
}
|
||||
233
internal/providers/hetzner.go
Normal file
233
internal/providers/hetzner.go
Normal file
@@ -0,0 +1,233 @@
|
||||
package providers
|
||||
|
||||
import (
	"context"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/hetznercloud/hcloud-go/v2/hcloud"
)
|
||||
|
||||
// HetznerProvider implements CloudProvider for Hetzner Cloud.
// All operations delegate to the official hcloud-go v2 client.
type HetznerProvider struct {
	client *hcloud.Client // authenticated API client set by NewHetznerProvider
}
|
||||
|
||||
// NewHetznerProvider creates a new Hetzner provider
|
||||
func NewHetznerProvider(token string) (*HetznerProvider, error) {
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("hetzner API token required")
|
||||
}
|
||||
|
||||
client := hcloud.NewClient(hcloud.WithToken(token))
|
||||
|
||||
return &HetznerProvider{client: client}, nil
|
||||
}
|
||||
|
||||
// Name returns the provider identifier used in config and VM records.
func (p *HetznerProvider) Name() string {
	return "hetzner"
}
|
||||
|
||||
func (p *HetznerProvider) CreateVM(ctx context.Context, opts VMOptions) (*VM, error) {
|
||||
// Parse zone/location
|
||||
location := opts.Zone
|
||||
if location == "" {
|
||||
location = "fsn1" // Default: Falkenstein
|
||||
}
|
||||
|
||||
image := opts.Image
|
||||
if image == "" {
|
||||
image = "ubuntu-24.04"
|
||||
}
|
||||
|
||||
serverType := opts.Flavor
|
||||
if serverType == "" {
|
||||
serverType = "cx22" // 2 vCPU, 4GB RAM
|
||||
}
|
||||
|
||||
// Register SSH key if provided
|
||||
var sshKeys []*hcloud.SSHKey
|
||||
if opts.SSHPublicKey != "" {
|
||||
keyName := fmt.Sprintf("recovery-%s-%d", opts.Name, time.Now().Unix())
|
||||
sshKey, _, err := p.client.SSHKey.Create(ctx, hcloud.SSHKeyCreateOpts{
|
||||
Name: keyName,
|
||||
PublicKey: opts.SSHPublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to register SSH key: %w", err)
|
||||
}
|
||||
sshKeys = append(sshKeys, sshKey)
|
||||
}
|
||||
|
||||
// Create server
|
||||
createOpts := hcloud.ServerCreateOpts{
|
||||
Name: opts.Name,
|
||||
ServerType: &hcloud.ServerType{Name: serverType},
|
||||
Image: &hcloud.Image{Name: image},
|
||||
Location: &hcloud.Location{Name: location},
|
||||
SSHKeys: sshKeys,
|
||||
}
|
||||
|
||||
if opts.UserData != "" {
|
||||
createOpts.UserData = opts.UserData
|
||||
}
|
||||
|
||||
result, _, err := p.client.Server.Create(ctx, createOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create server: %w", err)
|
||||
}
|
||||
|
||||
// Wait for server to be running
|
||||
server := result.Server
|
||||
for i := 0; i < 60; i++ {
|
||||
server, _, err = p.client.Server.GetByID(ctx, server.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get server status: %w", err)
|
||||
}
|
||||
if server.Status == hcloud.ServerStatusRunning {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
if server.PublicNet.IPv4.IP != nil {
|
||||
publicIP = server.PublicNet.IPv4.IP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: fmt.Sprintf("%d", server.ID),
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(server.Status),
|
||||
Provider: "hetzner",
|
||||
Zone: server.Datacenter.Location.Name,
|
||||
CreatedAt: server.Created,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) DeleteVM(ctx context.Context, vmID string) error {
|
||||
var id int64
|
||||
fmt.Sscanf(vmID, "%d", &id)
|
||||
|
||||
server, _, err := p.client.Server.GetByID(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if server == nil {
|
||||
return fmt.Errorf("server not found: %s", vmID)
|
||||
}
|
||||
|
||||
_, _, err = p.client.Server.DeleteWithResult(ctx, server)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) GetVM(ctx context.Context, vmID string) (*VM, error) {
|
||||
var id int64
|
||||
fmt.Sscanf(vmID, "%d", &id)
|
||||
|
||||
server, _, err := p.client.Server.GetByID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if server == nil {
|
||||
return nil, fmt.Errorf("server not found: %s", vmID)
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
if server.PublicNet.IPv4.IP != nil {
|
||||
publicIP = server.PublicNet.IPv4.IP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: fmt.Sprintf("%d", server.ID),
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(server.Status),
|
||||
Provider: "hetzner",
|
||||
Zone: server.Datacenter.Location.Name,
|
||||
CreatedAt: server.Created,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("SSH not available at %s after %v", address, timeout)
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ListFlavors(ctx context.Context) ([]Flavor, error) {
|
||||
types, err := p.client.ServerType.All(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result []Flavor
|
||||
for _, t := range types {
|
||||
result = append(result, Flavor{
|
||||
ID: t.Name,
|
||||
Name: t.Description,
|
||||
CPUs: t.Cores,
|
||||
Memory: int(t.Memory * 1024), // Convert GB to MB
|
||||
Disk: t.Disk,
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ListImages(ctx context.Context, filter string) ([]Image, error) {
|
||||
images, err := p.client.Image.All(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result []Image
|
||||
for _, img := range images {
|
||||
if img.Type != hcloud.ImageTypeSystem {
|
||||
continue // Only show system images
|
||||
}
|
||||
if filter == "" || contains(img.Name, filter) || contains(img.Description, filter) {
|
||||
result = append(result, Image{
|
||||
ID: img.Name,
|
||||
Name: img.Description,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ListZones(ctx context.Context) ([]string, error) {
|
||||
locations, err := p.client.Location.All(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var zones []string
|
||||
for _, l := range locations {
|
||||
zones = append(zones, l.Name)
|
||||
}
|
||||
|
||||
return zones, nil
|
||||
}
|
||||
96
internal/providers/provider.go
Normal file
96
internal/providers/provider.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// VMOptions contains options for creating a new VM.
// Empty fields are filled with provider-specific defaults by CreateVM
// (e.g. Hetzner: fsn1 / ubuntu-24.04 / cx22).
type VMOptions struct {
	Name         string            // server/host name for the new VM
	Zone         string            // e.g., "ch-gva-2", "lpg1", "fsn1"
	Flavor       string            // Instance type/size
	Image        string            // OS image name or ID
	SSHPublicKey string            // Ephemeral public key content
	UserData     string            // Cloud-init script
	DiskSizeGB   int64             // Root disk size
	Tags         map[string]string // Optional tags/labels
}
|
||||
|
||||
// VM represents a created virtual machine, normalized across providers.
type VM struct {
	ID        string // provider-specific identifier (numeric string for Hetzner)
	Name      string
	PublicIP  string // empty if the provider assigned no public IPv4
	PrivateIP string
	Status    string // provider-native status string, not normalized
	Provider  string // creating provider's Name(), e.g. "hetzner"
	Zone      string
	CreatedAt time.Time
}
|
||||
|
||||
// Flavor represents an instance type/size offered by a provider.
type Flavor struct {
	ID     string
	Name   string
	CPUs   int
	Memory int // MB
	Disk   int // GB (if applicable; 0 when the provider does not report it)
}
|
||||
|
||||
// Image represents an OS image. What ID/Name hold is provider-specific
// (Hetzner uses the image name as ID and its description as Name).
type Image struct {
	ID   string
	Name string
}
|
||||
|
||||
// CloudProvider defines the interface for cloud providers.
// Implementations are constructed via NewProvider; see SupportedProviders
// for the recognized names.
type CloudProvider interface {
	// Name returns the provider name (e.g. "hetzner").
	Name() string

	// CreateVM creates a new virtual machine. Empty VMOptions fields are
	// filled with provider defaults.
	CreateVM(ctx context.Context, opts VMOptions) (*VM, error)

	// DeleteVM deletes a virtual machine by ID.
	DeleteVM(ctx context.Context, vmID string) error

	// GetVM gets VM details by ID.
	GetVM(ctx context.Context, vmID string) (*VM, error)

	// WaitForSSH waits until SSH is available on the VM (port 0 means 22),
	// returning an error on timeout or context cancellation.
	WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error

	// ListFlavors lists available instance types.
	ListFlavors(ctx context.Context) ([]Flavor, error)

	// ListImages lists available OS images; filter is a substring match
	// (empty filter matches everything).
	ListImages(ctx context.Context, filter string) ([]Image, error)

	// ListZones lists available zones/regions.
	ListZones(ctx context.Context) ([]string, error)
}
|
||||
|
||||
// GenerateCloudInit renders the cloud-init user-data used for recovery VMs:
// it authorizes the ephemeral key for root, installs the tooling the restore
// pipeline needs, drops a readiness marker, and enables/starts Docker.
// NOTE(review): ephemeralPubKey is interpolated into the YAML unescaped; a
// key containing a double quote would corrupt the document — confirm keys
// are always plain authorized_keys lines.
func GenerateCloudInit(ephemeralPubKey string) string {
	const head = `#cloud-config
ssh_pwauth: false
users:
  - name: root
    ssh_authorized_keys:
      - "`
	const tail = `"
package_update: true
packages:
  - rsync
  - docker.io
  - docker-compose
  - wireguard-tools
write_files:
  - path: /var/tmp/recovery-ready
    content: "ready"
    permissions: '0644'
runcmd:
  - systemctl enable docker
  - systemctl start docker
`
	return head + ephemeralPubKey + tail
}
|
||||
102
internal/restore/docker.go
Normal file
102
internal/restore/docker.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// startDocker ensures Docker is running and starts compose stacks
|
||||
func (p *Pipeline) startDocker(ctx context.Context) error {
|
||||
// Ensure Docker is enabled and running
|
||||
if err := p.remoteCmd(ctx, "systemctl enable docker"); err != nil {
|
||||
return fmt.Errorf("failed to enable docker: %w", err)
|
||||
}
|
||||
|
||||
if err := p.remoteCmd(ctx, "systemctl start docker"); err != nil {
|
||||
return fmt.Errorf("failed to start docker: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Docker to be ready
|
||||
for i := 0; i < 30; i++ {
|
||||
if err := p.remoteCmd(ctx, "docker info > /dev/null 2>&1"); err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
// Find and start docker-compose stacks
|
||||
findCmd := "find /opt -name 'docker-compose.yml' -o -name 'docker-compose.yaml' -o -name 'compose.yml' -o -name 'compose.yaml' 2>/dev/null | head -20"
|
||||
output, err := p.remoteCmdOutput(ctx, findCmd)
|
||||
if err != nil || strings.TrimSpace(output) == "" {
|
||||
if p.Verbose {
|
||||
fmt.Println(" No docker-compose files found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
composeFiles := strings.Split(strings.TrimSpace(output), "\n")
|
||||
for _, file := range composeFiles {
|
||||
if file == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get directory containing compose file
|
||||
dir := file[:strings.LastIndex(file, "/")]
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Starting compose stack in %s\n", dir)
|
||||
}
|
||||
|
||||
// Try docker compose (v2) first, fall back to docker-compose (v1)
|
||||
startCmd := fmt.Sprintf("cd %s && (docker compose up -d 2>/dev/null || docker-compose up -d)", dir)
|
||||
if err := p.remoteCmd(ctx, startCmd); err != nil {
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Warning: failed to start stack in %s: %v\n", dir, err)
|
||||
}
|
||||
// Don't fail on individual stack failures
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runHealth performs health verification
|
||||
func (p *Pipeline) runHealth(ctx context.Context) error {
|
||||
checks := []struct {
|
||||
name string
|
||||
cmd string
|
||||
require bool
|
||||
}{
|
||||
{"SSH accessible", "echo ok", true},
|
||||
{"Docker running", "docker info > /dev/null 2>&1 && echo ok", true},
|
||||
{"Network connectivity", "ping -c 1 8.8.8.8 > /dev/null 2>&1 && echo ok", false},
|
||||
{"DNS resolution", "host google.com > /dev/null 2>&1 && echo ok", false},
|
||||
}
|
||||
|
||||
var failures []string
|
||||
|
||||
for _, check := range checks {
|
||||
output, err := p.remoteCmdOutput(ctx, check.cmd)
|
||||
success := err == nil && strings.TrimSpace(output) == "ok"
|
||||
|
||||
status := "✓"
|
||||
if !success {
|
||||
status = "✗"
|
||||
if check.require {
|
||||
failures = append(failures, check.name)
|
||||
}
|
||||
}
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" %s %s\n", status, check.name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
return fmt.Errorf("required health checks failed: %v", failures)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
130
internal/restore/pipeline.go
Normal file
130
internal/restore/pipeline.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"recover-server/internal/backup"
|
||||
"recover-server/internal/providers"
|
||||
)
|
||||
|
||||
// Stage represents a restore stage; values start at 1 and run in order.
type Stage int

const (
	StageSync         Stage = iota + 1 // sync /root and /opt from backup
	StageEtc                           // stage backup /etc into /srv/restore
	StageSelectiveEtc                  // promote whitelisted /etc entries
	StageSSHKeys                       // merge original + ephemeral keys
	StageServices                      // start WireGuard and Docker
	StageHealth                        // post-restore health checks
)

// String returns the human-readable stage name.
// Uses a switch instead of the original per-call map literal (which
// allocated on every call), and unknown values get a diagnostic form
// instead of the empty string.
func (s Stage) String() string {
	switch s {
	case StageSync:
		return "Sync /root and /opt"
	case StageEtc:
		return "Stage /etc backup"
	case StageSelectiveEtc:
		return "Selective /etc restore"
	case StageSSHKeys:
		return "Merge SSH keys"
	case StageServices:
		return "Start services"
	case StageHealth:
		return "Health verification"
	default:
		return fmt.Sprintf("Stage(%d)", int(s))
	}
}
|
||||
|
||||
// StageResult contains the result of a stage execution, as recorded by
// Pipeline.Run (one entry per attempted stage; dry-run entries have zero
// Duration).
type StageResult struct {
	Stage    Stage
	Success  bool
	Message  string        // "Completed successfully", the error text, or a dry-run note
	Duration time.Duration // wall-clock time of the stage; 0 in dry-run
	Error    error         // nil on success
}
|
||||
|
||||
// Pipeline orchestrates the restore process against a freshly created VM,
// driving rsync/ssh as SSHUser with the ephemeral key.
type Pipeline struct {
	VM           *providers.VM       // target VM; PublicIP is used as the SSH host
	BackupSource backup.BackupSource // where host data is restored from
	HostName     string              // backup host identifier passed to SyncTo
	SSHKeyPath   string              // Path to ephemeral private key
	SSHUser      string              // Usually "root"
	DryRun       bool                // record stages without executing them
	Verbose      bool                // print per-stage progress

	results []StageResult // accumulated per-stage outcomes (see Results)
}
|
||||
|
||||
// NewPipeline creates a new restore pipeline
|
||||
func NewPipeline(vm *providers.VM, source backup.BackupSource, host, sshKeyPath string) *Pipeline {
|
||||
return &Pipeline{
|
||||
VM: vm,
|
||||
BackupSource: source,
|
||||
HostName: host,
|
||||
SSHKeyPath: sshKeyPath,
|
||||
SSHUser: "root",
|
||||
results: make([]StageResult, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Run executes all stages
|
||||
func (p *Pipeline) Run(ctx context.Context) error {
|
||||
stages := []struct {
|
||||
stage Stage
|
||||
fn func(context.Context) error
|
||||
}{
|
||||
{StageSync, p.runSync},
|
||||
{StageEtc, p.runEtcStaging},
|
||||
{StageSelectiveEtc, p.runSelectiveEtc},
|
||||
{StageSSHKeys, p.runSSHKeyMerge},
|
||||
{StageServices, p.runServices},
|
||||
{StageHealth, p.runHealth},
|
||||
}
|
||||
|
||||
for _, s := range stages {
|
||||
start := time.Now()
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf("\n=== Stage %d: %s ===\n", s.stage, s.stage)
|
||||
}
|
||||
|
||||
if p.DryRun {
|
||||
p.results = append(p.results, StageResult{
|
||||
Stage: s.stage,
|
||||
Success: true,
|
||||
Message: "[DRY RUN] Would execute: " + s.stage.String(),
|
||||
Duration: 0,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
err := s.fn(ctx)
|
||||
result := StageResult{
|
||||
Stage: s.stage,
|
||||
Success: err == nil,
|
||||
Duration: time.Since(start),
|
||||
Error: err,
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
result.Message = err.Error()
|
||||
p.results = append(p.results, result)
|
||||
return fmt.Errorf("stage %d (%s) failed: %w", s.stage, s.stage, err)
|
||||
}
|
||||
|
||||
result.Message = "Completed successfully"
|
||||
p.results = append(p.results, result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Results returns the results recorded so far, one per attempted stage.
// The returned slice is the pipeline's internal slice; callers should
// treat it as read-only.
func (p *Pipeline) Results() []StageResult {
	return p.results
}
|
||||
|
||||
// sshTarget returns the SSH target string
|
||||
func (p *Pipeline) sshTarget() string {
|
||||
return fmt.Sprintf("%s@%s", p.SSHUser, p.VM.PublicIP)
|
||||
}
|
||||
110
internal/restore/ssh.go
Normal file
110
internal/restore/ssh.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package restore
|
||||
|
||||
import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"encoding/pem"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"golang.org/x/crypto/ssh"
)
|
||||
|
||||
// SSHKeyPair holds an ephemeral SSH key pair generated for one recovery run.
// The private key lives in a dedicated temp directory; call Cleanup to
// remove it when the run finishes.
type SSHKeyPair struct {
	PrivateKeyPath string // OpenSSH-format private key on disk (mode 0600)
	PublicKey      string // authorized_keys-format public key line
}
|
||||
|
||||
// GenerateEphemeralKey creates a temporary ED25519 SSH key pair
|
||||
func GenerateEphemeralKey() (*SSHKeyPair, error) {
|
||||
// Generate ED25519 key pair
|
||||
pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate key: %w", err)
|
||||
}
|
||||
|
||||
// Convert to SSH format
|
||||
sshPubKey, err := ssh.NewPublicKey(pubKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create SSH public key: %w", err)
|
||||
}
|
||||
|
||||
// Marshal public key
|
||||
pubKeyStr := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(sshPubKey)))
|
||||
|
||||
// Create temp directory for key
|
||||
tmpDir, err := os.MkdirTemp("", "recover-ssh-")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
|
||||
// Write private key in OpenSSH format
|
||||
privKeyPath := filepath.Join(tmpDir, "id_ed25519")
|
||||
|
||||
// Marshal private key to OpenSSH format
|
||||
pemBlock, err := ssh.MarshalPrivateKey(privKey, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
|
||||
privKeyPEM := pem.EncodeToMemory(pemBlock)
|
||||
if err := os.WriteFile(privKeyPath, privKeyPEM, 0600); err != nil {
|
||||
return nil, fmt.Errorf("failed to write private key: %w", err)
|
||||
}
|
||||
|
||||
return &SSHKeyPair{
|
||||
PrivateKeyPath: privKeyPath,
|
||||
PublicKey: pubKeyStr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Cleanup removes the ephemeral key files
|
||||
func (k *SSHKeyPair) Cleanup() {
|
||||
if k.PrivateKeyPath != "" {
|
||||
os.RemoveAll(filepath.Dir(k.PrivateKeyPath))
|
||||
}
|
||||
}
|
||||
|
||||
// runSSHKeyMerge merges original authorized_keys with ephemeral key
|
||||
func (p *Pipeline) runSSHKeyMerge(ctx context.Context) error {
|
||||
// First, backup current authorized_keys
|
||||
backupCmd := "cp /root/.ssh/authorized_keys /root/.ssh/authorized_keys.ephemeral 2>/dev/null || true"
|
||||
p.remoteCmd(ctx, backupCmd)
|
||||
|
||||
// Check if we have original keys in the restored /root
|
||||
checkCmd := "cat /root/.ssh/authorized_keys.original 2>/dev/null || cat /srv/restore/root/.ssh/authorized_keys 2>/dev/null || echo ''"
|
||||
originalKeys, _ := p.remoteCmdOutput(ctx, checkCmd)
|
||||
|
||||
// Get current (ephemeral) keys
|
||||
currentKeys, _ := p.remoteCmdOutput(ctx, "cat /root/.ssh/authorized_keys 2>/dev/null || echo ''")
|
||||
|
||||
// Merge keys (unique)
|
||||
allKeys := make(map[string]bool)
|
||||
for _, key := range strings.Split(currentKeys, "\n") {
|
||||
key = strings.TrimSpace(key)
|
||||
if key != "" && !strings.HasPrefix(key, "#") {
|
||||
allKeys[key] = true
|
||||
}
|
||||
}
|
||||
for _, key := range strings.Split(originalKeys, "\n") {
|
||||
key = strings.TrimSpace(key)
|
||||
if key != "" && !strings.HasPrefix(key, "#") {
|
||||
allKeys[key] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Write merged keys
|
||||
var mergedKeys []string
|
||||
for key := range allKeys {
|
||||
mergedKeys = append(mergedKeys, key)
|
||||
}
|
||||
|
||||
mergeCmd := fmt.Sprintf("mkdir -p /root/.ssh && echo '%s' > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys",
|
||||
strings.Join(mergedKeys, "\n"))
|
||||
|
||||
return p.remoteCmd(ctx, mergeCmd)
|
||||
}
|
||||
109
internal/restore/stages.go
Normal file
109
internal/restore/stages.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// etcWhitelist lists the only entries promoted from the staged backup /etc
// (/srv/restore/etc) into the live /etc by runSelectiveEtc. Everything else
// stays staged so the fresh OS's configuration is not clobbered.
// Entries are paths relative to /etc and may be files or directories.
var etcWhitelist = []string{
	"wireguard",      // VPN configs and keys
	"letsencrypt",    // TLS certificates
	"nginx",          // web server configuration
	"rsyslog-certs",  // remote-logging TLS material
	"systemd/system", // custom unit files
	"docker",         // daemon configuration
	"hostname",
	"hosts",
	"passwd",
	"group",
	"shadow",
	"gshadow",
}
|
||||
|
||||
// runSync syncs /root and /opt from backup
|
||||
func (p *Pipeline) runSync(ctx context.Context) error {
|
||||
dirs := []string{"root", "opt"}
|
||||
return p.BackupSource.SyncTo(ctx, p.HostName, p.sshTarget(), p.SSHKeyPath, dirs)
|
||||
}
|
||||
|
||||
// runEtcStaging stages /etc to /srv/restore/etc
|
||||
func (p *Pipeline) runEtcStaging(ctx context.Context) error {
|
||||
// Create staging directory on target
|
||||
if err := p.remoteCmd(ctx, "mkdir -p /srv/restore"); err != nil {
|
||||
return fmt.Errorf("failed to create staging dir: %w", err)
|
||||
}
|
||||
|
||||
// Sync /etc to staging
|
||||
dirs := []string{"etc"}
|
||||
return p.BackupSource.SyncTo(ctx, p.HostName, p.sshTarget(), p.SSHKeyPath, dirs)
|
||||
}
|
||||
|
||||
// runSelectiveEtc copies only whitelisted items from staged /etc
|
||||
func (p *Pipeline) runSelectiveEtc(ctx context.Context) error {
|
||||
for _, item := range etcWhitelist {
|
||||
src := fmt.Sprintf("/srv/restore/etc/%s", item)
|
||||
dst := fmt.Sprintf("/etc/%s", item)
|
||||
|
||||
// Check if source exists
|
||||
checkCmd := fmt.Sprintf("test -e %s && echo exists || echo missing", src)
|
||||
output, err := p.remoteCmdOutput(ctx, checkCmd)
|
||||
if err != nil || strings.TrimSpace(output) == "missing" {
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Skipping %s (not in backup)\n", item)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Create parent directory if needed
|
||||
parentDir := fmt.Sprintf("/etc/%s", strings.Split(item, "/")[0])
|
||||
if strings.Contains(item, "/") {
|
||||
p.remoteCmd(ctx, fmt.Sprintf("mkdir -p %s", parentDir))
|
||||
}
|
||||
|
||||
// Copy with rsync for proper permissions
|
||||
copyCmd := fmt.Sprintf("rsync -av %s %s", src, dst)
|
||||
if err := p.remoteCmd(ctx, copyCmd); err != nil {
|
||||
return fmt.Errorf("failed to restore %s: %w", item, err)
|
||||
}
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Restored %s\n", item)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// remoteCmd runs a command on the target VM
|
||||
func (p *Pipeline) remoteCmd(ctx context.Context, cmd string) error {
|
||||
sshArgs := []string{
|
||||
"-i", p.SSHKeyPath,
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"-o", "ConnectTimeout=10",
|
||||
p.sshTarget(),
|
||||
cmd,
|
||||
}
|
||||
|
||||
sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...)
|
||||
return sshCmd.Run()
|
||||
}
|
||||
|
||||
// remoteCmdOutput runs a command and returns output
|
||||
func (p *Pipeline) remoteCmdOutput(ctx context.Context, cmd string) (string, error) {
|
||||
sshArgs := []string{
|
||||
"-i", p.SSHKeyPath,
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"-o", "ConnectTimeout=10",
|
||||
p.sshTarget(),
|
||||
cmd,
|
||||
}
|
||||
|
||||
sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...)
|
||||
output, err := sshCmd.Output()
|
||||
return string(output), err
|
||||
}
|
||||
63
internal/restore/wireguard.go
Normal file
63
internal/restore/wireguard.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// runServices starts restored services
|
||||
func (p *Pipeline) runServices(ctx context.Context) error {
|
||||
// Start WireGuard interfaces
|
||||
if err := p.startWireGuard(ctx); err != nil {
|
||||
// WireGuard is optional, log but don't fail
|
||||
if p.Verbose {
|
||||
fmt.Printf(" WireGuard: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start Docker
|
||||
if err := p.startDocker(ctx); err != nil {
|
||||
return fmt.Errorf("failed to start Docker: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// startWireGuard enables and starts WireGuard interfaces
|
||||
func (p *Pipeline) startWireGuard(ctx context.Context) error {
|
||||
// Check if WireGuard configs exist
|
||||
checkCmd := "ls /etc/wireguard/*.conf 2>/dev/null | head -5"
|
||||
output, err := p.remoteCmdOutput(ctx, checkCmd)
|
||||
if err != nil || strings.TrimSpace(output) == "" {
|
||||
return fmt.Errorf("no WireGuard configs found")
|
||||
}
|
||||
|
||||
// Get interface names
|
||||
configs := strings.Split(strings.TrimSpace(output), "\n")
|
||||
for _, conf := range configs {
|
||||
if conf == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract interface name from path (e.g., /etc/wireguard/wg0.conf -> wg0)
|
||||
parts := strings.Split(conf, "/")
|
||||
filename := parts[len(parts)-1]
|
||||
iface := strings.TrimSuffix(filename, ".conf")
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Starting WireGuard interface: %s\n", iface)
|
||||
}
|
||||
|
||||
// Enable and start
|
||||
enableCmd := fmt.Sprintf("systemctl enable wg-quick@%s", iface)
|
||||
startCmd := fmt.Sprintf("systemctl start wg-quick@%s", iface)
|
||||
|
||||
p.remoteCmd(ctx, enableCmd)
|
||||
if err := p.remoteCmd(ctx, startCmd); err != nil {
|
||||
return fmt.Errorf("failed to start %s: %w", iface, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
108
internal/ui/dryrun.go
Normal file
108
internal/ui/dryrun.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package ui
|
||||
|
||||
import (
	"fmt"
	"strings"
)
|
||||
|
||||
// DryRun tracks operations that WOULD be performed in dry-run mode so they
// can be displayed together via Print.
type DryRun struct {
	Enabled    bool      // when false, Print is a no-op
	Operations []DryRunOp
}
|
||||
|
||||
// DryRunOp represents a single operation that would be performed.
type DryRunOp struct {
	Component   string // e.g., "VM", "DNS", "Restore"
	Action      string // e.g., "Create", "Update", "Delete"
	Description string // human-readable detail shown in the listing
}
|
||||
|
||||
// NewDryRun creates a dry-run tracker
|
||||
func NewDryRun(enabled bool) *DryRun {
|
||||
return &DryRun{
|
||||
Enabled: enabled,
|
||||
Operations: make([]DryRunOp, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// AddOperation records an operation
|
||||
func (d *DryRun) AddOperation(component, action, description string) {
|
||||
d.Operations = append(d.Operations, DryRunOp{
|
||||
Component: component,
|
||||
Action: action,
|
||||
Description: description,
|
||||
})
|
||||
}
|
||||
|
||||
// Print displays all recorded operations
|
||||
func (d *DryRun) Print() {
|
||||
if !d.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("\n" + HeaderLine("DRY RUN - No changes will be made"))
|
||||
fmt.Println()
|
||||
|
||||
if len(d.Operations) == 0 {
|
||||
fmt.Println("No operations would be performed.")
|
||||
return
|
||||
}
|
||||
|
||||
for i, op := range d.Operations {
|
||||
fmt.Printf("%d. [%s] %s: %s\n", i+1, op.Component, op.Action, op.Description)
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("To execute these operations, run with --yes flag")
|
||||
}
|
||||
|
||||
// HeaderLine formats title as a "=== title ===" section header.
func HeaderLine(title string) string {
	return "=== " + title + " ==="
}
|
||||
|
||||
// TablePrint prints data as a simple table
|
||||
func TablePrint(headers []string, rows [][]string) {
|
||||
// Calculate column widths
|
||||
widths := make([]int, len(headers))
|
||||
for i, h := range headers {
|
||||
widths[i] = len(h)
|
||||
}
|
||||
for _, row := range rows {
|
||||
for i, cell := range row {
|
||||
if i < len(widths) && len(cell) > widths[i] {
|
||||
widths[i] = len(cell)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Print header
|
||||
for i, h := range headers {
|
||||
fmt.Printf("%-*s ", widths[i], h)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Print separator
|
||||
for i := range headers {
|
||||
fmt.Printf("%s ", repeat("-", widths[i]))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Print rows
|
||||
for _, row := range rows {
|
||||
for i, cell := range row {
|
||||
if i < len(widths) {
|
||||
fmt.Printf("%-*s ", widths[i], cell)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
// repeat returns s concatenated n times; non-positive n yields "".
// Replaces the original += loop (quadratic: each iteration reallocates and
// copies the growing string) with strings.Repeat.
func repeat(s string, n int) string {
	if n <= 0 {
		return ""
	}
	return strings.Repeat(s, n)
}
|
||||
111
internal/ui/progress.go
Normal file
111
internal/ui/progress.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Progress tracks and displays single-line terminal progress
// (Current of Total steps with elapsed time and a message).
type Progress struct {
	Total     int       // total number of steps
	Current   int       // steps completed so far
	StartTime time.Time // basis for the elapsed-time display
	Message   string    // label shown after the bar
}
|
||||
|
||||
// NewProgress creates a new progress tracker
|
||||
func NewProgress(total int, message string) *Progress {
|
||||
return &Progress{
|
||||
Total: total,
|
||||
Current: 0,
|
||||
StartTime: time.Now(),
|
||||
Message: message,
|
||||
}
|
||||
}
|
||||
|
||||
// Increment advances progress by one step and immediately redraws the line.
func (p *Progress) Increment() {
	p.Current++
	p.Print()
}
|
||||
|
||||
// SetMessage updates the current message
func (p *Progress) SetMessage(msg string) {
	// Takes effect on the next Print/Increment; does not redraw itself.
	p.Message = msg
}
|
||||
|
||||
// Print displays current progress
|
||||
func (p *Progress) Print() {
|
||||
elapsed := time.Since(p.StartTime)
|
||||
percent := float64(p.Current) / float64(p.Total) * 100
|
||||
|
||||
bar := p.bar(20)
|
||||
|
||||
fmt.Printf("\r[%s] %.1f%% (%d/%d) %s - %s ",
|
||||
bar, percent, p.Current, p.Total, elapsed.Round(time.Second), p.Message)
|
||||
}
|
||||
|
||||
// Complete marks progress as done
|
||||
func (p *Progress) Complete() {
|
||||
p.Current = p.Total
|
||||
elapsed := time.Since(p.StartTime)
|
||||
fmt.Printf("\r[%s] 100%% (%d/%d) %s - Complete\n",
|
||||
p.bar(20), p.Total, p.Total, elapsed.Round(time.Second))
|
||||
}
|
||||
|
||||
func (p *Progress) bar(width int) string {
|
||||
filled := int(float64(p.Current) / float64(p.Total) * float64(width))
|
||||
empty := width - filled
|
||||
|
||||
return strings.Repeat("█", filled) + strings.Repeat("░", empty)
|
||||
}
|
||||
|
||||
// Spinner displays a spinning indicator
type Spinner struct {
	Message string    // text shown next to the animated frame
	stop    chan bool // signals the animation goroutine to exit
	done    chan bool // animation goroutine confirms it has exited
}
|
||||
|
||||
// NewSpinner creates a new spinner
|
||||
func NewSpinner(message string) *Spinner {
|
||||
return &Spinner{
|
||||
Message: message,
|
||||
stop: make(chan bool),
|
||||
done: make(chan bool),
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the spinner animation
|
||||
func (s *Spinner) Start() {
|
||||
go func() {
|
||||
frames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
|
||||
i := 0
|
||||
for {
|
||||
select {
|
||||
case <-s.stop:
|
||||
s.done <- true
|
||||
return
|
||||
default:
|
||||
fmt.Printf("\r%s %s", frames[i%len(frames)], s.Message)
|
||||
i++
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop stops the spinner
|
||||
func (s *Spinner) Stop() {
|
||||
s.stop <- true
|
||||
<-s.done
|
||||
fmt.Printf("\r%s\n", strings.Repeat(" ", len(s.Message)+5))
|
||||
}
|
||||
|
||||
// StopWithMessage stops spinner with a final message
|
||||
func (s *Spinner) StopWithMessage(msg string) {
|
||||
s.stop <- true
|
||||
<-s.done
|
||||
fmt.Printf("\r%s\n", msg)
|
||||
}
|
||||
100
internal/ui/prompts.go
Normal file
100
internal/ui/prompts.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ConfirmAction asks a yes/no question on stdout and reads one line
// from stdin; only "y" or "yes" (case-insensitive, whitespace
// trimmed) confirms. A read error counts as "no".
func ConfirmAction(message string) bool {
	fmt.Printf("%s [y/N]: ", message)

	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}

	switch strings.ToLower(strings.TrimSpace(line)) {
	case "y", "yes":
		return true
	}
	return false
}
|
||||
|
||||
// ConfirmHostname guards a destructive DNS change: the operator must
// retype the hostname exactly (surrounding whitespace trimmed) to
// proceed. A stdin read error counts as refusal.
func ConfirmHostname(hostname string) bool {
	fmt.Printf("\n⚠️  DESTRUCTIVE OPERATION ⚠️\n")
	fmt.Printf("This will modify DNS for: %s\n", hostname)
	fmt.Printf("Type the hostname exactly to confirm: ")

	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}
	return strings.TrimSpace(line) == hostname
}
|
||||
|
||||
// ConfirmRecovery prints a summary of the planned recovery and
// requires the literal token "RECOVER" on stdin to proceed; a read
// error counts as refusal.
func ConfirmRecovery(host, source, target string) bool {
	fmt.Printf("\n=== RECOVERY CONFIRMATION ===\n")
	fmt.Printf("Host: %s\n", host)
	fmt.Printf("Source: %s\n", source)
	fmt.Printf("Target: %s\n", target)
	fmt.Printf("\nThis will create a new VM and restore data.\n")
	fmt.Printf("Type 'RECOVER' to proceed: ")

	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}
	return strings.TrimSpace(line) == "RECOVER"
}
|
||||
|
||||
// SelectOption prints a numbered menu and reads a 1-based choice
// from stdin, returning the corresponding 0-based index. It returns
// an error when the input is not a number or is out of range.
func SelectOption(prompt string, options []string) (int, error) {
	fmt.Println(prompt)
	for n, opt := range options {
		fmt.Printf("  %d. %s\n", n+1, opt)
	}
	fmt.Print("Selection: ")

	var choice int
	if _, err := fmt.Fscanf(bufio.NewReader(os.Stdin), "%d\n", &choice); err != nil {
		return -1, err
	}

	if choice < 1 || choice > len(options) {
		return -1, fmt.Errorf("invalid selection")
	}

	return choice - 1, nil
}
|
||||
|
||||
// PrintError prints a formatted error message in red (ANSI escapes).
func PrintError(format string, args ...interface{}) {
	f := "\033[31mError: " + format + "\033[0m\n"
	fmt.Printf(f, args...)
}
|
||||
|
||||
// PrintSuccess prints a formatted success message in green with a
// leading check mark (ANSI escapes).
func PrintSuccess(format string, args ...interface{}) {
	f := "\033[32m✓ " + format + "\033[0m\n"
	fmt.Printf(f, args...)
}
|
||||
|
||||
// PrintWarning prints a formatted warning message in yellow with a
// leading warning sign (ANSI escapes).
func PrintWarning(format string, args ...interface{}) {
	f := "\033[33m⚠ " + format + "\033[0m\n"
	fmt.Printf(f, args...)
}
|
||||
|
||||
// PrintInfo prints a formatted informational message in blue with a
// leading info marker (ANSI escapes).
func PrintInfo(format string, args ...interface{}) {
	f := "\033[34mℹ " + format + "\033[0m\n"
	fmt.Printf(f, args...)
}
|
||||
Reference in New Issue
Block a user