diff --git a/index.css b/index.css
new file mode 100644
index 0000000..84c45bb
--- /dev/null
+++ b/index.css
@@ -0,0 +1,2251 @@
+/* ============================================================================
+ * Styles - Batman, Spider-Man, Aquaman Theme (Fully Responsive - Enhanced)
+ * ============================================================================
+ */
+
+/* CSS Variables for better maintainability */
+:root {
+ --primary-gold: #ffd700;
+ --primary-gold-dark: #ffaa00;
+ --primary-blue: #00ffff;
+ --primary-red: #ff3333;
+ --primary-red-dark: #cc0000;
+ --primary-green: #00ff00;
+ --primary-green-dark: #00cc00;
+ --primary-info: #2196F3;
+ --primary-warning: #ff9800;
+ --primary-error: #f44336;
+ --primary-success: #4CAF50;
+
+ --bg-dark: #0a0f1e;
+ --bg-medium: #1a1f2f;
+ --bg-light: #0f172a;
+ --bg-card: rgba(0, 30, 50, 0.7);
+ --bg-overlay: rgba(0, 0, 0, 0.5);
+ --bg-hover: rgba(255, 215, 0, 0.1);
+
+ --text-primary: #fff;
+ --text-secondary: #aaffff;
+ --text-muted: #94a3b8;
+
+ --shadow-sm: 0 2px 4px rgba(0,0,0,0.1);
+ --shadow-md: 0 4px 6px rgba(0,0,0,0.1);
+ --shadow-lg: 0 10px 15px rgba(0,0,0,0.1);
+ --shadow-gold: 0 0 20px var(--primary-gold);
+ --shadow-blue: 0 0 20px var(--primary-blue);
+ --shadow-red: 0 0 20px var(--primary-red);
+
+ --transition-fast: 0.2s ease;
+ --transition-normal: 0.3s ease;
+ --transition-slow: 0.5s ease;
+
+ --spacing-xs: 4px;
+ --spacing-sm: 8px;
+ --spacing-md: 12px;
+ --spacing-lg: 16px;
+ --spacing-xl: 20px;
+ --spacing-xxl: 30px;
+
+ --radius-sm: 4px;
+ --radius-md: 8px;
+ --radius-lg: 12px;
+ --radius-xl: 20px;
+ --radius-xxl: 30px;
+ --radius-full: 40px;
+ --radius-circle: 50%;
+
+ --font-xs: 0.75rem;
+ --font-sm: 0.875rem;
+ --font-md: 1rem;
+ --font-lg: 1.125rem;
+ --font-xl: 1.25rem;
+ --font-xxl: 1.5rem;
+ --font-xxxl: 2rem;
+
+ --header-height: 80px;
+ --footer-height: 50px;
+}
+
+/* Base Styles */
+* {
+ margin: 0;
+ padding: 0;
+ box-sizing: border-box;
+}
+
+html {
+ font-size: 16px;
+ -webkit-text-size-adjust: 100%;
+ scroll-behavior: smooth;
+}
+
+body {
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+ background: linear-gradient(135deg, var(--bg-dark) 0%, var(--bg-medium) 50%, var(--bg-light) 100%);
+ color: var(--text-primary);
+ min-height: 100vh;
+ position: relative;
+ overflow-x: hidden;
+ line-height: 1.6;
+ padding-bottom: var(--footer-height);
+}
+
+/* Optimized Animations */
+@keyframes slideIn {
+ from { transform: translateX(100%); opacity: 0; }
+ to { transform: translateX(0); opacity: 1; }
+}
+
+@keyframes slideOut {
+ from { transform: translateX(0); opacity: 1; }
+ to { transform: translateX(100%); opacity: 0; }
+}
+
+@keyframes fadeIn {
+ from { opacity: 0; transform: translateY(20px); }
+ to { opacity: 1; transform: translateY(0); }
+}
+
+@keyframes pulse {
+ 0%, 100% { transform: scale(1); }
+ 50% { transform: scale(1.05); }
+}
+
+@keyframes spin {
+ from { transform: rotate(0deg); }
+ to { transform: rotate(360deg); }
+}
+
+@keyframes batSignal {
+ 0% { transform: rotate(0deg) scale(1); }
+ 50% { transform: rotate(5deg) scale(1.1); }
+ 100% { transform: rotate(0deg) scale(1); }
+}
+
+@keyframes gradientShift {
+ 0% { background-position: 0% 50%; }
+ 50% { background-position: 100% 50%; }
+ 100% { background-position: 0% 50%; }
+}
+
+@keyframes oceanWaves {
+ 0%, 100% { box-shadow: 0 0 30px rgba(0, 255, 255, 0.3), 0 0 60px rgba(255, 215, 0, 0.2); }
+ 50% { box-shadow: 0 0 50px rgba(0, 255, 255, 0.5), 0 0 80px rgba(255, 215, 0, 0.4); }
+}
+
+@keyframes shine {
+ 0% { transform: rotate(45deg) translateX(-100%); }
+ 100% { transform: rotate(45deg) translateX(100%); }
+}
+
+@keyframes swingIn {
+ 0% { transform: rotate(-10deg) scale(0.8); opacity: 0; }
+ 50% { transform: rotate(5deg) scale(1.02); }
+ 100% { transform: rotate(0deg) scale(1); opacity: 1; }
+}
+
+@keyframes wave {
+ 0%, 100% { top: -100%; }
+ 50% { top: 100%; }
+}
+
+@keyframes rotate {
+ from { transform: rotate(0deg); }
+ to { transform: rotate(360deg); }
+}
+
+@keyframes blink {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0.5; }
+}
+
+@keyframes borderFlow { /* NOTE(review): border-image-source gradients animate discretely (no smooth interpolation) in current engines — confirm the flowing-border effect actually renders as intended */
+ 0% { border-image-slice: 1; border-image-source: linear-gradient(90deg, #00ffff, #ffd700, #ff3333, #00ffff); }
+ 100% { border-image-slice: 1; border-image-source: linear-gradient(90deg, #00ffff, #ff3333, #ffd700, #00ffff); }
+}
+
+@keyframes batFly {
+ 0%, 100% { transform: scale(1) rotate(0deg); }
+ 33% { transform: scale(1.2) rotate(10deg); }
+ 66% { transform: scale(1.1) rotate(-10deg); }
+}
+
+@keyframes webSwing {
+ 0%, 100% { transform: rotate(0deg) scale(1); }
+ 33% { transform: rotate(10deg) scale(1.1); }
+ 66% { transform: rotate(-10deg) scale(0.9); }
+}
+
+@keyframes shimmer {
+ 0% { background-position: -1000px 0; }
+ 100% { background-position: 1000px 0; }
+}
+
+/* Background Effects */
+body::before {
+ content: '';
+ position: fixed;
+ top: -50%;
+ left: -50%;
+ width: 200%;
+ height: 200%;
+ background: radial-gradient(circle at 30% 50%, rgba(10, 15, 30, 0.8) 0%, transparent 50%),
+ repeating-linear-gradient(45deg, rgba(255, 215, 0, 0.02) 0px, rgba(255, 215, 0, 0.02) 2px, transparent 2px, transparent 8px);
+ z-index: -1;
+ animation: batSignal 20s linear infinite;
+ pointer-events: none;
+}
+
+body::after {
+ content: '';
+ position: fixed;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: url('data:image/svg+xml;utf8,'); /* NOTE(review): the inline-SVG payload of this data URI is empty — it paints nothing; restore the texture markup or remove this rule */
+ pointer-events: none;
+ z-index: -1;
+ opacity: 0.3;
+}
+
+/* Container */
+.container {
+ width: 100%;
+ max-width: 1400px;
+ margin: 0 auto;
+ padding: var(--spacing-lg);
+ position: relative;
+ backdrop-filter: blur(10px);
+}
+
+/* Header - Enhanced for mobile */
+.header {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ text-align: center;
+ background: linear-gradient(135deg, rgba(0, 20, 40, 0.95) 0%, rgba(0, 50, 80, 0.95) 50%, rgba(0, 30, 60, 0.95) 100%);
+ border: 2px solid transparent;
+ border-image: linear-gradient(45deg, var(--primary-blue), var(--primary-gold), var(--primary-red)) 1;
+ border-radius: var(--radius-xl);
+ padding: var(--spacing-xl);
+ margin-bottom: var(--spacing-xl);
+ box-shadow: 0 0 30px rgba(0, 255, 255, 0.2), 0 0 60px rgba(255, 215, 0, 0.1);
+ position: relative;
+ overflow: hidden;
+ animation: oceanWaves 8s ease-in-out infinite;
+}
+
+@media (min-width: 768px) {
+ .header {
+ flex-direction: row;
+ justify-content: space-between;
+ text-align: left;
+ padding: var(--spacing-xl) var(--spacing-xxl);
+ }
+}
+
+.header-content {
+ flex: 1;
+}
+
+.header h1 {
+ font-size: clamp(var(--font-xxl), 5vw, var(--font-xxxl));
+ background: linear-gradient(45deg, var(--primary-blue), var(--primary-gold), var(--primary-red));
+ -webkit-background-clip: text; background-clip: text; /* standard property so non-WebKit engines clip the gradient to the glyphs too */
+ -webkit-text-fill-color: transparent;
+ background-size: 300% 300%;
+ animation: gradientShift 8s ease infinite;
+ margin-bottom: var(--spacing-xs);
+ line-height: 1.2;
+ word-wrap: break-word;
+}
+
+.header p {
+ font-size: clamp(var(--font-sm), 2vw, var(--font-md));
+ opacity: 0.9;
+ color: var(--text-secondary);
+}
+
+.header-actions {
+ margin-top: var(--spacing-md);
+}
+
+@media (min-width: 768px) {
+ .header-actions {
+ margin-top: 0;
+ margin-left: var(--spacing-xl);
+ }
+}
+
+.security-badge {
+ background: linear-gradient(135deg, #000 0%, #1a1a1a 100%);
+ padding: var(--spacing-xs) var(--spacing-lg);
+ border-radius: var(--radius-full);
+ border: 2px solid var(--primary-gold);
+ box-shadow: var(--shadow-gold);
+ animation: pulse 2s ease-in-out infinite;
+ font-size: clamp(var(--font-xs), 1.5vw, var(--font-sm));
+ font-weight: 600;
+ display: inline-block;
+}
+
+/* Tabs - Enhanced for mobile */
+.tabs {
+ display: flex;
+ flex-wrap: wrap;
+ gap: var(--spacing-xs);
+ margin-bottom: var(--spacing-xl);
+ background: var(--bg-overlay);
+ padding: var(--spacing-sm);
+ border-radius: var(--radius-full);
+ backdrop-filter: blur(10px);
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+.tab-btn {
+ padding: var(--spacing-sm) var(--spacing-lg);
+ background: transparent;
+ border: none;
+ color: var(--text-secondary);
+ border-radius: var(--radius-full);
+ cursor: pointer;
+ font-size: clamp(var(--font-sm), 2vw, var(--font-md));
+ font-weight: 600;
+ transition: all var(--transition-normal);
+ flex: 1 1 auto;
+ min-width: fit-content;
+ white-space: nowrap;
+ position: relative;
+ overflow: hidden;
+}
+
+@media (max-width: 767px) { /* 767px (was 768px): at exactly 768px wide this rule collided with the min-width:768px desktop rules */
+ .tab-btn {
+ flex: 1 1 calc(50% - var(--spacing-xs));
+ white-space: normal;
+ text-align: center;
+ padding: var(--spacing-md) var(--spacing-sm);
+ }
+}
+
+@media (max-width: 479px) { /* 479px (was 480px): avoids overlap with this file's min-width:480px rules at exactly 480px wide */
+ .tab-btn {
+ flex: 1 1 100%;
+ }
+}
+
+.tab-btn::before {
+ content: '';
+ position: absolute;
+ top: -50%;
+ left: -50%;
+ width: 200%;
+ height: 200%;
+ background: linear-gradient(45deg, transparent, rgba(255, 215, 0, 0.1), rgba(0, 255, 255, 0.1), transparent);
+ transform: rotate(45deg);
+ animation: shine 4s linear infinite;
+ opacity: 0;
+ transition: opacity 0.3s;
+ pointer-events: none;
+}
+
+.tab-btn:hover::before {
+ opacity: 1;
+}
+
+.tab-btn:hover {
+ color: var(--primary-gold);
+ transform: translateY(-1px);
+}
+
+.tab-btn.active {
+ background: linear-gradient(135deg, var(--primary-gold) 0%, var(--primary-gold-dark) 100%);
+ color: #000;
+ box-shadow: var(--shadow-gold);
+ border: none;
+}
+
+/* Tab Content */
+.tab-content {
+ display: none;
+ background: rgba(10, 20, 30, 0.8);
+ backdrop-filter: blur(10px);
+ border-radius: var(--radius-xl);
+ padding: var(--spacing-xl);
+ border: 1px solid rgba(255, 215, 0, 0.2);
+ animation: fadeIn var(--transition-slow);
+}
+
+.tab-content.active {
+ display: block;
+}
+
+.tab-content h2 {
+ font-size: clamp(var(--font-xl), 4vw, var(--font-xxl));
+ margin-bottom: var(--spacing-xl);
+ color: var(--primary-gold);
+ border-bottom: 2px solid rgba(255, 215, 0, 0.3);
+ padding-bottom: var(--spacing-sm);
+}
+
+.tab-content h3 {
+ font-size: clamp(var(--font-lg), 3vw, var(--font-xl));
+ margin: var(--spacing-lg) 0;
+ color: var(--primary-blue);
+}
+
+.tab-content h4 {
+ font-size: clamp(var(--font-md), 2.5vw, var(--font-lg));
+ color: var(--primary-red);
+ margin-bottom: var(--spacing-sm);
+}
+
+/* Migration Layout - Enhanced for mobile */
+.migration-layout {
+ display: grid;
+ grid-template-columns: 1fr;
+ gap: var(--spacing-xl);
+}
+
+@media (min-width: 992px) {
+ .migration-layout {
+ grid-template-columns: 1fr 1fr;
+ }
+}
+
+.migration-column {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-lg);
+}
+
+/* Environment Layout */
+.environment-layout {
+ display: grid;
+ grid-template-columns: 1fr;
+ gap: var(--spacing-xl);
+}
+
+@media (min-width: 992px) {
+ .environment-layout {
+ grid-template-columns: 1fr 1fr;
+ }
+}
+
+.environment-column {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-lg);
+}
+
+/* Config Sections */
+.config-section {
+ background: rgba(0, 0, 0, 0.2);
+ border-radius: var(--radius-lg);
+ padding: var(--spacing-lg);
+ border: 1px solid rgba(255, 215, 0, 0.1);
+}
+
+/* Config Cards */
+.config-cards {
+ display: grid;
+ grid-template-columns: 1fr;
+ gap: var(--spacing-lg);
+ margin: var(--spacing-lg) 0;
+}
+
+@media (min-width: 640px) {
+ .config-cards {
+ grid-template-columns: repeat(2, 1fr);
+ }
+}
+
+@media (min-width: 1200px) {
+ .config-cards {
+ grid-template-columns: repeat(4, 1fr);
+ }
+}
+
+.config-card {
+ background: linear-gradient(135deg, rgba(20, 20, 30, 0.95) 0%, rgba(30, 30, 40, 0.95) 100%);
+ border-radius: var(--radius-lg);
+ padding: var(--spacing-lg);
+ border-left: 4px solid var(--primary-red);
+ transition: all var(--transition-normal);
+ box-shadow: var(--shadow-md);
+ position: relative;
+ overflow: hidden;
+}
+
+.config-card::before {
+ content: '';
+ position: absolute;
+ top: -50%;
+ left: -50%;
+ width: 200%;
+ height: 200%;
+ background: radial-gradient(circle at 30% 50%, rgba(255, 51, 51, 0.1) 0%, transparent 50%);
+ animation: webSwing 8s ease-in-out infinite;
+ pointer-events: none;
+}
+
+.config-card:hover {
+ transform: translateY(-3px);
+ box-shadow: var(--shadow-lg), 0 0 20px rgba(255, 215, 0, 0.2);
+ border-left-color: var(--primary-gold);
+}
+
+.config-card h4 {
+ background: linear-gradient(45deg, var(--primary-red), var(--primary-gold));
+ -webkit-background-clip: text; background-clip: text; /* standard property so non-WebKit engines clip the gradient to the glyphs too */
+ -webkit-text-fill-color: transparent;
+ margin-bottom: var(--spacing-md);
+ font-size: var(--font-lg);
+ position: relative;
+ z-index: 1;
+}
+
+.config-card pre {
+ background: rgba(0, 0, 0, 0.6);
+ padding: var(--spacing-md);
+ border-radius: var(--radius-md);
+ color: var(--text-secondary);
+ font-family: 'Courier New', monospace;
+ font-size: var(--font-sm);
+ overflow-x: auto;
+ white-space: pre-wrap;
+ word-wrap: break-word;
+ border: 1px solid rgba(255, 215, 0, 0.2);
+ position: relative;
+ z-index: 1;
+}
+
+/* Input Groups - Enhanced for mobile */
+.config-grid {
+ display: grid;
+ grid-template-columns: 1fr;
+ gap: var(--spacing-md);
+ margin-bottom: var(--spacing-lg);
+}
+
+@media (min-width: 480px) {
+ .config-grid {
+ grid-template-columns: repeat(2, 1fr);
+ }
+}
+
+@media (min-width: 768px) {
+ .config-grid {
+ grid-template-columns: repeat(3, 1fr);
+ }
+}
+
+.config-group {
+ background: rgba(0, 30, 50, 0.6);
+ padding: var(--spacing-md);
+ border-radius: var(--radius-md);
+ border: 1px solid rgba(0, 255, 255, 0.2);
+ transition: all var(--transition-normal);
+}
+
+.config-group:hover {
+ border-color: var(--primary-blue);
+ background: rgba(0, 50, 80, 0.7);
+ box-shadow: var(--shadow-blue);
+}
+
+.config-group label {
+ display: block;
+ margin-bottom: var(--spacing-xs);
+ color: var(--primary-blue);
+ font-weight: 600;
+ font-size: var(--font-sm);
+ text-transform: uppercase;
+ letter-spacing: 0.5px;
+}
+
+.config-group input,
+.config-group select,
+.config-group textarea {
+ width: 100%;
+ padding: var(--spacing-sm) var(--spacing-md);
+ background: rgba(0, 0, 0, 0.5);
+ border: 2px solid rgba(0, 255, 255, 0.2);
+ border-radius: var(--radius-md);
+ color: var(--text-primary);
+ font-size: var(--font-md);
+ transition: all var(--transition-normal);
+}
+
+.config-group input:hover,
+.config-group select:hover,
+.config-group textarea:hover {
+ border-color: var(--primary-blue);
+}
+
+.config-group input:focus,
+.config-group select:focus,
+.config-group textarea:focus {
+ outline: none;
+ border-color: var(--primary-gold);
+ box-shadow: var(--shadow-gold);
+ background: rgba(0, 0, 0, 0.7);
+}
+
+.config-group input[type="checkbox"] {
+ width: auto;
+ margin-right: var(--spacing-sm);
+ accent-color: var(--primary-gold);
+ transform: scale(1.1);
+ cursor: pointer;
+}
+
+/* URI Input Group - Enhanced */
+.uri-input-group {
+ margin: var(--spacing-lg) 0;
+}
+
+.uri-input-group label {
+ display: block;
+ color: var(--primary-blue);
+ font-weight: 600;
+ margin-bottom: var(--spacing-xs);
+}
+
+.uri-input-wrapper {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-sm);
+}
+
+@media (min-width: 480px) {
+ .uri-input-wrapper {
+ flex-direction: row;
+ }
+}
+
+.uri-input-wrapper input {
+ flex: 1;
+ padding: var(--spacing-md);
+ background: rgba(0, 0, 0, 0.5);
+ border: 2px solid rgba(0, 255, 255, 0.2);
+ border-radius: var(--radius-md);
+ color: var(--text-primary);
+ font-size: var(--font-md);
+}
+
+.uri-input-wrapper input:focus {
+ border-color: var(--primary-gold);
+ box-shadow: var(--shadow-gold);
+ outline: none;
+}
+
+.uri-input-wrapper button {
+ width: 100%;
+}
+
+@media (min-width: 480px) {
+ .uri-input-wrapper button {
+ width: auto;
+ }
+}
+
+/* Action Groups */
+.action-group {
+ display: flex;
+ flex-wrap: wrap;
+ gap: var(--spacing-sm);
+ margin: var(--spacing-lg) 0;
+}
+
+.action-group button {
+ flex: 1 1 auto;
+}
+
+@media (max-width: 479px) { /* 479px (was 480px): avoids overlap with this file's min-width:480px rules at exactly 480px wide */
+ .action-group button {
+ flex: 1 1 100%;
+ }
+}
+
+/* Buttons - Enhanced */
+button {
+ padding: var(--spacing-sm) var(--spacing-xl);
+ background: linear-gradient(135deg, #1a1a1a 0%, #2a2a2a 100%);
+ border: 2px solid transparent;
+ background-clip: padding-box;
+ color: var(--text-primary);
+ border-radius: var(--radius-full);
+ cursor: pointer;
+ font-size: var(--font-md);
+ font-weight: 600;
+ transition: all var(--transition-normal);
+ text-transform: uppercase;
+ letter-spacing: 0.5px;
+ margin: var(--spacing-xs);
+ white-space: nowrap;
+ position: relative;
+ overflow: hidden;
+ box-shadow: var(--shadow-sm);
+}
+
+button::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ border-radius: var(--radius-full);
+ padding: 2px;
+ background: linear-gradient(45deg, var(--primary-gold), var(--primary-blue));
+ -webkit-mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0);
+ mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0);
+ -webkit-mask-composite: xor;
+ mask-composite: exclude;
+ opacity: 0;
+ transition: opacity var(--transition-normal);
+ pointer-events: none;
+}
+
+button:hover::after {
+ opacity: 1;
+}
+
+button:hover {
+ transform: translateY(-2px);
+ box-shadow: var(--shadow-lg), 0 0 20px rgba(255, 215, 0, 0.3);
+}
+
+button:active {
+ transform: translateY(0);
+}
+
+button:focus-visible {
+ outline: 2px solid var(--primary-gold);
+ outline-offset: 2px;
+}
+
+button:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+ pointer-events: none;
+}
+
+/* Button variants */
+.btn-migrate {
+ background: linear-gradient(135deg, var(--primary-gold) 0%, var(--primary-gold-dark) 100%);
+ color: #000;
+ font-size: var(--font-lg);
+ padding: var(--spacing-md) var(--spacing-xxl);
+ width: 100%;
+ max-width: 100%;
+}
+
+@media (min-width: 768px) {
+ .btn-migrate {
+ max-width: 400px;
+ }
+}
+
+.btn-migrate::after {
+ background: linear-gradient(45deg, #000, #333);
+}
+
+.btn-warning {
+ background: linear-gradient(135deg, var(--primary-red) 0%, var(--primary-red-dark) 100%);
+ color: white;
+}
+
+.btn-success {
+ background: linear-gradient(135deg, var(--primary-green) 0%, var(--primary-green-dark) 100%);
+ color: white;
+}
+
+.btn-small {
+ padding: var(--spacing-xs) var(--spacing-lg);
+ font-size: var(--font-sm);
+}
+
+.btn-large {
+ padding: var(--spacing-lg) var(--spacing-xxl);
+ font-size: var(--font-lg);
+}
+
+.btn-secondary {
+ background: linear-gradient(135deg, #2a2a2a 0%, #3a3a3a 100%);
+}
+
+.btn-test {
+ background: linear-gradient(135deg, #2a2a2a 0%, #3a3a3a 100%);
+}
+
+.btn-primary {
+ background: linear-gradient(135deg, var(--primary-blue), #00cccc);
+ color: #000;
+}
+
+/* Migration Options Panel */
+.migration-options-panel {
+ margin-top: var(--spacing-xl);
+ padding: var(--spacing-xl);
+ background: rgba(0, 0, 0, 0.3);
+ border-radius: var(--radius-lg);
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+.options-row {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-md);
+ margin-bottom: var(--spacing-md);
+}
+
+@media (min-width: 640px) {
+ .options-row {
+ flex-direction: row;
+ flex-wrap: wrap;
+ }
+}
+
+.options-row.full-width {
+ flex-direction: column;
+}
+
+.options-row .option-item {
+ flex: 1 1 200px;
+}
+
+/* Checkbox Labels */
+.checkbox-label {
+ display: flex;
+ align-items: center;
+ gap: var(--spacing-sm);
+ color: var(--text-secondary);
+ cursor: pointer;
+ padding: var(--spacing-sm);
+ background: rgba(0, 30, 50, 0.5);
+ border-radius: var(--radius-md);
+ border: 1px solid rgba(0, 255, 255, 0.2);
+ transition: all var(--transition-fast);
+}
+
+.checkbox-label:hover {
+ border-color: var(--primary-blue);
+ background: rgba(0, 50, 80, 0.6);
+}
+
+.checkbox-label input[type="checkbox"] {
+ accent-color: var(--primary-gold);
+ width: 18px;
+ height: 18px;
+ cursor: pointer;
+}
+
+/* Config Row */
+.config-row {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-md);
+ margin: var(--spacing-lg) 0;
+}
+
+@media (min-width: 480px) {
+ .config-row {
+ flex-direction: row;
+ align-items: center;
+ justify-content: space-between;
+ }
+}
+
+.select-wrapper {
+ flex: 1;
+}
+
+.select-wrapper select {
+ width: 100%;
+ padding: var(--spacing-sm) var(--spacing-md);
+ background: rgba(0, 0, 0, 0.5);
+ border: 2px solid rgba(0, 255, 255, 0.2);
+ border-radius: var(--radius-md);
+ color: var(--text-primary);
+ font-size: var(--font-md);
+}
+
+/* Format Controls */
+.format-controls {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-sm);
+ margin: var(--spacing-lg) 0;
+}
+
+@media (min-width: 480px) {
+ .format-controls {
+ flex-direction: row;
+ }
+}
+
+.format-controls select {
+ flex: 1;
+ padding: var(--spacing-sm) var(--spacing-md);
+ background: rgba(0, 0, 0, 0.5);
+ border: 2px solid rgba(0, 255, 255, 0.2);
+ border-radius: var(--radius-md);
+ color: var(--text-primary);
+ font-size: var(--font-md);
+}
+
+.format-controls button {
+ width: 100%;
+}
+
+@media (min-width: 480px) {
+ .format-controls button {
+ width: auto;
+ }
+}
+
+/* Status Messages */
+.status-message {
+ margin-top: var(--spacing-lg);
+ padding: var(--spacing-lg);
+ border-radius: var(--radius-lg);
+ animation: fadeIn var(--transition-normal);
+ overflow-x: auto;
+}
+
+.status-message.success {
+ background: linear-gradient(135deg, rgba(0, 255, 0, 0.15), rgba(0, 200, 0, 0.15));
+ border: 1px solid var(--primary-green);
+ box-shadow: 0 0 20px rgba(0, 255, 0, 0.2);
+}
+
+.status-message.error {
+ background: linear-gradient(135deg, rgba(255, 0, 0, 0.15), rgba(200, 0, 0, 0.15));
+ border: 1px solid var(--primary-red);
+ box-shadow: 0 0 20px rgba(255, 0, 0, 0.2);
+}
+
+.status-message p {
+ margin: var(--spacing-xs) 0;
+ color: var(--text-secondary);
+ font-size: var(--font-sm);
+}
+
+/* Results Sections */
+.results-section {
+ margin-top: var(--spacing-xl);
+ padding: var(--spacing-lg);
+ background: rgba(0, 0, 0, 0.3);
+ border-radius: var(--radius-lg);
+ border: 1px solid rgba(255, 215, 0, 0.1);
+}
+
+/* Schema Cards */
+.schemas-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(120px, 1fr));
+ gap: var(--spacing-sm);
+ margin: var(--spacing-lg) 0;
+}
+
+.schema-card {
+ background: rgba(0, 50, 80, 0.6);
+ padding: var(--spacing-md);
+ border-radius: var(--radius-md);
+ border: 1px solid rgba(0, 255, 255, 0.3);
+ text-align: center;
+ cursor: pointer;
+ transition: all var(--transition-normal);
+ font-size: var(--font-sm);
+ user-select: none;
+ position: relative;
+ overflow: hidden;
+}
+
+.schema-card::before {
+ content: '';
+ position: absolute;
+ top: -100%;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background: linear-gradient(transparent, rgba(0, 255, 255, 0.2));
+ animation: wave 3s ease-in-out infinite;
+ pointer-events: none;
+}
+
+.schema-card:hover {
+ transform: translateY(-2px);
+ border-color: var(--primary-gold);
+ box-shadow: var(--shadow-gold);
+ background: rgba(0, 80, 120, 0.7);
+}
+
+.schema-card label {
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: var(--spacing-xs);
+}
+
+.schema-card input[type="checkbox"] {
+ accent-color: var(--primary-gold);
+ width: 16px;
+ height: 16px;
+ cursor: pointer;
+}
+
+/* Tables */
+.table-group {
+ margin-bottom: var(--spacing-xl);
+ background: rgba(0, 0, 0, 0.3);
+ border-radius: var(--radius-lg);
+ padding: var(--spacing-lg);
+ border: 1px solid rgba(255, 215, 0, 0.1);
+ overflow-x: auto;
+}
+
+.group-stats {
+ margin-bottom: var(--spacing-md);
+ color: var(--text-secondary);
+ font-size: var(--font-sm);
+ display: flex;
+ gap: var(--spacing-sm);
+ align-items: center;
+ flex-wrap: wrap;
+}
+
+.group-stats span {
+ background: rgba(255, 215, 0, 0.1);
+ padding: var(--spacing-xs) var(--spacing-sm);
+ border-radius: var(--radius-full);
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+.table-responsive {
+ overflow-x: auto;
+ margin: var(--spacing-md) 0;
+}
+
+.tables-table {
+ width: 100%;
+ min-width: 600px;
+ border-collapse: collapse;
+ background: rgba(0, 0, 0, 0.3);
+ border-radius: var(--radius-md);
+ overflow: hidden;
+}
+
+.tables-table th {
+ background: rgba(0, 0, 0, 0.6);
+ padding: var(--spacing-md);
+ text-align: left;
+ color: var(--primary-gold);
+ font-weight: 600;
+ font-size: var(--font-sm);
+ border-bottom: 2px solid var(--primary-gold);
+}
+
+.tables-table td {
+ padding: var(--spacing-sm) var(--spacing-md);
+ border-bottom: 1px solid rgba(255, 215, 0, 0.1);
+ font-size: var(--font-sm);
+}
+
+.tables-table tr:hover td {
+ background: rgba(255, 215, 0, 0.1);
+}
+
+.tables-table input[type="checkbox"] {
+ accent-color: var(--primary-gold);
+ cursor: pointer;
+}
+
+/* Bucket Cards */
+.bucket-card {
+ background: rgba(0, 30, 50, 0.8);
+ padding: var(--spacing-lg);
+ margin: var(--spacing-md) 0;
+ border-radius: var(--radius-lg);
+ border: 1px solid rgba(0, 255, 255, 0.3);
+ transition: all var(--transition-normal);
+ position: relative;
+ overflow: hidden;
+}
+
+.bucket-card::before {
+ content: '';
+ position: absolute;
+ top: -50%;
+ left: -50%;
+ width: 200%;
+ height: 200%;
+ background: radial-gradient(circle at 30% 50%, rgba(0, 255, 255, 0.15), transparent 70%);
+ animation: rotate 10s linear infinite;
+ pointer-events: none;
+}
+
+.bucket-card:hover {
+ transform: translateX(5px);
+ border-color: var(--primary-gold);
+ box-shadow: var(--shadow-gold);
+ background: rgba(0, 50, 80, 0.9);
+}
+
+.bucket-name {
+ font-size: clamp(var(--font-lg), 3vw, var(--font-xl));
+ color: var(--primary-gold);
+ margin-bottom: var(--spacing-sm);
+ font-weight: 600;
+ position: relative;
+ z-index: 1;
+ word-break: break-word;
+}
+
+.bucket-details {
+ display: flex;
+ flex-wrap: wrap;
+ gap: var(--spacing-sm);
+ color: var(--text-secondary);
+ font-size: var(--font-sm);
+ position: relative;
+ z-index: 1;
+}
+
+.bucket-details span {
+ background: rgba(0, 0, 0, 0.6);
+ padding: var(--spacing-xs) var(--spacing-md);
+ border-radius: var(--radius-full);
+ border: 1px solid rgba(0, 255, 255, 0.2);
+ word-break: break-word;
+}
+
+.select-bucket-btn {
+ margin-top: var(--spacing-md);
+ width: 100%;
+ position: relative;
+ z-index: 1;
+}
+
+/* Migration Cards */
+.migration-card {
+ background: linear-gradient(135deg, rgba(20, 20, 30, 0.95), rgba(30, 30, 40, 0.95));
+ padding: var(--spacing-lg);
+ margin: var(--spacing-md) 0;
+ border-radius: var(--radius-lg);
+ border: 1px solid rgba(255, 215, 0, 0.2);
+ transition: all var(--transition-normal);
+}
+
+.migration-card:hover {
+ transform: translateX(5px);
+ box-shadow: var(--shadow-gold);
+ border-color: var(--primary-gold);
+}
+
+.migration-header {
+ display: flex;
+ flex-wrap: wrap;
+ gap: var(--spacing-sm);
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: var(--spacing-md);
+}
+
+.migration-id {
+ font-family: 'Courier New', monospace;
+ color: var(--primary-gold);
+ font-size: var(--font-md);
+ font-weight: 600;
+ word-wrap: break-word;
+}
+
+.migration-status {
+ padding: var(--spacing-xs) var(--spacing-md);
+ border-radius: var(--radius-full);
+ font-weight: 600;
+ text-transform: uppercase;
+ font-size: var(--font-xs);
+ display: inline-block;
+}
+
+.migration-status.running {
+ background: rgba(255, 215, 0, 0.2);
+ color: var(--primary-gold);
+ border: 1px solid var(--primary-gold);
+ animation: pulse 1.5s ease-in-out infinite;
+}
+
+.migration-status.completed {
+ background: rgba(0, 255, 0, 0.2);
+ color: var(--primary-green);
+ border: 1px solid var(--primary-green);
+}
+
+.migration-status.failed {
+ background: rgba(255, 0, 0, 0.2);
+ color: var(--primary-red);
+ border: 1px solid var(--primary-red);
+}
+
+.migration-body p {
+ margin: var(--spacing-xs) 0;
+ color: var(--text-secondary);
+ font-size: var(--font-sm);
+}
+
+.migration-body strong {
+ color: var(--text-primary);
+}
+
+.migration-actions {
+ display: flex;
+ flex-wrap: wrap;
+ gap: var(--spacing-sm);
+ margin-top: var(--spacing-md);
+}
+
+.migration-actions button {
+ flex: 1 1 auto;
+}
+
+@media (max-width: 479px) { /* 479px (was 480px): avoids overlap with this file's min-width:480px rules at exactly 480px wide */
+ .migration-actions button {
+ flex: 1 1 100%;
+ }
+}
+
+/* Migration Controls Panel */
+.migration-controls-panel {
+ margin-top: var(--spacing-xl);
+ text-align: center;
+}
+
+.migration-controls {
+ margin-top: var(--spacing-xl);
+ text-align: center;
+}
+
+/* Logs */
+#logsContainer {
+ max-height: 400px;
+ overflow-y: auto;
+ padding: var(--spacing-md);
+ background: rgba(0, 0, 0, 0.4);
+ border-radius: var(--radius-md);
+ font-family: 'Courier New', monospace;
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+.log-entry {
+ padding: var(--spacing-sm) var(--spacing-md);
+ margin: var(--spacing-xs) 0;
+ border-radius: var(--radius-sm);
+ animation: fadeIn var(--transition-normal);
+ font-size: var(--font-sm);
+ word-wrap: break-word;
+ border-left: 4px solid transparent;
+ transition: background var(--transition-fast);
+}
+
+.log-entry:hover {
+ background: rgba(255, 255, 255, 0.05);
+}
+
+.log-entry.info {
+ background: rgba(0, 255, 255, 0.1);
+ border-left-color: var(--primary-blue);
+}
+
+.log-entry.success {
+ background: rgba(0, 255, 0, 0.1);
+ border-left-color: var(--primary-green);
+}
+
+.log-entry.error {
+ background: rgba(255, 0, 0, 0.1);
+ border-left-color: var(--primary-red);
+}
+
+.log-entry.warning {
+ background: rgba(255, 255, 0, 0.1);
+ border-left-color: #ffff00;
+}
+
+.log-time {
+ color: var(--primary-gold);
+ margin-right: var(--spacing-md);
+ font-weight: 600;
+ display: inline-block;
+ min-width: 80px;
+}
+
+.log-message {
+ color: var(--text-primary);
+ display: inline;
+}
+
+/* Notifications */
+.notification {
+ position: fixed;
+ top: var(--spacing-xl);
+ right: var(--spacing-xl);
+ left: var(--spacing-xl);
+ padding: var(--spacing-md) var(--spacing-xl);
+ color: white;
+ border-radius: var(--radius-lg);
+ z-index: 9999;
+ box-shadow: var(--shadow-lg);
+ animation: slideIn var(--transition-normal);
+ display: flex;
+ align-items: center;
+ gap: var(--spacing-md);
+ min-width: auto;
+ max-width: 480px;
+ margin: 0 auto;
+ backdrop-filter: blur(10px);
+ border: 1px solid rgba(255, 255, 255, 0.2);
+ font-weight: 500;
+}
+
+@media (min-width: 768px) {
+ .notification {
+ left: auto;
+ right: var(--spacing-xl);
+ min-width: 320px;
+ }
+}
+
+.notification.error {
+ background: linear-gradient(135deg, var(--primary-error), #d32f2f);
+}
+
+.notification.success {
+ background: linear-gradient(135deg, var(--primary-success), #388e3c);
+}
+
+.notification.warning {
+ background: linear-gradient(135deg, var(--primary-warning), #f57c00);
+}
+
+.notification.info {
+ background: linear-gradient(135deg, var(--primary-info), #1976d2);
+}
+
+.notification-icon {
+ font-size: var(--font-xl);
+}
+
+.notification-message {
+ flex: 1;
+ font-size: var(--font-md);
+ word-break: break-word;
+}
+
+.notification-close {
+ background: none;
+ border: none;
+ color: white;
+ font-size: var(--font-xl);
+ cursor: pointer;
+ padding: 0 var(--spacing-xs);
+ line-height: 1;
+ opacity: 0.7;
+ transition: opacity var(--transition-fast);
+}
+
+.notification-close:hover {
+ opacity: 1;
+ background: none;
+ transform: none;
+ box-shadow: none;
+}
+
+.notification-close::after {
+ display: none;
+}
+
+/* Status Bar */
+.status-bar {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ background: rgba(0, 20, 40, 0.98);
+ backdrop-filter: blur(10px);
+ padding: var(--spacing-sm) var(--spacing-xl);
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-between;
+ align-items: center;
+ gap: var(--spacing-sm);
+ border-top: 2px solid;
+ border-image: linear-gradient(90deg, var(--primary-blue), var(--primary-gold), var(--primary-red), var(--primary-blue)) 1;
+ animation: borderFlow 4s linear infinite;
+ z-index: 100;
+ font-size: var(--font-sm);
+}
+
+#statusInfo {
+ color: var(--text-secondary);
+ flex: 1;
+ min-width: 200px;
+ font-size: var(--font-sm);
+ word-break: break-word;
+}
+
+#loading {
+ color: var(--primary-gold);
+ font-weight: 600;
+ animation: spin 1s linear infinite; /* NOTE(review): this rotates the whole element, text included — if only a spinner glyph should spin, move the animation to a child element; confirm intent */
+ display: inline-flex;
+ align-items: center;
+ gap: var(--spacing-xs);
+}
+
+/* Loading Overlay */
+.loading-overlay {
+ position: fixed;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: rgba(0, 0, 0, 0.9);
+ display: none;
+ justify-content: center;
+ align-items: center;
+ z-index: 99999;
+ backdrop-filter: blur(5px);
+}
+
+.loading-overlay.active {
+ display: flex;
+}
+
+.bat-symbol {
+ width: min(100px, 20vw);
+ height: min(100px, 20vw);
+ background: linear-gradient(45deg, var(--primary-gold), var(--primary-gold-dark));
+ clip-path: polygon(20% 0%, 80% 0%, 100% 20%, 100% 80%, 80% 100%, 20% 100%, 0% 80%, 0% 20%);
+ animation: batFly 2s ease-in-out infinite;
+ box-shadow: 0 0 50px var(--primary-gold), 0 0 100px var(--primary-gold);
+}
+
+/* Migration Stats */
+.migration-stats {
+ margin: var(--spacing-xl) 0;
+ padding: var(--spacing-lg);
+ background: rgba(0, 30, 50, 0.6);
+ border-radius: var(--radius-lg);
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+.stats-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
+ gap: var(--spacing-md);
+ margin-top: var(--spacing-md);
+}
+
+.stat-item {
+ background: rgba(0, 0, 0, 0.5);
+ padding: var(--spacing-md);
+ border-radius: var(--radius-md);
+ text-align: center;
+ border: 1px solid rgba(255, 215, 0, 0.1);
+ transition: all var(--transition-fast);
+}
+
+.stat-item:hover {
+ border-color: var(--primary-gold);
+ transform: translateY(-2px);
+ box-shadow: var(--shadow-gold);
+}
+
+.stat-label {
+ color: var(--primary-blue);
+ font-size: var(--font-sm);
+ text-transform: uppercase;
+ margin-bottom: var(--spacing-xs);
+ font-weight: 500;
+}
+
+.stat-value {
+ color: var(--primary-gold);
+ font-size: var(--font-xl);
+ font-weight: 700;
+}
+
+/* Option Items */
+.option-item {
+ background: rgba(0, 30, 50, 0.6);
+ padding: var(--spacing-md);
+ border-radius: var(--radius-md);
+ border: 1px solid rgba(0, 255, 255, 0.2);
+}
+
+.option-item label {
+ display: block;
+ margin-bottom: var(--spacing-xs);
+ color: var(--primary-blue);
+ font-weight: 600;
+ font-size: var(--font-sm);
+}
+
+.option-item input[type="text"],
+.option-item input[type="number"],
+.option-item select {
+ width: 100%;
+ padding: var(--spacing-sm);
+ background: rgba(0, 0, 0, 0.5);
+ border: 1px solid rgba(0, 255, 255, 0.3);
+ border-radius: var(--radius-sm);
+ color: var(--text-primary);
+ font-size: var(--font-sm);
+ transition: all var(--transition-fast);
+}
+
+.option-item input:focus,
+.option-item select:focus {
+ border-color: var(--primary-gold);
+ box-shadow: var(--shadow-gold);
+ outline: none;
+}
+
+/* Security Info */
+.security-info {
+ background: rgba(0, 30, 50, 0.6);
+ padding: var(--spacing-lg);
+ border-radius: var(--radius-lg);
+ margin: var(--spacing-lg) 0;
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+.security-info p {
+ margin: var(--spacing-sm) 0;
+ font-size: var(--font-md);
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-xs);
+}
+
+@media (min-width: 480px) {
+ .security-info p {
+ flex-direction: row;
+ align-items: center;
+ }
+}
+
+.security-info strong {
+ color: var(--primary-blue);
+ min-width: 120px;
+ display: inline-block;
+}
+
+.security-features {
+ background: rgba(0, 30, 50, 0.6);
+ padding: var(--spacing-lg);
+ border-radius: var(--radius-lg);
+ margin: var(--spacing-lg) 0;
+}
+
+.security-feature {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-xs);
+ padding: var(--spacing-md) 0;
+ border-bottom: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+@media (min-width: 480px) {
+ .security-feature {
+ flex-direction: row;
+ justify-content: space-between;
+ align-items: center;
+ }
+}
+
+.security-feature:last-child {
+ border-bottom: none;
+}
+
+.feature-name {
+ color: var(--primary-blue);
+ font-weight: 500;
+}
+
+.feature-value {
+ color: var(--primary-gold);
+ font-weight: 600;
+}
+
+/* Cleanup Controls */
+.cleanup-controls {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-sm);
+ margin: var(--spacing-lg) 0;
+}
+
+@media (min-width: 480px) {
+ .cleanup-controls {
+ flex-direction: row;
+ }
+}
+
+.cleanup-controls input {
+ flex: 1;
+ padding: var(--spacing-md);
+ background: rgba(0, 0, 0, 0.5);
+ border: 2px solid rgba(255, 51, 51, 0.3);
+ border-radius: var(--radius-md);
+ color: var(--text-primary);
+ font-size: var(--font-md);
+ transition: all var(--transition-fast);
+}
+
+.cleanup-controls input:focus {
+ outline: none;
+ border-color: var(--primary-red);
+ box-shadow: var(--shadow-red);
+}
+
+.cleanup-controls button {
+ width: 100%;
+}
+
+@media (min-width: 480px) {
+ .cleanup-controls button {
+ width: auto;
+ }
+}
+
+/* Environment Preview */
+.env-preview {
+ background: rgba(0, 0, 0, 0.6);
+ padding: var(--spacing-lg);
+ border-radius: var(--radius-lg);
+ font-family: 'Courier New', monospace;
+ font-size: var(--font-sm);
+ overflow-x: auto;
+ white-space: pre-wrap;
+ word-wrap: break-word;
+ max-height: 400px;
+ overflow-y: auto;
+ border: 1px solid rgba(255, 215, 0, 0.2);
+}
+
+/* Section Header */
+.section-header {
+ display: flex;
+ justify-content: flex-end;
+ margin-bottom: var(--spacing-md);
+}
+
+/* No Migrations / No Logs */
+.no-migrations,
+.no-logs {
+ text-align: center;
+ color: var(--text-muted);
+ padding: var(--spacing-xl);
+ font-style: italic;
+ font-size: var(--font-md);
+}
+
+/* Custom Scrollbar */
+::-webkit-scrollbar {
+ width: 10px;
+ height: 10px;
+}
+/* ==================== Progress Modal Styles ==================== */
+.progress-modal {
+ position: fixed;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background: rgba(0, 0, 0, 0.5);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ z-index: 1000;
+ animation: fadeIn 0.3s ease;
+}
+
+.modal-content {
+ background: white;
+ border-radius: 12px;
+ width: 600px;
+ max-width: 90%;
+ max-height: 80vh;
+ overflow-y: auto;
+ box-shadow: 0 10px 30px rgba(0,0,0,0.2);
+ animation: slideUp 0.3s ease;
+}
+
+.modal-header {
+ padding: 20px 24px;
+ border-bottom: 1px solid #e0e0e0;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+ color: white;
+ border-radius: 12px 12px 0 0;
+}
+
+.modal-header h3 {
+ margin: 0;
+ font-size: 18px;
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.migration-status-badge {
+ display: inline-block;
+ width: 10px;
+ height: 10px;
+ border-radius: 50%;
+ animation: pulse 1.5s infinite;
+}
+
+@keyframes pulse {
+ 0% { opacity: 1; }
+ 50% { opacity: 0.5; }
+ 100% { opacity: 1; }
+}
+
+.close-modal {
+ background: rgba(255,255,255,0.2);
+ border: none;
+ font-size: 24px;
+ cursor: pointer;
+ color: white;
+ width: 30px;
+ height: 30px;
+ border-radius: 50%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ transition: background 0.2s;
+}
+
+.close-modal:hover {
+ background: rgba(255,255,255,0.3);
+}
+
+.modal-body {
+ padding: 24px;
+}
+
+.progress-container {
+ display: flex;
+ flex-direction: column;
+ gap: 20px;
+}
+
+.progress-bar-container {
+ position: relative;
+ width: 100%;
+ height: 30px;
+ background: #f0f0f0;
+ border-radius: 15px;
+ overflow: hidden;
+}
+
+.progress-bar-fill {
+ position: absolute;
+ top: 0;
+ left: 0;
+ height: 100%;
+ background: linear-gradient(90deg, #4CAF50, #45a049);
+ transition: width 0.3s ease;
+ border-radius: 15px;
+}
+
+.progress-bar-label {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+ color: white;
+ font-weight: bold;
+ text-shadow: 1px 1px 2px rgba(0,0,0,0.2);
+ z-index: 1;
+}
+
+.progress-bar-text {
+ font-family: monospace;
+ font-size: 14px;
+ color: #4CAF50;
+ background: #f0f0f0;
+ padding: 10px;
+ border-radius: 8px;
+ margin: 0;
+ text-align: center;
+}
+
.progress-stats-grid {
  display: grid;
  grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
  gap: 15px;
  background: #f8f9fa;
  padding: 15px;
  border-radius: 8px;
  border: 1px solid #e0e0e0;
}

/* Scoped to the progress modal's stats grid. The previous bare
   `.stat-item` / `.stat-label` / `.stat-value` selectors redeclared
   classes already themed for `.migration-stats` earlier in this file;
   being later in the cascade at equal specificity, they clobbered that
   themed styling everywhere. Scoping restores the themed stats while
   keeping the modal's light styling unchanged. */
.progress-stats-grid .stat-item {
  display: flex;
  flex-direction: column;
  gap: 5px;
}

.progress-stats-grid .stat-label {
  font-size: 12px;
  color: #666;
  text-transform: uppercase;
  letter-spacing: 0.5px;
}

.progress-stats-grid .stat-value {
  font-size: 16px;
  font-weight: bold;
  color: #333;
}
+
+.current-item {
+ background: #e3f2fd;
+ padding: 15px;
+ border-radius: 8px;
+ border-left: 4px solid #2196F3;
+}
+
+.current-item h4 {
+ margin: 0 0 10px 0;
+ color: #1976D2;
+ font-size: 14px;
+}
+
+.current-object, .current-table {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ font-family: monospace;
+}
+
+.current-icon {
+ font-size: 18px;
+}
+
+.current-name {
+ font-weight: 500;
+ color: #333;
+ word-break: break-all;
+}
+
+.current-progress {
+ margin-left: auto;
+ font-size: 12px;
+ color: #666;
+}
+
+.progress-logs {
+ background: #f5f5f5;
+ border-radius: 8px;
+ padding: 15px;
+}
+
+.progress-logs h4 {
+ margin: 0 0 10px 0;
+ color: #333;
+ font-size: 14px;
+}
+
+.logs-container {
+ max-height: 200px;
+ overflow-y: auto;
+ font-family: monospace;
+ font-size: 12px;
+}
+
+.log-entry {
+ padding: 5px;
+ border-bottom: 1px solid #e0e0e0;
+ display: flex;
+ gap: 10px;
+}
+
+.log-entry:last-child {
+ border-bottom: none;
+}
+
+.log-entry.info {
+ color: #2196F3;
+}
+
+.log-entry.success {
+ color: #4CAF50;
+}
+
+.log-entry.error {
+ color: #f44336;
+ background: #ffebee;
+}
+
+.log-time {
+ color: #999;
+ min-width: 70px;
+}
+
+.log-message {
+ flex: 1;
+ word-break: break-word;
+}
+
+.progress-controls {
+ display: flex;
+ justify-content: flex-end;
+ gap: 10px;
+ margin-top: 10px;
+}
+
+.progress-history {
+ margin-top: 20px;
+ padding: 15px;
+ background: #f8f9fa;
+ border-radius: 8px;
+}
+
+.progress-history h4 {
+ margin: 0 0 10px 0;
+ color: #333;
+}
+
+.history-list {
+ display: flex;
+ flex-direction: column;
+ gap: 5px;
+}
+
+.history-item {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 8px 12px;
+ background: white;
+ border-radius: 6px;
+ border: 1px solid #e0e0e0;
+ font-size: 13px;
+}
+
+.history-item.completed {
+ border-left: 3px solid #4CAF50;
+}
+
+.history-item.failed {
+ border-left: 3px solid #f44336;
+}
+
+.history-id {
+ font-family: monospace;
+ font-weight: bold;
+ color: #333;
+}
+
+.history-type {
+ padding: 2px 8px;
+ background: #e0e0e0;
+ border-radius: 12px;
+ font-size: 11px;
+}
+
+.history-status {
+ padding: 2px 8px;
+ border-radius: 12px;
+ font-size: 11px;
+ text-transform: uppercase;
+}
+
+.history-item.completed .history-status {
+ background: #e8f5e8;
+ color: #4CAF50;
+}
+
+.history-item.failed .history-status {
+ background: #ffebee;
+ color: #f44336;
+}
+
+.history-time {
+ margin-left: auto;
+ color: #999;
+ font-size: 11px;
+}
+
+@keyframes fadeIn {
+ from { opacity: 0; }
+ to { opacity: 1; }
+}
+
+@keyframes slideUp {
+ from { transform: translateY(50px); opacity: 0; }
+ to { transform: translateY(0); opacity: 1; }
+}
+
+/* تحديث أنماط بطاقات الترحيل */
+.migration-card {
+ position: relative;
+ overflow: hidden;
+}
+
+.migration-card .progress-bar-container {
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ width: 100%;
+ height: 4px;
+ background: transparent;
+}
+
+.migration-card .progress-bar-fill {
+ height: 4px;
+ border-radius: 0;
+}
+
+.migration-card .progress-text {
+ display: none;
+}
+::-webkit-scrollbar-track {
+ background: #1a1a1a;
+ border-radius: var(--radius-sm);
+}
+
+::-webkit-scrollbar-thumb {
+ background: linear-gradient(135deg, var(--primary-red), var(--primary-gold));
+ border-radius: var(--radius-sm);
+ border: 2px solid #1a1a1a;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: linear-gradient(135deg, var(--primary-gold), var(--primary-red));
+}
+
+::-webkit-scrollbar-corner {
+ background: #1a1a1a;
+}
+
+/* Utility Classes */
+.full-width {
+ width: 100%;
+ grid-column: 1 / -1;
+}
+
+.text-center {
+ text-align: center;
+}
+
+.text-glow {
+ text-shadow: 0 0 10px var(--primary-blue);
+}
+
+.border-glow {
+ box-shadow: 0 0 10px var(--primary-blue), 0 0 20px var(--primary-gold);
+}
+
+/* Loading Skeleton Animation */
+.skeleton {
+ background: linear-gradient(
+ 90deg,
+ rgba(255, 255, 255, 0.05) 25%,
+ rgba(255, 215, 0, 0.1) 50%,
+ rgba(255, 255, 255, 0.05) 75%
+ );
+ background-size: 200% 100%;
+ animation: shimmer 1.5s infinite;
+ border-radius: var(--radius-sm);
+}
+
/* Print Styles — hide interactive chrome and flatten theming for paper.
   NOTE: a stale duplicate of the "Progress Tracking Styles" section
   (already defined earlier in this file with the enhanced values) had
   been pasted inside this @media block, which made the stale copies
   override the enhanced rules whenever the page was printed. The
   duplicates are removed; only genuine print rules remain. */
@media print {
  body {
    background: white;
    color: black;
  }

  /* Interactive/ephemeral UI has no meaning on paper. */
  .status-bar,
  .loading-overlay,
  .notification,
  button,
  .tabs,
  .security-badge {
    display: none !important;
  }

  /* Force every tab visible and printable. */
  .tab-content {
    display: block !important;
    background: white;
    color: black;
    border: 1px solid #ccc;
    box-shadow: none;
    page-break-inside: avoid;
  }

  .header {
    background: none;
    border: 1px solid #ccc;
    box-shadow: none;
    animation: none;
  }

  .header h1 {
    color: black;
    -webkit-text-fill-color: black;
    animation: none;
  }

  .config-card,
  .migration-card,
  .bucket-card {
    break-inside: avoid;
    border: 1px solid #ccc;
    box-shadow: none;
  }
}
\ No newline at end of file
diff --git a/index.js b/index.js
new file mode 100644
index 0000000..b8fe2de
--- /dev/null
+++ b/index.js
@@ -0,0 +1,3366 @@
+const API_BASE = "";
+
+// ============================================================================
+// تعريف دوال التدفق (Stream) محلياً - حل المشكلة
+// ============================================================================
+
/**
 * Opens a Server-Sent Events stream reporting live progress for a migration.
 *
 * @param {string} migrationId - Migration identifier, embedded in the stream URL.
 * @param {string} [type='s3'] - Migration type, sent as a query parameter.
 * @param {Object} [options]
 * @param {Function} [options.onProgress] - Called for each interim progress payload.
 * @param {Function} [options.onComplete] - Called once on a completion payload; stream is closed.
 * @param {Function} [options.onError] - Called on an error payload or when reconnects are exhausted.
 * @param {number} [options.reconnectInterval=3000] - Delay (ms) between reconnect attempts.
 * @param {number} [options.maxReconnectAttempts=5] - Reconnect budget before giving up.
 * @returns {{stop: Function, pause: Function, resume: Function}} Control handle for the stream.
 */
function createProgressStream(migrationId, type = 's3', options = {}) {
    const {
        onProgress = () => {},
        onComplete = () => {},
        onError = () => {},
        reconnectInterval = 3000,
        maxReconnectAttempts = 5
    } = options;

    let eventSource = null;
    let reconnectAttempts = 0;
    let isActive = true;       // false after stop()/pause(); blocks further reconnects
    let reconnectTimer = null;

    const connect = () => {
        if (!isActive) return;

        // Drop any previous connection before opening a new one.
        if (eventSource) {
            eventSource.close();
        }

        try {
            const safeMigrationId = encodeURIComponent(migrationId);
            const safeType = encodeURIComponent(type);
            const url = `${API_BASE}/api/stream-progress/${safeMigrationId}?type=${safeType}`;
            eventSource = new EventSource(url);

            eventSource.onmessage = (event) => {
                try {
                    const data = JSON.parse(event.data);

                    // Unified completion check: server signals completion in
                    // several payload shapes; all of them end the stream.
                    if (data.type === 'completion' ||
                        data.status === 'completed' ||
                        data.status === 'success' ||
                        data.success === true) {
                        onComplete(data);
                        if (eventSource) eventSource.close();
                    }
                    // Unified error check (same multi-shape tolerance).
                    else if (data.type === 'error' ||
                        data.status === 'failed' ||
                        data.status === 'error' ||
                        data.error) {
                        onError(data);
                        if (eventSource) eventSource.close();
                    }
                    else {
                        onProgress(data);
                    }
                } catch (error) {
                    // Malformed frame: log and keep the stream alive.
                    console.error('Error parsing stream data:', error);
                }
            };

            eventSource.onerror = (error) => {
                console.error('EventSource error:', error);

                if (isActive && reconnectAttempts < maxReconnectAttempts) {
                    reconnectAttempts++;
                    console.log(`Reconnecting... Attempt ${reconnectAttempts}/${maxReconnectAttempts}`);

                    if (reconnectTimer) clearTimeout(reconnectTimer);
                    reconnectTimer = setTimeout(connect, reconnectInterval);
                } else if (reconnectAttempts >= maxReconnectAttempts) {
                    onError({ error: 'Max reconnection attempts reached' });
                }

                if (eventSource) {
                    eventSource.close();
                    eventSource = null;
                }
            };

            eventSource.onopen = () => {
                console.log(`Stream connected for ${migrationId}`);
                reconnectAttempts = 0;  // a healthy connection resets the retry budget
            };

        } catch (error) {
            console.error('Error creating EventSource:', error);
            onError({ error: error.message });
        }
    };

    connect();

    return {
        // Permanently stop the stream and cancel any pending reconnect.
        stop: () => {
            isActive = false;
            if (reconnectTimer) clearTimeout(reconnectTimer);
            if (eventSource) {
                eventSource.close();
                eventSource = null;
            }
        },
        // NOTE(review): pause() is currently byte-identical to stop();
        // resume() below re-opens, so the distinction is only semantic.
        pause: () => {
            isActive = false;
            if (reconnectTimer) clearTimeout(reconnectTimer);
            if (eventSource) {
                eventSource.close();
                eventSource = null;
            }
        },
        // Re-open after pause()/stop(), resetting the retry budget.
        resume: () => {
            if (!isActive) {
                isActive = true;
                reconnectAttempts = 0;
                connect();
            }
        }
    };
}
+
/**
 * Renders a progress payload as a multi-line, emoji-prefixed summary.
 * Only fields present on the payload produce a line; a falsy payload
 * yields the placeholder 'Connecting...'.
 * @param {Object|null} progress - Progress payload from the stream.
 * @returns {string} Human-readable summary, one datum per line.
 */
function formatProgressDisplay(progress) {
    if (!progress) return 'Connecting...';

    const STATUS_EMOJI = {
        'running': '🟢',
        'completed': '✅',
        'failed': '❌',
        'cancelled': '⏹️',
        'success': '✅',
        'error': '❌'
    };

    // Long object keys are shown as a trailing 50-char window.
    const truncate = (name) => (name.length > 50 ? '...' + name.slice(-50) : name);

    const parts = [];

    if (progress.migration_id) parts.push(`🚀 Migration: ${progress.migration_id}`);
    if (progress.status) {
        parts.push(`${STATUS_EMOJI[progress.status] || '📊'} Status: ${progress.status}`);
    }
    if (progress.percentage !== undefined) {
        parts.push(`📊 Progress: ${progress.percentage.toFixed(1)}%`);
    }
    if (progress.processed_objects !== undefined && progress.total_objects !== undefined) {
        parts.push(`📦 Objects: ${progress.processed_objects}/${progress.total_objects}`);
    }
    if (progress.processed?.tables !== undefined && progress.total?.tables !== undefined) {
        parts.push(`📋 Tables: ${progress.processed.tables}/${progress.total.tables}`);
    }
    if (progress.processed_size_formatted && progress.total_size_formatted) {
        parts.push(`💾 Size: ${progress.processed_size_formatted}/${progress.total_size_formatted}`);
    }
    if (progress.current_speed_formatted) parts.push(`⚡ Speed: ${progress.current_speed_formatted}`);
    if (progress.elapsed_time_formatted) parts.push(`⏱️ Elapsed: ${progress.elapsed_time_formatted}`);
    if (progress.eta_formatted) parts.push(`⏳ ETA: ${progress.eta_formatted}`);
    if (progress.current_object) parts.push(`📄 Current: ${truncate(progress.current_object)}`);

    return parts.join('\n');
}
// Copy to clipboard
/**
 * Writes `text` to the system clipboard via the async Clipboard API.
 * Requires a secure context and (in some browsers) user activation.
 * @param {string} text - Text to place on the clipboard.
 * @returns {Promise<boolean>} true on success, false if the write failed.
 */
async function copyToClipboard(text) {
    try {
        await navigator.clipboard.writeText(text);
        return true;
    } catch (err) {
        console.error('Failed to copy:', err);
        return false;
    }
}
+
// Show notification
/**
 * Displays a transient toast notification and removes it after 3 seconds.
 *
 * Security fix: the message is now inserted with `textContent` — the
 * previous `innerHTML` template allowed HTML/script injection whenever a
 * server error string (or any external text) was passed as `message`.
 * Also tags the icon/message spans with the `.notification-icon` /
 * `.notification-message` classes the stylesheet defines, which the old
 * markup never used.
 *
 * @param {string} message - Text to display (treated strictly as plain text).
 * @param {string} [type='info'] - 'success' | 'error' | 'warning' | 'info'.
 */
function showNotification(message, type = 'info') {
    const icons = { success: '✅', error: '❌', warning: '⚠️' };

    const notification = document.createElement('div');
    notification.className = `notification ${type}`;

    const icon = document.createElement('span');
    icon.className = 'notification-icon';
    icon.textContent = icons[type] || 'ℹ️';

    const text = document.createElement('span');
    text.className = 'notification-message';
    text.textContent = message;

    notification.appendChild(icon);
    notification.appendChild(text);
    document.body.appendChild(notification);

    // Auto-dismiss: play the slide-out animation, then detach.
    setTimeout(() => {
        if (notification.parentElement) {
            notification.style.animation = 'slideOut 0.3s ease';
            setTimeout(() => notification.remove(), 300);
        }
    }, 3000);
}
+
// Format environment variables
/**
 * Serializes an env-var map into one of several textual formats.
 * @param {Object|null} envVars - Map of variable name to value.
 * @param {string} [format='dotenv'] - 'dotenv' | 'json' | 'shell_export' |
 *   'docker_env' | 'docker_compose'; any other value yields "key: value" lines.
 * @returns {string} Serialized representation ('' for a falsy map).
 */
function formatEnvVars(envVars, format = 'dotenv') {
    if (!envVars) return '';

    const entries = Object.entries(envVars);

    const renderers = {
        dotenv: () => entries.map(([key, value]) => `${key}=${value}`).join('\n'),
        json: () => JSON.stringify(envVars, null, 2),
        shell_export: () => entries.map(([key, value]) => `export ${key}="${value}"`).join('\n'),
        docker_env: () => entries.map(([key, value]) => `-e ${key}="${value}"`).join(' '),
        docker_compose: () => entries.map(([key, value]) => `      ${key}: ${value}`).join('\n')
    };

    const render = renderers[format]
        || (() => entries.map(([key, value]) => `${key}: ${value}`).join('\n'));
    return render();
}
+
// Format file size
/**
 * Renders a byte count as a human-readable size (base-1024 units).
 *
 * Fixes over the original: `parseInt` now gets an explicit radix 10;
 * non-numeric input and negative values return '0 Bytes' instead of
 * 'NaN undefined'; the unit index is clamped so values beyond PB do not
 * run off the end of the unit table.
 *
 * @param {number|string} bytes - Byte count (numeric strings accepted).
 * @returns {string} e.g. '0 Bytes', '1 KB', '1.5 MB'.
 */
function formatFileSize(bytes) {
    if (typeof bytes !== 'number') bytes = Number.parseInt(bytes, 10);
    if (!Number.isFinite(bytes) || bytes <= 0) return '0 Bytes';

    const k = 1024;
    const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
    const i = Math.min(Math.floor(Math.log(bytes) / Math.log(k)), sizes.length - 1);
    // parseFloat trims trailing zeros left by toFixed ('1.00' -> 1).
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
+
// Format PostgreSQL configuration for display
/**
 * Builds a multi-line, display-ready summary of a PostgreSQL config.
 * @param {Object|null} config - { host, port, user, password, database }.
 * @returns {string} Summary text ('' for a falsy config).
 */
function formatPostgresConfig(config) {
    if (!config) return '';

    const DIVIDER = `━━━━━━━━━━━━━━━━━━━━━━━━━━━`;
    const hasCredentials = Boolean(config.host && config.user && config.password);

    return [
        `📍 PostgreSQL Configuration:`,
        DIVIDER,
        `Host: ${config.host || 'Not set'}`,
        `Port: ${config.port || 5432}`,
        `User: ${config.user || 'Not set'}`,
        `Database: ${config.database || 'Not set'}`,
        DIVIDER,
        hasCredentials ? `✅ Credentials: Configured` : `❌ Credentials: Not configured`
    ].join('\n');
}
+
// Format S3 configuration for display
/**
 * Builds a multi-line, display-ready summary of an S3 endpoint config.
 * @param {Object|null} config - { endpoint_url, region, bucket, access_key_id, secret_access_key }.
 * @param {string} [type='source'] - Label ('source'/'dest'), upper-cased in the header.
 * @returns {string} Summary text ('' for a falsy config).
 */
function formatS3Config(config, type = 'source') {
    if (!config) return '';

    const DIVIDER = `━━━━━━━━━━━━━━━━━━━━━━━━━━━`;
    const hasCredentials = Boolean(config.access_key_id && config.secret_access_key);

    return [
        `📍 ${type.toUpperCase()} S3 Configuration:`,
        DIVIDER,
        `Endpoint: ${config.endpoint_url || 'AWS S3 (default)'}`,
        `Region: ${config.region || 'us-east-1'}`,
        `Bucket: ${config.bucket || 'Not set'}`,
        DIVIDER,
        hasCredentials ? `✅ Credentials: Configured` : `❌ Credentials: Not configured`
    ].join('\n');
}
+
// Format migration details for display
/**
 * Summarizes a migration-status payload for display: id, status, start
 * time, optional stats map, and optional message.
 * @param {Object|null} migrationData - Status payload (started_at is epoch seconds).
 * @returns {string} Multi-line summary ('' for a falsy payload).
 */
function formatMigrationDetails(migrationData) {
    if (!migrationData) return '';

    const DIVIDER = `━━━━━━━━━━━━━━━━━━━━━━━━━━━`;
    const startedAt = migrationData.started_at
        ? new Date(migrationData.started_at * 1000).toLocaleString()
        : 'N/A';

    const lines = [
        `🔄 Migration ID: ${migrationData.migration_id || migrationData.id || 'N/A'}`,
        DIVIDER,
        `Status: ${migrationData.status || 'N/A'}`,
        `Started: ${startedAt}`
    ];

    if (migrationData.stats) {
        lines.push(DIVIDER, `📊 Statistics:`);
        for (const [key, value] of Object.entries(migrationData.stats)) {
            lines.push(`  ${key}: ${value}`);
        }
    }

    if (migrationData.message) {
        lines.push(DIVIDER, `📝 Message: ${migrationData.message}`);
    }

    return lines.join('\n');
}
+
// Extract PostgreSQL info from URI
/**
 * Parses a PostgreSQL connection URI into its components.
 *
 * Improvements over the old regex: the port is now optional (defaults
 * to 5432), `postgres://` is accepted alongside `postgresql://`, and
 * percent-encoded usernames/passwords are decoded — the regex required
 * an explicit port and broke on encoded credentials.
 *
 * @param {string} uri - e.g. 'postgresql://user:pass@host:5432/db'.
 * @returns {{user?: string, password?: string, host?: string, port?: number,
 *            database?: string, isValid: boolean, error?: string}}
 */
function extractPostgresInfo(uri) {
    try {
        const parsed = new URL(uri);
        if (parsed.protocol !== 'postgresql:' && parsed.protocol !== 'postgres:') {
            return { isValid: false };
        }
        const database = parsed.pathname.replace(/^\//, '');
        if (!parsed.hostname || !database) {
            return { isValid: false };
        }
        return {
            user: decodeURIComponent(parsed.username),
            password: decodeURIComponent(parsed.password),
            host: parsed.hostname,
            port: parsed.port ? Number.parseInt(parsed.port, 10) : 5432,
            database: decodeURIComponent(database),
            isValid: true
        };
    } catch (error) {
        // new URL() throws TypeError on anything that is not a URI at all.
        console.error('Error extracting PostgreSQL info:', error);
        return { isValid: false, error: error.message };
    }
}
+
// Extract S3 info from URI
/**
 * Splits an 's3://bucket/key' URI into bucket and key components.
 * @param {string} s3Uri - URI to parse.
 * @returns {{bucket?: string, key?: string, fullUri?: string,
 *            isValid: boolean, error?: string}}
 */
function extractS3Info(s3Uri) {
    try {
        if (!s3Uri.startsWith('s3://')) {
            return { isValid: false };
        }

        const remainder = s3Uri.slice(5);
        const slashIndex = remainder.indexOf('/');
        // No slash at all => the whole remainder is the bucket, key is empty.
        const bucket = slashIndex === -1 ? remainder : remainder.slice(0, slashIndex);
        const key = slashIndex === -1 ? '' : remainder.slice(slashIndex + 1);

        return {
            bucket,
            key,
            fullUri: s3Uri,
            isValid: true
        };
    } catch (error) {
        console.error('Error parsing S3 URI:', error);
        return { isValid: false, error: error.message };
    }
}
+
// Build PostgreSQL connection string
/**
 * Composes a password-less PostgreSQL URI from its parts.
 * @param {string} host @param {string} database
 * @param {string} [user] - Omitted from the URI when falsy.
 * @param {number} [port=5432]
 * @returns {string} 'postgresql://[user@]host:port/database', or '' if host/database missing.
 */
function buildPostgresConnectionString(host, database, user, port = 5432) {
    if (!host || !database) return '';
    const userPart = user ? `${user}@` : '';
    return `postgresql://${userPart}${host}:${port}/${database}`;
}
+
// Build S3 URL
/**
 * Composes an object URL: the custom-endpoint HTTP form when endpointUrl
 * is given, otherwise the s3:// scheme. Empty bucket yields ''.
 * @param {string} bucket @param {string} [key] @param {string|null} [endpointUrl]
 * @returns {string}
 */
function buildS3Url(bucket, key, endpointUrl = null) {
    if (!bucket) return '';
    if (!key) return `s3://${bucket}`;
    return endpointUrl ? `${endpointUrl}/${bucket}/${key}` : `s3://${bucket}/${key}`;
}
+
// Group tables by schema
/**
 * Buckets table descriptors by their schema ('public' when unset).
 * @param {Array<Object>} tables - Each item may carry a `schema` property.
 * @returns {Object<string, {count: number, tables: Array<Object>}>}
 */
function groupTablesBySchema(tables) {
    const groups = {};

    for (const table of tables) {
        const schema = table.schema || 'public';
        const group = groups[schema] ?? (groups[schema] = { count: 0, tables: [] });
        group.count += 1;
        group.tables.push(table);
    }

    return groups;
}
+
// Group objects by prefix
/**
 * Buckets S3 objects by the first `depth` path segments of their key.
 * Keys with fewer segments than `depth` fall into the '/' (root) group.
 * @param {Array<{key: string, size: number}>} objects
 * @param {number} [depth=1] - Number of leading segments forming the prefix.
 * @returns {Object<string, {count: number, totalSize: number, objects: Array}>}
 */
function groupObjectsByPrefix(objects, depth = 1) {
    const groups = {};

    for (const obj of objects) {
        const parts = obj.key.split('/');
        // The last segment is the object name, never part of the prefix.
        const segmentCount = Math.min(depth, parts.length - 1);
        let prefix = parts.slice(0, segmentCount).map((p) => `${p}/`).join('');
        if (!prefix) prefix = '/';

        const group = groups[prefix]
            ?? (groups[prefix] = { count: 0, totalSize: 0, objects: [] });
        group.count += 1;
        group.totalSize += obj.size;
        group.objects.push(obj);
    }

    return groups;
}
+
// Estimate migration time
/**
 * Rough duration estimate for migrating `totalItems` at a given throughput.
 * @param {number} totalItems - Item count (0/undefined => 'Unknown').
 * @param {number} [averageItemsPerSecond=100] - Assumed throughput.
 * @returns {string} Duration in the most readable unit.
 */
function estimateMigrationTime(totalItems, averageItemsPerSecond = 100) {
    if (!totalItems) return 'Unknown';

    const MINUTE = 60;
    const HOUR = 3600;
    const DAY = 86400;
    const seconds = totalItems / averageItemsPerSecond;

    if (seconds < MINUTE) return `${Math.ceil(seconds)} seconds`;
    if (seconds < HOUR) return `${Math.ceil(seconds / MINUTE)} minutes`;
    if (seconds < DAY) return `${(seconds / HOUR).toFixed(1)} hours`;
    return `${(seconds / DAY).toFixed(1)} days`;
}
+
+// ============================================================================
+// PostgreSQL API Functions
+// ============================================================================
+
// Test PostgreSQL connection
/**
 * POSTs to /api/postgres/test-connection to verify database connectivity.
 * A `postgresql://` URI is built from host/user/password/database when no
 * explicit `uri` is supplied; with `useEnvVars` the server presumably
 * falls back to its own environment credentials (server-side contract —
 * confirm against the backend).
 * @param {Object} [options] - { useEnvVars, uri, host, user, password, database, port }.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function testPostgresConnection(options = {}) {
    const {
        useEnvVars = false,
        uri,
        host,
        user,
        password,
        database,
        port = 5432
    } = options;

    const body = {
        use_env_vars: useEnvVars
    };

    // Prefer an explicit URI; otherwise assemble one from the parts.
    if (uri) {
        body.uri = uri;
    } else if (host && user && password && database) {
        body.uri = `postgresql://${user}:${password}@${host}:${port}/${database}`;
    }

    const r = await fetch(`${API_BASE}/api/postgres/test-connection`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body)
    });
    return r.json();
}
+
// Get PostgreSQL schemas
/**
 * POSTs to /api/postgres/get-schemas and returns the schema list payload.
 * Same URI-assembly behavior as testPostgresConnection.
 * @param {Object} [options] - { useEnvVars, uri, host, user, password, database, port }.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function getPostgresSchemas(options = {}) {
    const {
        useEnvVars = false,
        uri,
        host,
        user,
        password,
        database,
        port = 5432
    } = options;

    const body = {
        use_env_vars: useEnvVars
    };

    // Build the URI when one was not provided explicitly.
    if (uri) {
        body.uri = uri;
    } else if (host && user && password && database) {
        body.uri = `postgresql://${user}:${password}@${host}:${port}/${database}`;
    }

    const r = await fetch(`${API_BASE}/api/postgres/get-schemas`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body) // sends the full request body
    });
    return r.json();
}
// Get PostgreSQL tables
/**
 * POSTs to /api/postgres/get-tables and returns the table list payload.
 * NOTE(review): `schema` is accepted in options but never added to the
 * request body — confirm whether the endpoint expects a `schema` field
 * or returns tables for all schemas.
 * @param {Object} [options] - { useEnvVars, uri, host, user, password, database, port, schema }.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function getPostgresTables(options = {}){
    const {
        useEnvVars = false,
        uri,
        host,
        user,
        password,
        database,
        port = 5432,
        schema = ''
    } = options;

    const body = {
        use_env_vars: useEnvVars
    };

    // Build the URI when one was not provided explicitly.
    if (uri) {
        body.uri = uri;
    } else if (host && user && password && database) {
        body.uri = `postgresql://${user}:${password}@${host}:${port}/${database}`;
    }

    const r = await fetch(`${API_BASE}/api/postgres/get-tables`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body)
    });
    return r.json();
}
+
// Get PostgreSQL table counts
/**
 * POSTs to /api/postgres/get-table-counts for per-table row counts.
 *
 * Bug fix: the original referenced an undeclared `body` variable, so
 * every call threw a ReferenceError before the request was sent. The
 * body is now built from the parameters. (NOTE(review): field names
 * `uri`/`schema` mirror the sibling postgres endpoints — confirm against
 * the backend contract.)
 *
 * @param {string} uri - PostgreSQL connection URI.
 * @param {string} [schema=''] - Optional schema filter.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function getPostgresTableCounts(uri, schema = '') {
    const body = { uri, schema };
    const r = await fetch(`${API_BASE}/api/postgres/get-table-counts`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body)
    });
    return r.json();
}
+
// Parse PostgreSQL URI
/**
 * Asks the server to parse a PostgreSQL URI into its components.
 * @param {string} uri - Connection string to parse.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function parsePostgresUri(uri) {
    const r = await fetch(`${API_BASE}/api/postgres/parse-uri`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ uri })
    });
    return r.json();
}
+
// Start PostgreSQL to PostgreSQL migration
/**
 * Kicks off a database migration between two PostgreSQL instances.
 * @param {string} sourceUri - Source connection URI.
 * @param {string} destUri - Destination connection URI.
 * @param {Array<string>|null} [schemas] - Optional schema filter (null = all).
 * @param {Array<string>|null} [tables] - Optional table filter (null = all).
 * @returns {Promise<Object>} Parsed JSON response (expected to carry the migration id).
 */
async function startPostgresMigration(sourceUri, destUri, schemas = null, tables = null) {
    const r = await fetch(`${API_BASE}/api/postgres/start-migration`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            source_uri: sourceUri,
            dest_uri: destUri,
            schemas,
            tables
        })
    });
    return r.json();
}
+
// Get PostgreSQL migration status
/**
 * Fetches the status payload for one migration.
 * Fix: `migrationId` is now URL-encoded before being placed in the path,
 * matching how createProgressStream builds its stream URL — an id with
 * '/', '?' or spaces previously produced a malformed request path.
 * @param {string} migrationId - Migration identifier.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function getPostgresMigrationStatus(migrationId) {
    const r = await fetch(`${API_BASE}/api/postgres/migration-status/${encodeURIComponent(migrationId)}`);
    return r.json();
}
+
// List PostgreSQL migrations
/**
 * Fetches the list of known PostgreSQL migrations from the server.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function listPostgresMigrations() {
    const r = await fetch(`${API_BASE}/api/postgres/list-migrations`);
    return r.json();
}
+
+// ============================================================================
+// S3 API Functions
+// ============================================================================
+
// Test source S3 connection
/**
 * POSTs to /api/s3-source/test-connection to verify the source S3
 * credentials/endpoint. With `useEnvVars` the server presumably falls
 * back to its own environment credentials (server-side contract).
 * @param {Object} [options] - { useEnvVars, accessKeyId, secretAccessKey,
 *   region, endpointUrl, sessionToken }.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function testSourceS3Connection(options = {}) {
    const {
        useEnvVars = false,
        accessKeyId,
        secretAccessKey,
        region = 'us-east-1',
        endpointUrl,
        sessionToken
    } = options;

    const body = {
        use_env_vars: useEnvVars,
        access_key_id: accessKeyId,
        secret_access_key: secretAccessKey,
        region,
        endpoint_url: endpointUrl,
        session_token: sessionToken
    };

    const r = await fetch(`${API_BASE}/api/s3-source/test-connection`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body)
    });
    return r.json();
}
+
// Test destination S3 connection
/**
 * POSTs to /api/s3-destination/test-connection to verify the destination
 * S3 credentials/endpoint. Mirrors testSourceS3Connection.
 * @param {Object} [options] - { useEnvVars, accessKeyId, secretAccessKey,
 *   region, endpointUrl, sessionToken }.
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function testDestinationS3Connection(options = {}) {
    const {
        useEnvVars = false,
        accessKeyId,
        secretAccessKey,
        region = 'us-east-1',
        endpointUrl,
        sessionToken
    } = options;

    const body = {
        use_env_vars: useEnvVars,
        access_key_id: accessKeyId,
        secret_access_key: secretAccessKey,
        region,
        endpoint_url: endpointUrl,
        session_token: sessionToken
    };

    const r = await fetch(`${API_BASE}/api/s3-destination/test-connection`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body)
    });
    return r.json();
}
+
// List source S3 buckets
/**
 * Fetches the bucket list visible to the source S3 credentials.
 * @param {string} accessKeyId @param {string} secretAccessKey
 * @param {string} [region='us-east-1'] @param {string|null} [endpointUrl]
 * @param {string|null} [sessionToken]
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function listSourceS3Buckets(accessKeyId, secretAccessKey, region = 'us-east-1', endpointUrl = null, sessionToken = null) {
    const r = await fetch(`${API_BASE}/api/s3-source/list-buckets`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            access_key_id: accessKeyId,
            secret_access_key: secretAccessKey,
            region,
            endpoint_url: endpointUrl,
            session_token: sessionToken
        })
    });
    return r.json();
}
+
// List destination S3 buckets
/**
 * Fetches the bucket list visible to the destination S3 credentials.
 * Mirrors listSourceS3Buckets.
 * @param {string} accessKeyId @param {string} secretAccessKey
 * @param {string} [region='us-east-1'] @param {string|null} [endpointUrl]
 * @param {string|null} [sessionToken]
 * @returns {Promise<Object>} Parsed JSON response from the server.
 */
async function listDestinationS3Buckets(accessKeyId, secretAccessKey, region = 'us-east-1', endpointUrl = null, sessionToken = null) {
    const r = await fetch(`${API_BASE}/api/s3-destination/list-buckets`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            access_key_id: accessKeyId,
            secret_access_key: secretAccessKey,
            region,
            endpoint_url: endpointUrl,
            session_token: sessionToken
        })
    });
    return r.json();
}
+
/**
 * List objects under a prefix in an S3 bucket.
 * @param {string} bucket - Bucket name.
 * @param {string} [prefix] - Key prefix filter.
 * @param {boolean} [isSource] - True to use source-side credentials.
 * @param {Object} [credentials] - Optional credential overrides; `maxKeys`
 *   caps the listing size (defaults to 1000).
 * @returns {Promise<Object>} Parsed JSON object listing.
 */
async function listS3Objects(bucket, prefix = '', isSource = true, credentials = {}) {
  const { accessKeyId, secretAccessKey, region, endpointUrl, sessionToken, maxKeys } = credentials;
  const payload = {
    bucket,
    prefix,
    is_source: isSource,
    access_key_id: accessKeyId,
    secret_access_key: secretAccessKey,
    region: region || 'us-east-1',
    endpoint_url: endpointUrl,
    session_token: sessionToken,
    max_keys: maxKeys || 1000
  };

  const response = await fetch(`${API_BASE}/api/s3/list-objects`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  return response.json();
}
+
/**
 * Create a bucket on the destination S3 endpoint.
 * @param {string} bucket - Name of the bucket to create.
 * @param {string} [region] - Region for the new bucket.
 * @param {Object} [options] - Optional credential overrides.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function createS3Bucket(bucket, region = 'us-east-1', options = {}) {
  const { accessKeyId, secretAccessKey, endpointUrl, sessionToken } = options;
  const payload = {
    bucket,
    region,
    access_key_id: accessKeyId,
    secret_access_key: secretAccessKey,
    endpoint_url: endpointUrl,
    session_token: sessionToken
  };

  const response = await fetch(`${API_BASE}/api/s3-destination/create-bucket`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  return response.json();
}
+
/**
 * Copy a single object from the source bucket to the destination bucket.
 * @param {string} sourceBucket - Source bucket name.
 * @param {string} sourceKey - Key of the object to copy.
 * @param {string} destBucket - Destination bucket name.
 * @param {string|null} [destKey] - Destination key; defaults to sourceKey.
 * @param {Object} [options] - Per-side credentials plus `preserveMetadata`
 *   (default true) and `storageClass` (default 'STANDARD').
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function migrateS3Object(sourceBucket, sourceKey, destBucket, destKey = null, options = {}) {
  const body = {
    source_bucket: sourceBucket,
    source_key: sourceKey,
    dest_bucket: destBucket,
    dest_key: destKey || sourceKey,
    source_access_key_id: options.sourceAccessKeyId,
    source_secret_access_key: options.sourceSecretAccessKey,
    source_region: options.sourceRegion || 'us-east-1',
    source_endpoint_url: options.sourceEndpointUrl,
    source_session_token: options.sourceSessionToken,
    dest_access_key_id: options.destAccessKeyId,
    dest_secret_access_key: options.destSecretAccessKey,
    dest_region: options.destRegion || 'us-east-1',
    dest_endpoint_url: options.destEndpointUrl,
    dest_session_token: options.destSessionToken,
    // ?? keeps an explicit `false` but defaults both undefined and null to true
    // (the old ternary forwarded null to the API).
    preserve_metadata: options.preserveMetadata ?? true,
    storage_class: options.storageClass || 'STANDARD'
  };

  const r = await fetch(`${API_BASE}/api/s3/migrate-object`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body)
  });
  return r.json();
}
+
/**
 * Copy a batch of objects between buckets in one backend request.
 * @param {Array} objects - Object descriptors to migrate.
 * @param {string} sourceBucket - Source bucket name.
 * @param {string} destBucket - Destination bucket name.
 * @param {Object} [options] - Per-side credentials, `preserveMetadata`
 *   (default true), `storageClass` (default 'STANDARD') and `maxConcurrent`
 *   (default 5).
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function migrateS3Batch(objects, sourceBucket, destBucket, options = {}) {
  const body = {
    objects,
    source_bucket: sourceBucket,
    dest_bucket: destBucket,
    source_access_key_id: options.sourceAccessKeyId,
    source_secret_access_key: options.sourceSecretAccessKey,
    source_region: options.sourceRegion || 'us-east-1',
    source_endpoint_url: options.sourceEndpointUrl,
    source_session_token: options.sourceSessionToken,
    dest_access_key_id: options.destAccessKeyId,
    dest_secret_access_key: options.destSecretAccessKey,
    dest_region: options.destRegion || 'us-east-1',
    dest_endpoint_url: options.destEndpointUrl,
    dest_session_token: options.destSessionToken,
    // ?? keeps an explicit `false` but defaults undefined/null to true.
    preserve_metadata: options.preserveMetadata ?? true,
    storage_class: options.storageClass || 'STANDARD',
    max_concurrent: options.maxConcurrent || 5
  };

  const r = await fetch(`${API_BASE}/api/s3/migrate-batch`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body)
  });
  return r.json();
}
+
/**
 * Kick off a full S3→S3 migration on the backend.
 * @param {string} sourceBucket - Source bucket name.
 * @param {string} destBucket - Destination bucket name.
 * @param {string} [prefix] - Only migrate keys under this prefix.
 * @param {Object} [options] - Per-side credentials, include/exclude patterns,
 *   `preserveMetadata` (default true), `storageClass` (default 'STANDARD'),
 *   `createDestBucket` (default true) and `maxConcurrent` (default 5).
 * @returns {Promise<Object>} Parsed JSON result, including `migration_id` on
 *   success (per the callers that poll status with it).
 */
async function startS3Migration(sourceBucket, destBucket, prefix = '', options = {}) {
  const body = {
    source_bucket: sourceBucket,
    dest_bucket: destBucket,
    prefix,
    source_access_key_id: options.sourceAccessKeyId,
    source_secret_access_key: options.sourceSecretAccessKey,
    source_region: options.sourceRegion || 'us-east-1',
    source_endpoint_url: options.sourceEndpointUrl,
    source_session_token: options.sourceSessionToken,
    dest_access_key_id: options.destAccessKeyId,
    dest_secret_access_key: options.destSecretAccessKey,
    dest_region: options.destRegion || 'us-east-1',
    dest_endpoint_url: options.destEndpointUrl,
    dest_session_token: options.destSessionToken,
    include_patterns: options.includePatterns,
    exclude_patterns: options.excludePatterns,
    // ?? keeps an explicit `false` but defaults undefined/null to true
    // (the old ternaries forwarded null verbatim).
    preserve_metadata: options.preserveMetadata ?? true,
    storage_class: options.storageClass || 'STANDARD',
    create_dest_bucket: options.createDestBucket ?? true,
    max_concurrent: options.maxConcurrent || 5
  };

  const r = await fetch(`${API_BASE}/api/s3/start-migration`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body)
  });
  return r.json();
}
+
/**
 * Fetch the status of an S3→S3 migration.
 * @param {string} migrationId - Server-issued migration identifier.
 * @returns {Promise<Object>} Parsed JSON status payload.
 */
async function getS3MigrationStatus(migrationId) {
  // Encode the id so unexpected characters cannot break or alter the route.
  const r = await fetch(`${API_BASE}/api/s3/migration-status/${encodeURIComponent(migrationId)}`);
  return r.json();
}
+
/**
 * Fetch every known S3→S3 migration from the backend.
 * @returns {Promise<Object>} Parsed JSON migration list.
 */
async function listS3Migrations() {
  const response = await fetch(`${API_BASE}/api/s3/list-migrations`);
  return response.json();
}
+
/**
 * Request cancellation of a running S3→S3 migration.
 * @param {string} migrationId - Server-issued migration identifier.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function cancelS3Migration(migrationId) {
  // Encode the id so unexpected characters cannot break or alter the route.
  const r = await fetch(`${API_BASE}/api/s3/cancel-migration/${encodeURIComponent(migrationId)}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" }
  });
  return r.json();
}
+
/**
 * Ask the backend to split an s3:// URI into its components.
 * @param {string} s3Uri - URI such as s3://bucket/key.
 * @returns {Promise<Object>} Parsed JSON with the URI components.
 */
async function parseS3Uri(s3Uri) {
  const payload = { s3_uri: s3Uri };
  const response = await fetch(`${API_BASE}/api/s3/parse-uri`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  return response.json();
}
+
/**
 * Request a presigned URL for an S3 object.
 * @param {string} bucket - Bucket name.
 * @param {string} key - Object key.
 * @param {boolean} [isSource] - True to use source-side credentials.
 * @param {number} [expiration] - URL lifetime in seconds.
 * @param {Object} [credentials] - Optional credential overrides.
 * @returns {Promise<Object>} Parsed JSON containing the URL.
 */
async function generatePresignedUrl(bucket, key, isSource = true, expiration = 3600, credentials = {}) {
  const { accessKeyId, secretAccessKey, region, endpointUrl, sessionToken } = credentials;
  const payload = {
    bucket,
    key,
    is_source: isSource,
    expiration,
    access_key_id: accessKeyId,
    secret_access_key: secretAccessKey,
    region: region || 'us-east-1',
    endpoint_url: endpointUrl,
    session_token: sessionToken
  };

  const response = await fetch(`${API_BASE}/api/s3/generate-presigned-url`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  return response.json();
}
+
+// ============================================================================
+// PostgreSQL to S3 API Functions
+// ============================================================================
+
/**
 * Test the PostgreSQL connection used by the PG→S3 flow.
 * @param {string} uri - PostgreSQL connection URI.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function testPgToS3PostgresConnection(uri) {
  const response = await fetch(`${API_BASE}/api/postgres-s3/test-postgres-connection`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ uri })
  });
  return response.json();
}
+
/**
 * Test the S3 connection used by the PG→S3 flow.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function testPgToS3S3Connection(accessKeyId, secretAccessKey, region = 'us-east-1', endpointUrl = null) {
  const payload = {
    access_key_id: accessKeyId,
    secret_access_key: secretAccessKey,
    region,
    endpoint_url: endpointUrl
  };
  const response = await fetch(`${API_BASE}/api/postgres-s3/test-s3-connection`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  return response.json();
}
+
/**
 * Fetch the schema list for the PG→S3 flow.
 * @param {string} uri - PostgreSQL connection URI.
 * @returns {Promise<Object>} Parsed JSON schema list.
 */
async function getPgToS3Schemas(uri) {
  const response = await fetch(`${API_BASE}/api/postgres-s3/get-schemas`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ uri })
  });
  return response.json();
}
+
/**
 * Fetch the table list for the PG→S3 flow.
 * @param {string} uri - PostgreSQL connection URI.
 * @param {string} [schema] - Restrict to one schema; empty means all.
 * @returns {Promise<Object>} Parsed JSON table list.
 */
async function getPgToS3Tables(uri, schema = '') {
  const response = await fetch(`${API_BASE}/api/postgres-s3/get-tables`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ uri, schema })
  });
  return response.json();
}
+
/**
 * Export a single PostgreSQL table to an S3 object.
 * @param {string} postgresUri - Source PostgreSQL connection URI.
 * @param {string} schema - Schema containing the table.
 * @param {string} table - Table to export.
 * @param {string} s3Bucket - Target bucket.
 * @param {string} s3Key - Target key.
 * @param {Object} [options] - `compress` (default true), `format`
 *   (default 'csv') and S3 credential overrides.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function exportTableToS3(postgresUri, schema, table, s3Bucket, s3Key, options = {}) {
  const body = {
    postgres_uri: postgresUri,
    schema,
    table,
    s3_bucket: s3Bucket,
    s3_key: s3Key,
    // ?? keeps an explicit `false` but defaults undefined/null to true.
    compress: options.compress ?? true,
    format: options.format || 'csv',
    access_key_id: options.accessKeyId,
    secret_access_key: options.secretAccessKey,
    region: options.region || 'us-east-1',
    endpoint_url: options.endpointUrl
  };

  const r = await fetch(`${API_BASE}/api/postgres-s3/export-table`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body)
  });
  return r.json();
}
+
/**
 * Kick off a full PostgreSQL→S3 migration on the backend.
 * @param {string} postgresUri - Source PostgreSQL connection URI.
 * @param {string} s3Bucket - Target bucket.
 * @param {string} [s3Prefix] - Key prefix for exported objects.
 * @param {Object} [options] - Optional `schemas`/`tables` filters,
 *   `compress` (default true), `format` (default 'csv') and S3 credentials.
 * @returns {Promise<Object>} Parsed JSON result, including `migration_id` on
 *   success (per the callers that poll status with it).
 */
async function startPgToS3Migration(postgresUri, s3Bucket, s3Prefix = '', options = {}) {
  const body = {
    postgres_uri: postgresUri,
    s3_bucket: s3Bucket,
    s3_prefix: s3Prefix,
    schemas: options.schemas,
    tables: options.tables,
    // ?? keeps an explicit `false` but defaults undefined/null to true.
    compress: options.compress ?? true,
    format: options.format || 'csv',
    access_key_id: options.accessKeyId,
    secret_access_key: options.secretAccessKey,
    region: options.region || 'us-east-1',
    endpoint_url: options.endpointUrl
  };

  const r = await fetch(`${API_BASE}/api/postgres-s3/start-migration`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body)
  });
  return r.json();
}
+
/**
 * Fetch the status of a PostgreSQL→S3 migration.
 * @param {string} migrationId - Server-issued migration identifier.
 * @returns {Promise<Object>} Parsed JSON status payload.
 */
async function getPgToS3MigrationStatus(migrationId) {
  // Encode the id so unexpected characters cannot break or alter the route.
  const r = await fetch(`${API_BASE}/api/postgres-s3/migration-status/${encodeURIComponent(migrationId)}`);
  return r.json();
}
+
/**
 * Fetch every known PostgreSQL→S3 migration from the backend.
 * @returns {Promise<Object>} Parsed JSON migration list.
 */
async function listPgToS3Migrations() {
  const response = await fetch(`${API_BASE}/api/postgres-s3/list-migrations`);
  return response.json();
}
+
+// ============================================================================
+// Common Environment Functions
+// ============================================================================
+
/**
 * Push a set of environment variables to the backend session.
 * @param {Object} envVars - Name → value map to inject.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function injectEnv(envVars) {
  const payload = { environment_variables: envVars };
  const response = await fetch(`${API_BASE}/api/inject-env`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  return response.json();
}
+
/**
 * Fetch the environment variables currently held by the backend session.
 * @returns {Promise<Object>} Parsed JSON environment snapshot.
 */
async function getCurrentEnv() {
  const response = await fetch(`${API_BASE}/api/get-current-env`);
  return response.json();
}
+
/**
 * Ping the backend health endpoint.
 * @returns {Promise<Object>} Parsed JSON health payload.
 */
async function healthCheck() {
  const response = await fetch(`${API_BASE}/api/health`);
  return response.json();
}
+
+// ============================================================================
+// Security API Functions
+// ============================================================================
+
/**
 * Ask the backend to wipe the current session and its stored secrets.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function clearSession() {
  const response = await fetch(`${API_BASE}/api/clear-session`, {
    method: "POST",
    headers: { "Content-Type": "application/json" }
  });
  return response.json();
}
+
/**
 * Ask the backend to purge stored data for one migration.
 * @param {string} migrationId - Server-issued migration identifier.
 * @returns {Promise<Object>} Parsed JSON result.
 */
async function clearMigration(migrationId) {
  // Encode the id so unexpected characters cannot break or alter the route.
  const r = await fetch(`${API_BASE}/api/clear-migration/${encodeURIComponent(migrationId)}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" }
  });
  return r.json();
}
+
/**
 * Fetch the backend's security/session status summary.
 * @returns {Promise<Object>} Parsed JSON security payload.
 */
async function getSecurityStatus() {
  const response = await fetch(`${API_BASE}/api/security-status`);
  return response.json();
}
+
+// ============================================================================
+// Main App
+// ============================================================================
+
+function App() {
+ const root = document.getElementById("root");
+
+ root.innerHTML = `
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
🐘 → 📤 PostgreSQL to S3 Migration
+
+
+
+
+
+
PostgreSQL Source
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
S3 Destination
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
🐘 → 🐘 PostgreSQL to PostgreSQL Migration
+
+
+
+
+
+
Source PostgreSQL
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Destination PostgreSQL
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Migration Options
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
📤 → 📥 S3 to S3 Migration
+
+
+
+
+
+
Source S3
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Available Buckets:
+
+
+
+
+
+
+
+
+
Destination S3
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Available Buckets:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
⚙️ Environment & Security
+
+
+
+
+
+
Environment Variables
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Current Configuration
+
+
+
+
+
Environment Preview
+
+
+
+
+
+
+
+
Security
+
+
+
Session ID: Loading...
+
Active Sessions: -
+
Expiry: 10 minutes
+
+
+
+
+ Encryption:
+ AES-256
+
+
+ Session Isolation:
+ ✅ Enabled
+
+
+ Auto-cleanup:
+ 10 minutes
+
+
+
+
+
+
+
+
+
+
Migration Data Cleanup
+
Clear sensitive data from migrations
+
+
+
+
+
+
+
+
Migration History
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Ready
+
⏳ Processing...
+
+
+ `;
+
+ // ==================== State Variables ====================
+
+ // ===== PSQL to S3 =====
+ let psqlToS3Config = {
+ source: { host: '', user: '', password: '', port: 5432, database: '', uri: '' },
+ s3: { accessKeyId: '', secretAccessKey: '', region: 'us-east-1', endpointUrl: '', bucket: '', prefix: '' },
+ compress: true,
+ format: 'csv'
+ };
+
+ // ===== PSQL to PSQL =====
+ let psqlToPsqlConfig = {
+ source: { host: '', user: '', password: '', port: 5432, database: '', uri: '' },
+ dest: { host: '', user: '', password: '', port: 5432, database: '', uri: '' }
+ };
+ let psqlToPsqlSchemas = [];
+ let psqlToPsqlTables = [];
+ let psqlToPsqlSelectedSchemas = [];
+ let psqlToPsqlSelectedTables = [];
+
+ // ===== S3 to S3 =====
+ let s3ToS3Config = {
+ source: { accessKeyId: '', secretAccessKey: '', region: 'us-east-1', endpointUrl: '', sessionToken: '', bucket: 'my-source-bucket' },
+ dest: { accessKeyId: '', secretAccessKey: '', region: 'us-east-1', endpointUrl: '', sessionToken: '', bucket: 'my-destination-bucket' }
+ };
+ let s3ToS3SourceBuckets = [];
+ let s3ToS3DestBuckets = [];
+
+ // Common state variables
+ let migrations = [];
+ let activeMigration = null;
+ let migrationLogs = [];
+ let currentEnv = {};
+ let loading = false;
+
+ // DOM Elements
+ const statusInfo = document.getElementById("statusInfo");
+ const loadingDiv = document.getElementById("loading");
+ const envPreview = document.getElementById("envPreview");
+ const sourcePostgresSummary = document.getElementById("sourcePostgresSummary");
+ const destPostgresSummary = document.getElementById("destPostgresSummary");
+ const sourceS3Summary = document.getElementById("sourceS3Summary");
+ const destS3Summary = document.getElementById("destS3Summary");
+
+ // ==================== Setup Event Listeners ====================
+ function setupEventListeners() {
+ // Tab switching
+ document.querySelectorAll('.tab-btn').forEach(btn => {
+ btn.addEventListener('click', () => {
+ document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
+ document.querySelectorAll('.tab-content').forEach(c => c.classList.remove('active'));
+
+ btn.classList.add('active');
+ document.getElementById(`tab-${btn.dataset.tab}`).classList.add('active');
+ });
+ });
+
+ // ========== PSQL to S3 Inputs ==========
+ document.getElementById('psqlToS3_sourceHost')?.addEventListener('input', (e) => {
+ psqlToS3Config.source.host = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_sourcePort')?.addEventListener('input', (e) => {
+ psqlToS3Config.source.port = parseInt(e.target.value) || 5432;
+ });
+
+ document.getElementById('psqlToS3_sourceUser')?.addEventListener('input', (e) => {
+ psqlToS3Config.source.user = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_sourcePassword')?.addEventListener('input', (e) => {
+ psqlToS3Config.source.password = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_sourceDatabase')?.addEventListener('input', (e) => {
+ psqlToS3Config.source.database = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_sourceUri')?.addEventListener('input', (e) => {
+ psqlToS3Config.source.uri = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_s3AccessKeyId')?.addEventListener('input', (e) => {
+ psqlToS3Config.s3.accessKeyId = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_s3SecretAccessKey')?.addEventListener('input', (e) => {
+ psqlToS3Config.s3.secretAccessKey = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_s3Region')?.addEventListener('input', (e) => {
+ psqlToS3Config.s3.region = e.target.value || 'us-east-1';
+ });
+
+ document.getElementById('psqlToS3_s3EndpointUrl')?.addEventListener('input', (e) => {
+ psqlToS3Config.s3.endpointUrl = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_s3Bucket')?.addEventListener('input', (e) => {
+ psqlToS3Config.s3.bucket = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_s3Prefix')?.addEventListener('input', (e) => {
+ psqlToS3Config.s3.prefix = e.target.value;
+ });
+
+ document.getElementById('psqlToS3_compress')?.addEventListener('change', (e) => {
+ psqlToS3Config.compress = e.target.checked;
+ });
+
+ document.getElementById('psqlToS3_format')?.addEventListener('change', (e) => {
+ psqlToS3Config.format = e.target.value;
+ });
+
+ // ========== PSQL to PSQL Inputs ==========
+ document.getElementById('psqlToPsql_sourceHost')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.source.host = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_sourcePort')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.source.port = parseInt(e.target.value) || 5432;
+ });
+
+ document.getElementById('psqlToPsql_sourceUser')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.source.user = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_sourcePassword')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.source.password = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_sourceDatabase')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.source.database = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_sourceUri')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.source.uri = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_destHost')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.dest.host = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_destPort')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.dest.port = parseInt(e.target.value) || 5432;
+ });
+
+ document.getElementById('psqlToPsql_destUser')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.dest.user = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_destPassword')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.dest.password = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_destDatabase')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.dest.database = e.target.value;
+ });
+
+ document.getElementById('psqlToPsql_destUri')?.addEventListener('input', (e) => {
+ psqlToPsqlConfig.dest.uri = e.target.value;
+ });
+
+ // ========== S3 to S3 Inputs ==========
+ document.getElementById('s3ToS3_sourceAccessKeyId')?.addEventListener('input', (e) => {
+ s3ToS3Config.source.accessKeyId = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_sourceSecretAccessKey')?.addEventListener('input', (e) => {
+ s3ToS3Config.source.secretAccessKey = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_sourceRegion')?.addEventListener('input', (e) => {
+ s3ToS3Config.source.region = e.target.value || 'us-east-1';
+ });
+
+ document.getElementById('s3ToS3_sourceEndpointUrl')?.addEventListener('input', (e) => {
+ s3ToS3Config.source.endpointUrl = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_sourceSessionToken')?.addEventListener('input', (e) => {
+ s3ToS3Config.source.sessionToken = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_sourceBucket')?.addEventListener('input', (e) => {
+ s3ToS3Config.source.bucket = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_destAccessKeyId')?.addEventListener('input', (e) => {
+ s3ToS3Config.dest.accessKeyId = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_destSecretAccessKey')?.addEventListener('input', (e) => {
+ s3ToS3Config.dest.secretAccessKey = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_destRegion')?.addEventListener('input', (e) => {
+ s3ToS3Config.dest.region = e.target.value || 'us-east-1';
+ });
+
+ document.getElementById('s3ToS3_destEndpointUrl')?.addEventListener('input', (e) => {
+ s3ToS3Config.dest.endpointUrl = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_destSessionToken')?.addEventListener('input', (e) => {
+ s3ToS3Config.dest.sessionToken = e.target.value;
+ });
+
+ document.getElementById('s3ToS3_destBucket')?.addEventListener('input', (e) => {
+ s3ToS3Config.dest.bucket = e.target.value;
+ });
+
+ // ========== PSQL to S3 Actions ==========
+ document.getElementById('psqlToS3_testSourceConnection')?.addEventListener('click', testPsqlToS3SourceConnectionHandler);
+ document.getElementById('psqlToS3_parseSourceUri')?.addEventListener('click', parsePsqlToS3SourceUriHandler);
+ document.getElementById('psqlToS3_getSchemas')?.addEventListener('click', getPsqlToS3SchemasHandler);
+ document.getElementById('psqlToS3_getTables')?.addEventListener('click', getPsqlToS3TablesHandler);
+ document.getElementById('psqlToS3_testPgConnection')?.addEventListener('click', testPsqlToS3PgConnectionHandler);
+ document.getElementById('psqlToS3_testS3Connection')?.addEventListener('click', testPsqlToS3S3ConnectionHandler);
+ document.getElementById('psqlToS3_startMigration')?.addEventListener('click', startPsqlToS3MigrationHandler);
+
+ // ========== PSQL to PSQL Actions ==========
+ document.getElementById('psqlToPsql_testSourceConnection')?.addEventListener('click', testPsqlToPsqlSourceConnectionHandler);
+ document.getElementById('psqlToPsql_parseSourceUri')?.addEventListener('click', parsePsqlToPsqlSourceUriHandler);
+ document.getElementById('psqlToPsql_getSchemas')?.addEventListener('click', getPsqlToPsqlSchemasHandler);
+ document.getElementById('psqlToPsql_getTables')?.addEventListener('click', getPsqlToPsqlTablesHandler);
+ document.getElementById('psqlToPsql_testDestConnection')?.addEventListener('click', testPsqlToPsqlDestConnectionHandler);
+ document.getElementById('psqlToPsql_parseDestUri')?.addEventListener('click', parsePsqlToPsqlDestUriHandler);
+ document.getElementById('psqlToPsql_startMigration')?.addEventListener('click', startPsqlToPsqlMigrationHandler);
+
+ // ========== S3 to S3 Actions ==========
+ document.getElementById('s3ToS3_testSourceConnection')?.addEventListener('click', testS3ToS3SourceConnectionHandler);
+ document.getElementById('s3ToS3_listSourceBuckets')?.addEventListener('click', listS3ToS3SourceBucketsHandler);
+ document.getElementById('s3ToS3_hideSourceBuckets')?.addEventListener('click', () => {
+ document.getElementById('s3ToS3_sourceBucketsList').style.display = 'none';
+ });
+
+ document.getElementById('s3ToS3_testDestConnection')?.addEventListener('click', testS3ToS3DestConnectionHandler);
+ document.getElementById('s3ToS3_listDestBuckets')?.addEventListener('click', listS3ToS3DestBucketsHandler);
+ document.getElementById('s3ToS3_createDestBucket')?.addEventListener('click', createS3ToS3DestBucketHandler);
+ document.getElementById('s3ToS3_hideDestBuckets')?.addEventListener('click', () => {
+ document.getElementById('s3ToS3_destBucketsList').style.display = 'none';
+ });
+
+ document.getElementById('s3ToS3_startMigration')?.addEventListener('click', startS3ToS3MigrationHandler);
+
+ // ========== Environment Actions ==========
+ document.getElementById('refreshEnv')?.addEventListener('click', loadCurrentEnvHandler);
+ document.getElementById('injectEnv')?.addEventListener('click', injectEnvironmentHandler);
+ document.getElementById('clearEnv')?.addEventListener('click', clearEnvironmentHandler);
+ document.getElementById('copyEnv')?.addEventListener('click', copyEnvToClipboardHandler);
+
+ // ========== Security Actions ==========
+ document.getElementById('clearSessionBtn')?.addEventListener('click', clearSessionHandler);
+ document.getElementById('clearMigrationBtn')?.addEventListener('click', clearMigrationHandler);
+ document.getElementById('refreshMigrations')?.addEventListener('click', loadMigrationsHandler);
+ }
+
+ // Helper function for summaries
+ function updateS3Summaries() {
+ if (sourceS3Summary) sourceS3Summary.textContent = s3ToS3Config.source.bucket || 'Not set';
+ if (destS3Summary) destS3Summary.textContent = s3ToS3Config.dest.bucket || 'Not set';
+ }
+
+ // ==================== Helper Functions ====================
+
+ function setLoading(isLoading) {
+ loading = isLoading;
+ loadingDiv.style.display = isLoading ? 'block' : 'none';
+ }
+
+ function updatePostgresSummaries() {
+ if (sourcePostgresSummary) {
+ sourcePostgresSummary.textContent = psqlToPsqlConfig.source.database
+ ? `${psqlToPsqlConfig.source.database}@${psqlToPsqlConfig.source.host}`
+ : 'Not set';
+ }
+ if (destPostgresSummary) {
+ destPostgresSummary.textContent = psqlToPsqlConfig.dest.database
+ ? `${psqlToPsqlConfig.dest.database}@${psqlToPsqlConfig.dest.host}`
+ : 'Not set';
+ }
+ }
+
+ function updateStatusInfo() {
+ let info = [];
+ if (psqlToPsqlConfig.source.database) info.push(`🐘 Source PG: ${psqlToPsqlConfig.source.database}`);
+ if (psqlToPsqlConfig.dest.database) info.push(`🐘 Dest PG: ${psqlToPsqlConfig.dest.database}`);
+ if (s3ToS3Config.source.bucket) info.push(`📤 S3: ${s3ToS3Config.source.bucket}`);
+ if (s3ToS3Config.dest.bucket) info.push(`📥 S3: ${s3ToS3Config.dest.bucket}`);
+ if (activeMigration) info.push(`🚀 Migration: ${activeMigration}`);
+ info.push(`⚡ ${Object.keys(currentEnv).length} env vars`);
+
+ statusInfo.textContent = info.join(' • ') || 'Ready';
+ }
+
+ // ==================== PSQL to S3 Handlers ====================
+
  // Tests the source PostgreSQL connection for the PSQL→S3 flow and renders
  // the outcome into the connection-status panel.
  async function testPsqlToS3SourceConnectionHandler() {
    setLoading(true);
    try {
      // Prefer an explicit URI; otherwise assemble one from the discrete
      // fields, but only when every required field is filled in.
      let uri = psqlToS3Config.source.uri;
      if (!uri && psqlToS3Config.source.host && psqlToS3Config.source.user && psqlToS3Config.source.password && psqlToS3Config.source.database) {
        uri = `postgresql://${psqlToS3Config.source.user}:${psqlToS3Config.source.password}@${psqlToS3Config.source.host}:${psqlToS3Config.source.port}/${psqlToS3Config.source.database}`;
      }

      const result = await testPostgresConnection({ uri });

      const statusDiv = document.getElementById('psqlToS3_sourceConnectionStatus');
      if (result.success) {
        showNotification(`✅ PSQL to S3 - Source connection successful!`, 'success');
        // NOTE(review): values are interpolated into innerHTML; they come from
        // local config and the backend response here, but prefer textContent
        // if any of these can ever carry user-controlled markup — confirm.
        statusDiv.innerHTML = `
          Success: ✅ Connected
          Host: ${psqlToS3Config.source.host || result.connection?.host}:${psqlToS3Config.source.port || result.connection?.port}
          Version: ${result.version || 'Unknown'}
        `;
        statusDiv.className = 'status-message success';
      } else {
        showNotification(`❌ PSQL to S3 - Source connection failed: ${result.error}`, 'error');
        statusDiv.innerHTML = `Error: ${result.error}
`;
        statusDiv.className = 'status-message error';
      }
      statusDiv.style.display = 'block';
    } catch (error) {
      showNotification(`❌ Error testing source PostgreSQL connection: ${error.message}`, 'error');
    }
    setLoading(false);
  }
+
+ async function parsePsqlToS3SourceUriHandler() {
+ const uri = document.getElementById('psqlToS3_sourceUri')?.value;
+ if (!uri) {
+ showNotification('Please enter PostgreSQL URI', 'warning');
+ return;
+ }
+
+ setLoading(true);
+ try {
+ const result = await parsePostgresUri(uri);
+ if (result.success && result.parsed) {
+ psqlToS3Config.source = {
+ host: result.parsed.host || '',
+ user: result.parsed.user || '',
+ password: result.parsed.password || '',
+ port: result.parsed.port || 5432,
+ database: result.parsed.database || '',
+ uri: uri
+ };
+
+ document.getElementById('psqlToS3_sourceHost').value = result.parsed.host;
+ document.getElementById('psqlToS3_sourceUser').value = result.parsed.user;
+ document.getElementById('psqlToS3_sourcePassword').value = result.parsed.password;
+ document.getElementById('psqlToS3_sourcePort').value = result.parsed.port;
+ document.getElementById('psqlToS3_sourceDatabase').value = result.parsed.database;
+
+ showNotification('✅ Source PostgreSQL URI parsed successfully', 'success');
+ } else {
+ showNotification(`❌ Failed to parse PostgreSQL URI: ${result.error}`, 'error');
+ }
+ } catch (error) {
+ showNotification(`❌ Error parsing PostgreSQL URI: ${error.message}`, 'error');
+ }
+ setLoading(false);
+ }
+
+ async function getPsqlToS3SchemasHandler() {
+ setLoading(true);
+ try {
+ const params = {};
+
+ if (psqlToS3Config.source.uri) {
+ params.uri = psqlToS3Config.source.uri;
+ } else if (psqlToS3Config.source.host && psqlToS3Config.source.user && psqlToS3Config.source.password && psqlToS3Config.source.database) {
+ params.host = psqlToS3Config.source.host;
+ params.user = psqlToS3Config.source.user;
+ params.password = psqlToS3Config.source.password;
+ params.database = psqlToS3Config.source.database;
+ params.port = psqlToS3Config.source.port;
+ } else {
+ showNotification('Please enter source PostgreSQL connection details', 'warning');
+ setLoading(false);
+ return;
+ }
+
+ const result = await getPostgresSchemas(params);
+
+ if (result.success) {
+ renderPsqlToS3Schemas(result.schemas || []);
+ document.getElementById('psqlToS3_schemasSection').style.display = 'block';
+ showNotification(`✅ Found ${result.count} schema(s)`, 'success');
+ } else {
+ showNotification(`❌ Failed to get schemas: ${result.error}`, 'error');
+ }
+ } catch (error) {
+ showNotification(`❌ Error getting schemas: ${error.message}`, 'error');
+ }
+ setLoading(false);
+ }
+
+ async function getPsqlToS3TablesHandler() {
+ setLoading(true);
+ try {
+ const params = {};
+
+ if (psqlToS3Config.source.uri) {
+ params.uri = psqlToS3Config.source.uri;
+ } else if (psqlToS3Config.source.host && psqlToS3Config.source.user && psqlToS3Config.source.password && psqlToS3Config.source.database) {
+ params.host = psqlToS3Config.source.host;
+ params.user = psqlToS3Config.source.user;
+ params.password = psqlToS3Config.source.password;
+ params.database = psqlToS3Config.source.database;
+ params.port = psqlToS3Config.source.port;
+ } else {
+ showNotification('Please enter source PostgreSQL connection details', 'warning');
+ setLoading(false);
+ return;
+ }
+
+ const result = await getPostgresTables(params);
+ if (result.success) {
+ renderPsqlToS3Tables(result.tables || []);
+ document.getElementById('psqlToS3_tablesSection').style.display = 'block';
+ showNotification(`✅ Found ${result.count} table(s)`, 'success');
+ } else {
+ showNotification(`❌ Failed to get tables: ${result.error}`, 'error');
+ }
+ } catch (error) {
+ showNotification(`❌ Error getting tables: ${error.message}`, 'error');
+ }
+ setLoading(false);
+ }
+
+ async function testPsqlToS3PgConnectionHandler() {
+ if (!psqlToS3Config.source.uri && !psqlToS3Config.source.host) {
+ showNotification('Please enter PostgreSQL connection details', 'warning');
+ return;
+ }
+
+ setLoading(true);
+ try {
+ const uri = psqlToS3Config.source.uri || buildPostgresConnectionString(
+ psqlToS3Config.source.host,
+ psqlToS3Config.source.database,
+ psqlToS3Config.source.user,
+ psqlToS3Config.source.port
+ );
+
+ const result = await testPgToS3PostgresConnection(uri);
+ if (result.success) {
+ showNotification(`✅ PostgreSQL connection successful!`, 'success');
+ } else {
+ showNotification(`❌ PostgreSQL connection failed: ${result.error}`, 'error');
+ }
+ } catch (error) {
+ showNotification(`❌ Error testing PostgreSQL connection: ${error.message}`, 'error');
+ }
+ setLoading(false);
+ }
+
/**
 * Tests S3 connectivity for the PSQL-to-S3 flow using the configured
 * credentials, region and optional custom endpoint.
 */
async function testPsqlToS3S3ConnectionHandler() {
  setLoading(true);
  try {
    const { accessKeyId, secretAccessKey, region, endpointUrl } = psqlToS3Config.s3;
    const result = await testPgToS3S3Connection(accessKeyId, secretAccessKey, region, endpointUrl);

    if (!result.success) {
      showNotification(`❌ S3 connection failed: ${result.error}`, 'error');
    } else {
      showNotification(`✅ S3 connection successful!`, 'success');
    }
  } catch (error) {
    showNotification(`❌ Error testing S3 connection: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Starts a PostgreSQL -> S3 export migration and begins polling its status.
 *
 * NOTE(review): this function is re-declared later in this file with
 * SSE-based progress tracking; because function declarations are hoisted,
 * the later definition wins and this copy is dead code — remove one of them.
 */
async function startPsqlToS3MigrationHandler() {
  if (!psqlToS3Config.source.uri && !psqlToS3Config.source.host) {
    showNotification('Please enter PostgreSQL connection details', 'warning');
    return;
  }

  if (!psqlToS3Config.s3.bucket) {
    showNotification('Please enter S3 bucket name', 'warning');
    return;
  }

  setLoading(true);

  try {
    // NOTE(review): buildPostgresConnectionString is not passed the password —
    // confirm it is supplied elsewhere.
    const uri = psqlToS3Config.source.uri || buildPostgresConnectionString(
      psqlToS3Config.source.host,
      psqlToS3Config.source.database,
      psqlToS3Config.source.user,
      psqlToS3Config.source.port
    );

    const result = await startPgToS3Migration(
      uri,
      psqlToS3Config.s3.bucket,
      psqlToS3Config.s3.prefix,
      {
        compress: psqlToS3Config.compress,
        format: psqlToS3Config.format,
        accessKeyId: psqlToS3Config.s3.accessKeyId,
        secretAccessKey: psqlToS3Config.s3.secretAccessKey,
        region: psqlToS3Config.s3.region,
        endpointUrl: psqlToS3Config.s3.endpointUrl
      }
    );

    if (result.success) {
      activeMigration = result.migration_id;
      showNotification(`✅ PostgreSQL to S3 migration ${activeMigration} started!`, 'success');
      pollPgToS3MigrationStatus(activeMigration);
    } else {
      showNotification(`❌ Failed to start migration: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error starting migration: ${error.message}`, 'error');
  } finally {
    // Ensure the spinner clears on every exit path.
    setLoading(false);
  }
}
+
+ // ==================== PSQL to PSQL Handlers ====================
+
/**
 * Tests the source PostgreSQL connection for the PSQL-to-PSQL flow and
 * reports the outcome both as a toast notification and inline in the
 * #psqlToPsql_sourceConnectionStatus element.
 */
async function testPsqlToPsqlSourceConnectionHandler() {
  setLoading(true);
  try {
    // Prefer an explicit URI; otherwise assemble one only when every
    // individual field (host/user/password/database) is present.
    let uri = psqlToPsqlConfig.source.uri;
    if (!uri && psqlToPsqlConfig.source.host && psqlToPsqlConfig.source.user && psqlToPsqlConfig.source.password && psqlToPsqlConfig.source.database) {
      uri = `postgresql://${psqlToPsqlConfig.source.user}:${psqlToPsqlConfig.source.password}@${psqlToPsqlConfig.source.host}:${psqlToPsqlConfig.source.port}/${psqlToPsqlConfig.source.database}`;
    }

    // NOTE(review): if neither branch produced a URI this still calls the
    // API with { uri: undefined } — confirm the backend rejects that cleanly.
    const result = await testPostgresConnection({ uri });

    const statusDiv = document.getElementById('psqlToPsql_sourceConnectionStatus');
    if (result.success) {
      showNotification(`✅ PSQL to PSQL - Source connection successful!`, 'success');
      // NOTE(review): this template appears to have lost its HTML markup
      // (likely <br> separators) — verify the intended markup.
      statusDiv.innerHTML = `
 Success: ✅ Connected
 Host: ${psqlToPsqlConfig.source.host || result.connection?.host}:${psqlToPsqlConfig.source.port || result.connection?.port}
 Version: ${result.version || 'Unknown'}
 `;
      statusDiv.className = 'status-message success';
    } else {
      showNotification(`❌ PSQL to PSQL - Source connection failed: ${result.error}`, 'error');
      statusDiv.innerHTML = `Error: ${result.error}
`;
      statusDiv.className = 'status-message error';
    }
    statusDiv.style.display = 'block';
  } catch (error) {
    showNotification(`❌ Error testing source PostgreSQL connection: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Parses the source PostgreSQL URI typed into the form, stores the
 * normalized parts in psqlToPsqlConfig.source, and back-fills the
 * individual form fields.
 */
async function parsePsqlToPsqlSourceUriHandler() {
  const uri = document.getElementById('psqlToPsql_sourceUri')?.value;
  if (!uri) {
    showNotification('Please enter PostgreSQL URI', 'warning');
    return;
  }

  setLoading(true);
  try {
    const result = await parsePostgresUri(uri);
    if (result.success && result.parsed) {
      psqlToPsqlConfig.source = {
        host: result.parsed.host || '',
        user: result.parsed.user || '',
        password: result.parsed.password || '',
        port: result.parsed.port || 5432,
        database: result.parsed.database || '',
        uri: uri
      };

      // Populate the form from the normalized config so missing URI parts
      // render as '' (not the string "undefined"), and skip fields that are
      // absent from the DOM instead of throwing a TypeError.
      const src = psqlToPsqlConfig.source;
      const setField = (id, value) => {
        const el = document.getElementById(id);
        if (el) el.value = value;
      };
      setField('psqlToPsql_sourceHost', src.host);
      setField('psqlToPsql_sourceUser', src.user);
      setField('psqlToPsql_sourcePassword', src.password);
      setField('psqlToPsql_sourcePort', src.port);
      setField('psqlToPsql_sourceDatabase', src.database);

      showNotification('✅ Source PostgreSQL URI parsed successfully', 'success');
      // NOTE(review): the dest-URI counterpart calls updatePostgresSummaries()
      // at this point — confirm whether the source flow should do the same.
    } else {
      showNotification(`❌ Failed to parse PostgreSQL URI: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error parsing PostgreSQL URI: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
/**
 * Fetches the schema list from the source PostgreSQL database and renders
 * it. Requires either a URI or a complete set of host credentials.
 */
async function getPsqlToPsqlSchemasHandler() {
  setLoading(true);
  try {
    const src = psqlToPsqlConfig.source;
    let params;
    if (src.uri) {
      params = { uri: src.uri };
    } else if (src.host && src.user && src.password && src.database) {
      params = {
        host: src.host,
        user: src.user,
        password: src.password,
        database: src.database,
        port: src.port
      };
    } else {
      showNotification('Please enter source PostgreSQL connection details', 'warning');
      setLoading(false);
      return;
    }

    const result = await getPostgresSchemas(params);

    if (!result.success) {
      showNotification(`❌ Failed to get schemas: ${result.error}`, 'error');
    } else {
      psqlToPsqlSchemas = result.schemas || [];
      renderPsqlToPsqlSchemas();
      document.getElementById('psqlToPsql_schemasSection').style.display = 'block';
      showNotification(`✅ Found ${result.count} schema(s)`, 'success');
    }
  } catch (error) {
    showNotification(`❌ Error getting schemas: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Fetches the table list from the source PostgreSQL database and renders
 * it. Requires either a URI or a complete set of host credentials.
 */
async function getPsqlToPsqlTablesHandler() {
  setLoading(true);
  try {
    const src = psqlToPsqlConfig.source;
    let params;
    if (src.uri) {
      params = { uri: src.uri };
    } else if (src.host && src.user && src.password && src.database) {
      params = {
        host: src.host,
        user: src.user,
        password: src.password,
        database: src.database,
        port: src.port
      };
    } else {
      showNotification('Please enter source PostgreSQL connection details', 'warning');
      setLoading(false);
      return;
    }

    const result = await getPostgresTables(params);
    if (!result.success) {
      showNotification(`❌ Failed to get tables: ${result.error}`, 'error');
    } else {
      psqlToPsqlTables = result.tables || [];
      renderPsqlToPsqlTables();
      document.getElementById('psqlToPsql_tablesSection').style.display = 'block';
      showNotification(`✅ Found ${result.count} table(s)`, 'success');
    }
  } catch (error) {
    showNotification(`❌ Error getting tables: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Tests the destination PostgreSQL connection for the PSQL-to-PSQL flow
 * and reports the outcome both as a toast notification and inline in the
 * #psqlToPsql_destConnectionStatus element.
 */
async function testPsqlToPsqlDestConnectionHandler() {
  setLoading(true);
  try {
    // Prefer an explicit URI; otherwise assemble one only when every
    // individual field (host/user/password/database) is present.
    let uri = psqlToPsqlConfig.dest.uri;
    if (!uri && psqlToPsqlConfig.dest.host && psqlToPsqlConfig.dest.user && psqlToPsqlConfig.dest.password && psqlToPsqlConfig.dest.database) {
      uri = `postgresql://${psqlToPsqlConfig.dest.user}:${psqlToPsqlConfig.dest.password}@${psqlToPsqlConfig.dest.host}:${psqlToPsqlConfig.dest.port}/${psqlToPsqlConfig.dest.database}`;
    }

    // NOTE(review): if neither branch produced a URI this still calls the
    // API with { uri: undefined } — confirm the backend rejects that cleanly.
    const result = await testPostgresConnection({ uri });

    const statusDiv = document.getElementById('psqlToPsql_destConnectionStatus');
    if (result.success) {
      showNotification(`✅ PSQL to PSQL - Destination connection successful!`, 'success');
      // NOTE(review): this template appears to have lost its HTML markup
      // (likely <br> separators) — verify the intended markup.
      statusDiv.innerHTML = `
 Success: ✅ Connected
 Host: ${psqlToPsqlConfig.dest.host || result.connection?.host}:${psqlToPsqlConfig.dest.port || result.connection?.port}
 Version: ${result.version || 'Unknown'}
 `;
      statusDiv.className = 'status-message success';
    } else {
      showNotification(`❌ PSQL to PSQL - Destination connection failed: ${result.error}`, 'error');
      statusDiv.innerHTML = `Error: ${result.error}
`;
      statusDiv.className = 'status-message error';
    }
    statusDiv.style.display = 'block';
  } catch (error) {
    showNotification(`❌ Error testing destination PostgreSQL connection: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Parses the destination PostgreSQL URI typed into the form, stores the
 * normalized parts in psqlToPsqlConfig.dest, back-fills the individual
 * form fields, and refreshes the connection summaries.
 */
async function parsePsqlToPsqlDestUriHandler() {
  const uri = document.getElementById('psqlToPsql_destUri')?.value;
  if (!uri) {
    showNotification('Please enter destination PostgreSQL URI', 'warning');
    return;
  }

  setLoading(true);
  try {
    const result = await parsePostgresUri(uri);
    if (result.success && result.parsed) {
      psqlToPsqlConfig.dest = {
        host: result.parsed.host || '',
        user: result.parsed.user || '',
        password: result.parsed.password || '',
        port: result.parsed.port || 5432,
        database: result.parsed.database || '',
        uri: uri
      };

      // Populate the form from the normalized config so missing URI parts
      // render as '' (not the string "undefined"), and skip fields that are
      // absent from the DOM instead of throwing a TypeError.
      const dest = psqlToPsqlConfig.dest;
      const setField = (id, value) => {
        const el = document.getElementById(id);
        if (el) el.value = value;
      };
      setField('psqlToPsql_destHost', dest.host);
      setField('psqlToPsql_destUser', dest.user);
      setField('psqlToPsql_destPassword', dest.password);
      setField('psqlToPsql_destPort', dest.port);
      setField('psqlToPsql_destDatabase', dest.database);

      showNotification('✅ Destination PostgreSQL URI parsed successfully', 'success');
      updatePostgresSummaries();
    } else {
      showNotification(`❌ Failed to parse PostgreSQL URI: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error parsing PostgreSQL URI: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
/**
 * Starts a PostgreSQL -> PostgreSQL migration, optionally restricted to
 * the selected schemas/tables, then begins polling its status.
 *
 * NOTE(review): re-declared later in this file with SSE-based progress
 * tracking; function-declaration hoisting means the later definition wins,
 * so this copy is dead code — remove one of them.
 */
async function startPsqlToPsqlMigrationHandler() {
  // NOTE(review): buildPostgresConnectionString is not passed the password —
  // confirm it is supplied elsewhere.
  const sourceUri = psqlToPsqlConfig.source.uri || buildPostgresConnectionString(
    psqlToPsqlConfig.source.host,
    psqlToPsqlConfig.source.database,
    psqlToPsqlConfig.source.user,
    psqlToPsqlConfig.source.port
  );

  const destUri = psqlToPsqlConfig.dest.uri || buildPostgresConnectionString(
    psqlToPsqlConfig.dest.host,
    psqlToPsqlConfig.dest.database,
    psqlToPsqlConfig.dest.user,
    psqlToPsqlConfig.dest.port
  );

  if (!sourceUri) {
    showNotification('Please enter source PostgreSQL connection details', 'warning');
    return;
  }

  if (!destUri) {
    showNotification('Please enter destination PostgreSQL connection details', 'warning');
    return;
  }

  setLoading(true);

  try {
    const result = await startPostgresMigration(
      sourceUri,
      destUri,
      psqlToPsqlSelectedSchemas.length > 0 ? psqlToPsqlSelectedSchemas : null,
      psqlToPsqlSelectedTables.length > 0 ? psqlToPsqlSelectedTables : null
    );

    if (result.success) {
      activeMigration = result.migration_id;
      showNotification(`✅ PostgreSQL migration ${activeMigration} started!`, 'success');
      pollPostgresMigrationStatus(activeMigration);
    } else {
      showNotification(`❌ Failed to start migration: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error starting migration: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
+ // ==================== S3 to S3 Handlers ====================
+
/**
 * Tests the source S3 connection for the S3-to-S3 flow using the explicit
 * form credentials (environment variables disabled) and reports the
 * outcome inline in #s3ToS3_sourceConnectionStatus.
 */
async function testS3ToS3SourceConnectionHandler() {
  setLoading(true);
  try {
    const result = await testSourceS3Connection({
      useEnvVars: false, // always use the credentials typed into the form
      accessKeyId: s3ToS3Config.source.accessKeyId,
      secretAccessKey: s3ToS3Config.source.secretAccessKey,
      region: s3ToS3Config.source.region,
      endpointUrl: s3ToS3Config.source.endpointUrl,
      sessionToken: s3ToS3Config.source.sessionToken
    });

    const statusDiv = document.getElementById('s3ToS3_sourceConnectionStatus');
    if (result.success) {
      showNotification(`✅ S3 to S3 - Source connection successful!`, 'success');
      // NOTE(review): this template appears to have lost its HTML markup
      // (likely <br> separators) — verify the intended markup.
      statusDiv.innerHTML = `
 Success: ✅ Connected
 Endpoint: ${s3ToS3Config.source.endpointUrl || 'AWS S3 (default)'}
 Region: ${s3ToS3Config.source.region}
 Buckets Found: ${result.bucket_count || 0}
 `;
      statusDiv.className = 'status-message success';
    } else {
      showNotification(`❌ S3 to S3 - Source connection failed: ${result.error}`, 'error');
      statusDiv.innerHTML = `Error: ${result.error}
`;
      statusDiv.className = 'status-message error';
    }
    statusDiv.style.display = 'block';
  } catch (error) {
    showNotification(`❌ Error testing source S3 connection: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Lists the buckets visible to the source S3 credentials and renders them
 * in the source bucket picker.
 */
async function listS3ToS3SourceBucketsHandler() {
  setLoading(true);
  try {
    const src = s3ToS3Config.source;
    const result = await listSourceS3Buckets(
      src.accessKeyId,
      src.secretAccessKey,
      src.region,
      src.endpointUrl,
      src.sessionToken
    );

    if (!result.success) {
      showNotification(`❌ Failed to list source buckets: ${result.error}`, 'error');
    } else {
      s3ToS3SourceBuckets = result.buckets || [];
      renderS3ToS3SourceBuckets();
      document.getElementById('s3ToS3_sourceBucketsList').style.display = 'block';
      showNotification(`✅ Found ${s3ToS3SourceBuckets.length} source bucket(s)`, 'success');
    }
  } catch (error) {
    showNotification(`❌ Error listing source buckets: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Tests the destination S3 connection for the S3-to-S3 flow using the
 * explicit form credentials (environment variables disabled) and reports
 * the outcome inline in #s3ToS3_destConnectionStatus.
 */
async function testS3ToS3DestConnectionHandler() {
  setLoading(true);
  try {
    const result = await testDestinationS3Connection({
      useEnvVars: false, // always use the credentials typed into the form
      accessKeyId: s3ToS3Config.dest.accessKeyId,
      secretAccessKey: s3ToS3Config.dest.secretAccessKey,
      region: s3ToS3Config.dest.region,
      endpointUrl: s3ToS3Config.dest.endpointUrl,
      sessionToken: s3ToS3Config.dest.sessionToken
    });

    const statusDiv = document.getElementById('s3ToS3_destConnectionStatus');
    if (result.success) {
      showNotification(`✅ S3 to S3 - Destination connection successful!`, 'success');
      // NOTE(review): this template appears to have lost its HTML markup
      // (likely <br> separators) — verify the intended markup.
      statusDiv.innerHTML = `
 Success: ✅ Connected
 Endpoint: ${s3ToS3Config.dest.endpointUrl || 'AWS S3 (default)'}
 Region: ${s3ToS3Config.dest.region}
 Buckets Found: ${result.bucket_count || 0}
 `;
      statusDiv.className = 'status-message success';
    } else {
      showNotification(`❌ S3 to S3 - Destination connection failed: ${result.error}`, 'error');
      statusDiv.innerHTML = `Error: ${result.error}
`;
      statusDiv.className = 'status-message error';
    }
    statusDiv.style.display = 'block';
  } catch (error) {
    showNotification(`❌ Error testing destination S3 connection: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Lists the buckets visible to the destination S3 credentials and renders
 * them in the destination bucket picker.
 */
async function listS3ToS3DestBucketsHandler() {
  setLoading(true);
  try {
    const dest = s3ToS3Config.dest;
    const result = await listDestinationS3Buckets(
      dest.accessKeyId,
      dest.secretAccessKey,
      dest.region,
      dest.endpointUrl,
      dest.sessionToken
    );

    if (!result.success) {
      showNotification(`❌ Failed to list destination buckets: ${result.error}`, 'error');
    } else {
      s3ToS3DestBuckets = result.buckets || [];
      renderS3ToS3DestBuckets();
      document.getElementById('s3ToS3_destBucketsList').style.display = 'block';
      showNotification(`✅ Found ${s3ToS3DestBuckets.length} destination bucket(s)`, 'success');
    }
  } catch (error) {
    showNotification(`❌ Error listing destination buckets: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Creates the configured destination bucket, showing an info toast
 * instead when the bucket already exists.
 */
async function createS3ToS3DestBucketHandler() {
  const dest = s3ToS3Config.dest;
  if (!dest.bucket) {
    showNotification('Please enter destination bucket name', 'warning');
    return;
  }

  setLoading(true);
  try {
    const credentials = {
      accessKeyId: dest.accessKeyId,
      secretAccessKey: dest.secretAccessKey,
      endpointUrl: dest.endpointUrl,
      sessionToken: dest.sessionToken
    };
    const result = await createS3Bucket(dest.bucket, dest.region, credentials);

    if (!result.success) {
      showNotification(`❌ Failed to create bucket: ${result.error}`, 'error');
    } else if (result.created) {
      showNotification(`✅ Bucket created successfully: ${dest.bucket}`, 'success');
    } else {
      showNotification(`ℹ️ Bucket already exists: ${dest.bucket}`, 'info');
    }
  } catch (error) {
    showNotification(`❌ Error creating bucket: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Starts an S3 -> S3 bucket migration and begins polling its status.
 *
 * NOTE(review): re-declared later in this file with SSE-based progress
 * tracking; function-declaration hoisting means the later definition wins,
 * so this copy is dead code — remove one of them.
 */
async function startS3ToS3MigrationHandler() {
  if (!s3ToS3Config.source.bucket) {
    showNotification('Please select source bucket', 'warning');
    return;
  }

  if (!s3ToS3Config.dest.bucket) {
    showNotification('Please select destination bucket', 'warning');
    return;
  }

  setLoading(true);

  try {
    // NOTE(review): these element IDs are prefixed psqlToPsql_* although this
    // is the S3-to-S3 flow — they look like a copy-paste from the PSQL
    // handlers; confirm against the markup.
    const includePatterns = document.getElementById('psqlToPsql_includePatterns')?.value
      ?.split(',').map(p => p.trim()).filter(p => p) || null;
    const excludePatterns = document.getElementById('psqlToPsql_excludePatterns')?.value
      ?.split(',').map(p => p.trim()).filter(p => p) || null;

    const result = await startS3Migration(
      s3ToS3Config.source.bucket,
      s3ToS3Config.dest.bucket,
      '', // empty prefix: migrate the whole bucket
      {
        sourceAccessKeyId: s3ToS3Config.source.accessKeyId,
        sourceSecretAccessKey: s3ToS3Config.source.secretAccessKey,
        sourceRegion: s3ToS3Config.source.region,
        sourceEndpointUrl: s3ToS3Config.source.endpointUrl,
        sourceSessionToken: s3ToS3Config.source.sessionToken,

        destAccessKeyId: s3ToS3Config.dest.accessKeyId,
        destSecretAccessKey: s3ToS3Config.dest.secretAccessKey,
        destRegion: s3ToS3Config.dest.region,
        destEndpointUrl: s3ToS3Config.dest.endpointUrl,
        destSessionToken: s3ToS3Config.dest.sessionToken,

        includePatterns,
        excludePatterns,
        preserveMetadata: document.getElementById('psqlToPsql_preserveMetadata')?.checked,
        storageClass: document.getElementById('psqlToPsql_storageClass')?.value,
        createDestBucket: document.getElementById('psqlToPsql_createDestBucket')?.checked,
        // Explicit radix; default to 5 parallel transfers when unset/invalid.
        maxConcurrent: parseInt(document.getElementById('psqlToPsql_maxConcurrent')?.value, 10) || 5
      }
    );

    if (result.success) {
      activeMigration = result.migration_id;
      showNotification(`✅ S3 to S3 migration ${activeMigration} started!`, 'success');
      pollS3MigrationStatus(activeMigration);
    } else {
      showNotification(`❌ Failed to start migration: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error starting migration: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
+ // ==================== Polling Functions ====================
+
/**
 * Polls a PostgreSQL migration until it reports success or failure,
 * streaming any logs into the UI. Retries every 2s while running and
 * backs off to 5s after a transient polling error.
 */
async function pollPostgresMigrationStatus(migrationId) {
  const retry = (delayMs) => setTimeout(() => pollPostgresMigrationStatus(migrationId), delayMs);
  try {
    const status = await getPostgresMigrationStatus(migrationId);
    if (!status.success || !status.status) return; // nothing to poll — stop

    const info = status.status;
    if (info.logs) {
      migrationLogs = info.logs;
      renderMigrationLogs();
      document.getElementById('migrationLogs').style.display = 'block';
    }

    if (info.success === true) {
      showNotification(`✅ Migration ${migrationId} completed!`, 'success');
      loadMigrationsHandler();
    } else if (info.success === false) {
      showNotification(`❌ Migration ${migrationId} failed: ${info.error}`, 'error');
    } else {
      retry(2000); // still running
    }
  } catch (error) {
    console.error('Error polling migration status:', error);
    retry(5000); // transient failure — back off
  }
}
+
/**
 * Polls an S3 migration until it reports success or failure, streaming
 * any logs into the UI. Retries every 2s while running and backs off to
 * 5s after a transient polling error.
 */
async function pollS3MigrationStatus(migrationId) {
  const retry = (delayMs) => setTimeout(() => pollS3MigrationStatus(migrationId), delayMs);
  try {
    const status = await getS3MigrationStatus(migrationId);
    if (!status.success || !status.status) return; // nothing to poll — stop

    const info = status.status;
    if (info.logs) {
      migrationLogs = info.logs;
      renderMigrationLogs();
      document.getElementById('migrationLogs').style.display = 'block';
    }

    if (info.success === true) {
      showNotification(`✅ Migration ${migrationId} completed!`, 'success');
      loadMigrationsHandler();
    } else if (info.success === false) {
      showNotification(`❌ Migration ${migrationId} failed: ${info.error}`, 'error');
    } else {
      retry(2000); // still running
    }
  } catch (error) {
    console.error('Error polling migration status:', error);
    retry(5000); // transient failure — back off
  }
}
+
/**
 * Polls a PostgreSQL-to-S3 migration until it reports success or failure,
 * streaming any logs into the UI. Retries every 2s while running and
 * backs off to 5s after a transient polling error.
 */
async function pollPgToS3MigrationStatus(migrationId) {
  const retry = (delayMs) => setTimeout(() => pollPgToS3MigrationStatus(migrationId), delayMs);
  try {
    const status = await getPgToS3MigrationStatus(migrationId);
    if (!status.success || !status.status) return; // nothing to poll — stop

    const info = status.status;
    if (info.logs) {
      migrationLogs = info.logs;
      renderMigrationLogs();
      document.getElementById('migrationLogs').style.display = 'block';
    }

    if (info.success === true) {
      showNotification(`✅ Migration ${migrationId} completed!`, 'success');
      loadMigrationsHandler();
    } else if (info.success === false) {
      showNotification(`❌ Migration ${migrationId} failed: ${info.error}`, 'error');
    } else {
      retry(2000); // still running
    }
  } catch (error) {
    console.error('Error polling migration status:', error);
    retry(5000); // transient failure — back off
  }
}
+
+ // ==================== Migration Status Functions ====================
+
/**
 * Loads all migrations (PostgreSQL, S3, and PostgreSQL-to-S3) into the
 * shared `migrations` list and renders them.
 */
async function loadMigrationsHandler() {
  setLoading(true);
  try {
    // The three list calls are independent — fetch them in parallel
    // instead of awaiting each one serially.
    const [postgresResult, s3Result, pgToS3Result] = await Promise.all([
      listPostgresMigrations(),
      listS3Migrations(),
      listPgToS3Migrations()
    ]);

    migrations = [];
    if (postgresResult.success) migrations = migrations.concat(postgresResult.migrations || []);
    if (s3Result.success) migrations = migrations.concat(s3Result.migrations || []);
    if (pgToS3Result.success) migrations = migrations.concat(pgToS3Result.migrations || []);

    renderMigrations();
    document.getElementById('migrationsList').style.display = 'block';
    showNotification(`✅ Found ${migrations.length} migrations`, 'success');
  } catch (error) {
    console.error('Error loading migrations:', error);
    showNotification(`❌ Error loading migrations: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
+ // ==================== Environment Handlers ====================
+
/**
 * Refreshes the cached environment variables from the backend and
 * re-renders the env preview and config cards.
 */
async function loadCurrentEnvHandler() {
  try {
    const result = await getCurrentEnv();
    if (result.success) {
      currentEnv = result.environment_variables || {};
      envPreview.textContent = formatEnvVars(currentEnv, 'dotenv');
      renderEnvConfigCards();
      showNotification('✅ Environment refreshed', 'success');
      updateStatusInfo();
    } else {
      // Previously a failed fetch was silently ignored; surface it.
      showNotification(`❌ Failed to load environment: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error loading environment: ${error.message}`, 'error');
  }
}
// ==================== Adding tracking functions to App() ====================

// Add these functions inside App(), after the existing polling functions
+
+/**
+ * Track migration with real-time progress using Server-Sent Events
+ * @param {string} migrationId - Migration ID to track
+ * @param {string} type - Migration type ('postgres', 's3', 'postgres-s3')
+ */
/**
 * Track migration with real-time progress using Server-Sent Events
 * @param {string} migrationId - Migration ID to track
 * @param {string} type - Migration type ('postgres', 's3', 'postgres-s3')
 */
function trackMigrationWithProgress(migrationId, type) {
  showNotification(`📊 Tracking migration ${migrationId}...`, 'info');

  // Create the progress display modal.
  // NOTE(review): this template is empty in the source — the modal markup
  // (including the .close-modal button and the #progress-<id> container
  // queried below) appears to have been stripped; restore it, otherwise the
  // querySelector calls below return null and throw.
  const progressContainer = document.createElement('div');
  progressContainer.className = 'progress-modal';
  progressContainer.innerHTML = `

  `;

  document.body.appendChild(progressContainer);

  const closeBtn = progressContainer.querySelector('.close-modal');
  closeBtn.addEventListener('click', () => progressContainer.remove());

  // Start progress tracking.
  const progressDiv = progressContainer.querySelector(`#progress-${migrationId}`);

  // NOTE(review): this template is also mostly empty — the progress-bar,
  // stats, details, and stop-button markup queried below seems stripped.
  progressDiv.innerHTML = `



 Connecting...



  `;

  const progressBar = progressDiv.querySelector('.progress-bar-fill');
  const statsDiv = progressDiv.querySelector('.progress-stats');
  const detailsDiv = progressDiv.querySelector('.progress-details');
  const stopBtn = progressDiv.querySelector('.stop-progress-btn');

  // Create the progress stream (Server-Sent Events wrapper).
  const stream = createProgressStream(migrationId, type, {
    onProgress: (progress) => {
      // Update the progress bar.
      const percentage = progress.percentage || progress.percentages?.size || 0;
      progressBar.style.width = `${percentage}%`;

      // Update the statistics.
      // NOTE(review): the replacement string contains a literal newline —
      // it was likely '<br>' before markup was stripped.
      statsDiv.innerHTML = formatProgressDisplay(progress).replace(/\n/g, '
');

      // Update the details (speed / ETA / elapsed, when available).
      // NOTE(review): each appended template ends in a literal newline that
      // was likely a stripped '<br>'.
      let details = '';
      if (progress.current_speed_formatted || progress.speed?.current_formatted) {
        details += `⚡ Speed: ${progress.current_speed_formatted || progress.speed?.current_formatted}
`;
      }
      if (progress.eta_formatted || progress.time?.eta_formatted) {
        details += `⏳ ETA: ${progress.eta_formatted || progress.time?.eta_formatted}
`;
      }
      if (progress.elapsed_time_formatted || progress.time?.elapsed_formatted) {
        details += `⏱️ Elapsed: ${progress.elapsed_time_formatted || progress.time?.elapsed_formatted}
`;
      }
      detailsDiv.innerHTML = details;
    },
    onComplete: (completion) => {
      statsDiv.innerHTML = '✅ Migration completed successfully!';
      progressBar.style.width = '100%';
      showNotification(`Migration ${migrationId} completed!`, 'success');
      stopBtn.remove();

      // Refresh the migrations list.
      loadMigrationsHandler();
    },
    onError: (error) => {
      statsDiv.innerHTML = `❌ Error: ${error.error}`;
      showNotification(`Migration error: ${error.error}`, 'error');
    }
  });

  stopBtn.addEventListener('click', () => {
    stream.stop();
    progressContainer.remove();
    showNotification('Progress tracking stopped', 'info');
  });
}
+
// ==================== Modified migration-start functions ====================

// Replace the migration-start functions with these modified versions:
+
/**
 * Starts a PostgreSQL -> S3 export migration and tracks its progress via
 * Server-Sent Events instead of polling.
 *
 * NOTE(review): this duplicates an earlier declaration in this file; due to
 * function-declaration hoisting this later copy is the one that runs —
 * remove the earlier one.
 */
async function startPsqlToS3MigrationHandler() {
  if (!psqlToS3Config.source.uri && !psqlToS3Config.source.host) {
    showNotification('Please enter PostgreSQL connection details', 'warning');
    return;
  }

  if (!psqlToS3Config.s3.bucket) {
    showNotification('Please enter S3 bucket name', 'warning');
    return;
  }

  setLoading(true);

  try {
    // NOTE(review): buildPostgresConnectionString is not passed the password —
    // confirm it is supplied elsewhere.
    const uri = psqlToS3Config.source.uri || buildPostgresConnectionString(
      psqlToS3Config.source.host,
      psqlToS3Config.source.database,
      psqlToS3Config.source.user,
      psqlToS3Config.source.port
    );

    const result = await startPgToS3Migration(
      uri,
      psqlToS3Config.s3.bucket,
      psqlToS3Config.s3.prefix,
      {
        compress: psqlToS3Config.compress,
        format: psqlToS3Config.format,
        accessKeyId: psqlToS3Config.s3.accessKeyId,
        secretAccessKey: psqlToS3Config.s3.secretAccessKey,
        region: psqlToS3Config.s3.region,
        endpointUrl: psqlToS3Config.s3.endpointUrl
      }
    );

    if (result.success) {
      activeMigration = result.migration_id;
      showNotification(`✅ PostgreSQL to S3 migration ${activeMigration} started!`, 'success');

      // Use live SSE progress instead of polling.
      trackMigrationWithProgress(activeMigration, 'postgres-s3');
    } else {
      showNotification(`❌ Failed to start migration: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error starting migration: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
/**
 * Starts a PostgreSQL -> PostgreSQL migration, optionally restricted to
 * the selected schemas/tables, and tracks its progress via Server-Sent
 * Events instead of polling.
 *
 * NOTE(review): this duplicates an earlier declaration in this file; due to
 * function-declaration hoisting this later copy is the one that runs —
 * remove the earlier one.
 */
async function startPsqlToPsqlMigrationHandler() {
  // NOTE(review): buildPostgresConnectionString is not passed the password —
  // confirm it is supplied elsewhere.
  const sourceUri = psqlToPsqlConfig.source.uri || buildPostgresConnectionString(
    psqlToPsqlConfig.source.host,
    psqlToPsqlConfig.source.database,
    psqlToPsqlConfig.source.user,
    psqlToPsqlConfig.source.port
  );

  const destUri = psqlToPsqlConfig.dest.uri || buildPostgresConnectionString(
    psqlToPsqlConfig.dest.host,
    psqlToPsqlConfig.dest.database,
    psqlToPsqlConfig.dest.user,
    psqlToPsqlConfig.dest.port
  );

  if (!sourceUri) {
    showNotification('Please enter source PostgreSQL connection details', 'warning');
    return;
  }

  if (!destUri) {
    showNotification('Please enter destination PostgreSQL connection details', 'warning');
    return;
  }

  setLoading(true);

  try {
    const result = await startPostgresMigration(
      sourceUri,
      destUri,
      psqlToPsqlSelectedSchemas.length > 0 ? psqlToPsqlSelectedSchemas : null,
      psqlToPsqlSelectedTables.length > 0 ? psqlToPsqlSelectedTables : null
    );

    if (result.success) {
      activeMigration = result.migration_id;
      showNotification(`✅ PostgreSQL migration ${activeMigration} started!`, 'success');

      // Use live SSE progress instead of polling.
      trackMigrationWithProgress(activeMigration, 'postgres');
    } else {
      showNotification(`❌ Failed to start migration: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error starting migration: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
+
/**
 * Starts an S3 -> S3 bucket migration and tracks its progress via
 * Server-Sent Events instead of polling.
 *
 * NOTE(review): this duplicates an earlier declaration in this file; due to
 * function-declaration hoisting this later copy is the one that runs —
 * remove the earlier one.
 */
async function startS3ToS3MigrationHandler() {
  if (!s3ToS3Config.source.bucket) {
    showNotification('Please select source bucket', 'warning');
    return;
  }

  if (!s3ToS3Config.dest.bucket) {
    showNotification('Please select destination bucket', 'warning');
    return;
  }

  setLoading(true);

  try {
    // NOTE(review): these element IDs are prefixed psqlToPsql_* although this
    // is the S3-to-S3 flow — they look like a copy-paste from the PSQL
    // handlers; confirm against the markup.
    const includePatterns = document.getElementById('psqlToPsql_includePatterns')?.value
      ?.split(',').map(p => p.trim()).filter(p => p) || null;
    const excludePatterns = document.getElementById('psqlToPsql_excludePatterns')?.value
      ?.split(',').map(p => p.trim()).filter(p => p) || null;

    const result = await startS3Migration(
      s3ToS3Config.source.bucket,
      s3ToS3Config.dest.bucket,
      '', // empty prefix: migrate the whole bucket
      {
        sourceAccessKeyId: s3ToS3Config.source.accessKeyId,
        sourceSecretAccessKey: s3ToS3Config.source.secretAccessKey,
        sourceRegion: s3ToS3Config.source.region,
        sourceEndpointUrl: s3ToS3Config.source.endpointUrl,
        sourceSessionToken: s3ToS3Config.source.sessionToken,

        destAccessKeyId: s3ToS3Config.dest.accessKeyId,
        destSecretAccessKey: s3ToS3Config.dest.secretAccessKey,
        destRegion: s3ToS3Config.dest.region,
        destEndpointUrl: s3ToS3Config.dest.endpointUrl,
        destSessionToken: s3ToS3Config.dest.sessionToken,

        includePatterns,
        excludePatterns,
        preserveMetadata: document.getElementById('psqlToPsql_preserveMetadata')?.checked,
        storageClass: document.getElementById('psqlToPsql_storageClass')?.value,
        createDestBucket: document.getElementById('psqlToPsql_createDestBucket')?.checked,
        // Explicit radix; default to 5 parallel transfers when unset/invalid.
        maxConcurrent: parseInt(document.getElementById('psqlToPsql_maxConcurrent')?.value, 10) || 5
      }
    );

    if (result.success) {
      activeMigration = result.migration_id;
      showNotification(`✅ S3 to S3 migration ${activeMigration} started!`, 'success');

      // Use live SSE progress instead of polling.
      trackMigrationWithProgress(activeMigration, 's3');
    } else {
      showNotification(`❌ Failed to start migration: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error starting migration: ${error.message}`, 'error');
  } finally {
    setLoading(false);
  }
}
/**
 * Collects the connection settings from the PSQL-to-PSQL and S3-to-S3
 * configs and injects them into the backend environment, then refreshes
 * the local environment view.
 */
async function injectEnvironmentHandler() {
  setLoading(true);
  try {
    const pgSrc = psqlToPsqlConfig.source;
    const pgDst = psqlToPsqlConfig.dest;
    const s3Src = s3ToS3Config.source;
    const s3Dst = s3ToS3Config.dest;

    const envVars = {
      // PSQL to PSQL Source
      SOURCE_PG_HOST: pgSrc.host,
      SOURCE_PG_PORT: pgSrc.port.toString(),
      SOURCE_PG_USER: pgSrc.user,
      SOURCE_PG_PASSWORD: pgSrc.password,
      SOURCE_PG_DATABASE: pgSrc.database,

      // PSQL to PSQL Destination
      DEST_PG_HOST: pgDst.host,
      DEST_PG_PORT: pgDst.port.toString(),
      DEST_PG_USER: pgDst.user,
      DEST_PG_PASSWORD: pgDst.password,
      DEST_PG_DATABASE: pgDst.database,

      // S3 Source
      SOURCE_AWS_ACCESS_KEY_ID: s3Src.accessKeyId,
      SOURCE_AWS_SECRET_ACCESS_KEY: s3Src.secretAccessKey,
      SOURCE_AWS_REGION: s3Src.region,
      SOURCE_AWS_ENDPOINT_URL: s3Src.endpointUrl,
      SOURCE_S3_BUCKET: s3Src.bucket,

      // S3 Destination
      DEST_AWS_ACCESS_KEY_ID: s3Dst.accessKeyId,
      DEST_AWS_SECRET_ACCESS_KEY: s3Dst.secretAccessKey,
      DEST_AWS_REGION: s3Dst.region,
      DEST_AWS_ENDPOINT_URL: s3Dst.endpointUrl,
      DEST_S3_BUCKET: s3Dst.bucket
    };

    const result = await injectEnv(envVars);
    if (!result.success) {
      showNotification(`❌ Failed to inject environment: ${result.error}`, 'error');
    } else {
      showNotification(`✅ Injected ${result.injected_variables?.length || 0} environment variables`, 'success');
      loadCurrentEnvHandler();
    }
  } catch (error) {
    showNotification(`❌ Error injecting environment: ${error.message}`, 'error');
  }
  setLoading(false);
}
+
/**
 * Clears the injected environment by sending an empty variable set, then
 * refreshes the local environment view.
 */
async function clearEnvironmentHandler() {
  try {
    const result = await injectEnv({});
    if (result.success) {
      showNotification('✅ Environment cleared', 'success');
      loadCurrentEnvHandler();
    } else {
      // Previously a failed clear was silently ignored; surface it.
      showNotification(`❌ Failed to clear environment: ${result.error}`, 'error');
    }
  } catch (error) {
    showNotification(`❌ Error clearing environment: ${error.message}`, 'error');
  }
}
+
/**
 * Copies the current environment variables to the clipboard in the format
 * selected in the #envFormat dropdown (defaults to dotenv).
 */
async function copyEnvToClipboardHandler() {
  const varCount = currentEnv ? Object.keys(currentEnv).length : 0;
  if (varCount === 0) {
    showNotification('No environment variables to copy', 'warning');
    return;
  }

  const format = document.getElementById('envFormat')?.value || 'dotenv';
  const formatted = formatEnvVars(currentEnv, format);
  if (await copyToClipboard(formatted)) {
    showNotification(`✅ Copied ${varCount} variables to clipboard`, 'success');
  } else {
    showNotification(`❌ Failed to copy to clipboard`, 'error');
  }
}
+
+ // ==================== Security Handlers ====================
+
+  // Clears the server-side session and, on success, resets every in-memory
+  // migration config back to its defaults, wipes the env preview and
+  // refreshes the status/security widgets.
+  async function clearSessionHandler() {
+    setLoading(true);
+    try {
+      const result = await clearSession();
+      if (result.success) {
+        showNotification(`✅ ${result.message}`, 'success');
+        // Reset all configs
+        psqlToS3Config = { source: { host: '', user: '', password: '', port: 5432, database: '', uri: '' }, s3: { accessKeyId: '', secretAccessKey: '', region: 'us-east-1', endpointUrl: '', bucket: '', prefix: '' }, compress: true, format: 'csv' };
+        psqlToPsqlConfig = { source: { host: '', user: '', password: '', port: 5432, database: '', uri: '' }, dest: { host: '', user: '', password: '', port: 5432, database: '', uri: '' } };
+        s3ToS3Config = { source: { accessKeyId: '', secretAccessKey: '', region: 'us-east-1', endpointUrl: '', sessionToken: '', bucket: 'my-source-bucket' }, dest: { accessKeyId: '', secretAccessKey: '', region: 'us-east-1', endpointUrl: '', sessionToken: '', bucket: 'my-destination-bucket' } };
+        currentEnv = {};
+        envPreview.textContent = '';
+        updateStatusInfo();
+        loadSecurityStatus();
+      } else {
+        showNotification(`❌ Failed to clear session`, 'error');
+      }
+    } catch (error) {
+      showNotification(`❌ Error clearing session: ${error.message}`, 'error');
+    }
+    setLoading(false);
+  }
+
+ async function clearMigrationHandler() {
+ const migrationId = document.getElementById('cleanupMigrationId')?.value;
+ if (!migrationId) {
+ showNotification('Please enter migration ID', 'warning');
+ return;
+ }
+
+ setLoading(true);
+ try {
+ const result = await clearMigration(migrationId);
+ if (result.success) {
+ showNotification(`✅ ${result.message}`, 'success');
+ document.getElementById('cleanupMigrationId').value = '';
+ } else {
+ showNotification(`❌ Failed to clear migration data`, 'error');
+ }
+ } catch (error) {
+ showNotification(`❌ Error clearing migration: ${error.message}`, 'error');
+ }
+ setLoading(false);
+ }
+
+ async function loadSecurityStatus() {
+ try {
+ const result = await getSecurityStatus();
+ if (result.success) {
+ document.getElementById('sessionId').textContent = result.security_status.current_session_id || 'Unknown';
+ document.getElementById('activeSessions').textContent = result.security_status.active_sessions || 0;
+ }
+ } catch (error) {
+ console.error('Error loading security status:', error);
+ }
+ }
+
+ // ==================== Render Functions ====================
+
+  // Renders the schema checkbox list for the PSQL→S3 tab into
+  // #psqlToS3_schemasContainer.
+  // NOTE(review): the markup inside this template literal appears to have
+  // been stripped in transit (empty lines where HTML once was) — restore
+  // the original checkbox markup before shipping. TODO confirm upstream.
+  function renderPsqlToS3Schemas(schemas) {
+    const container = document.getElementById('psqlToS3_schemasContainer');
+    container.innerHTML = schemas.map(schema => `
+
+
+
+    `).join('');
+  }
+
+  // Renders the tables list for the PSQL→S3 tab, grouped by schema, into
+  // #psqlToS3_tablesContainer.
+  // NOTE(review): the HTML inside the template literal was stripped in
+  // transit (some lines even lost their diff prefix) — the markup around
+  // "Schema:" / "${group.count} tables" must be restored. TODO confirm.
+  function renderPsqlToS3Tables(tables) {
+    const container = document.getElementById('psqlToS3_tablesContainer');
+    const groups = groupTablesBySchema(tables);
+
+    let html = '';
+    Object.entries(groups).forEach(([schema, group]) => {
+      html += `
+
+
📚 Schema: ${schema}
+
+ ${group.count} tables
+
+
+
+ `;
+    });
+    container.innerHTML = html;
+  }
+
+  // Renders schema checkboxes for the PSQL→PSQL tab and wires change
+  // handlers that keep psqlToPsqlSelectedSchemas in sync with the UI.
+  // NOTE(review): the checkbox markup inside the template literal was
+  // stripped in transit and must be restored — the change handlers below
+  // target the missing `.schema-checkbox` elements. TODO confirm upstream.
+  function renderPsqlToPsqlSchemas() {
+    const container = document.getElementById('psqlToPsql_schemasContainer');
+    container.innerHTML = psqlToPsqlSchemas.map(schema => `
+
+
+
+    `).join('');
+
+    document.querySelectorAll('#psqlToPsql_schemasContainer .schema-checkbox').forEach(checkbox => {
+      checkbox.addEventListener('change', (e) => {
+        if (e.target.checked) {
+          psqlToPsqlSelectedSchemas.push(e.target.value);
+        } else {
+          // Unchecking removes the schema from the selection
+          psqlToPsqlSelectedSchemas = psqlToPsqlSelectedSchemas.filter(s => s !== e.target.value);
+        }
+      });
+    });
+  }
+
+  // Renders the PSQL→PSQL tables list grouped by schema and wires change
+  // handlers that keep psqlToPsqlSelectedTables in sync (no duplicates).
+  // NOTE(review): the HTML in the template literal was stripped in transit
+  // (one line lost its diff prefix) — the `.table-checkbox` markup the
+  // handlers below rely on must be restored. TODO confirm upstream.
+  function renderPsqlToPsqlTables() {
+    const container = document.getElementById('psqlToPsql_tablesContainer');
+    const groups = groupTablesBySchema(psqlToPsqlTables);
+
+    let html = '';
+    Object.entries(groups).forEach(([schema, group]) => {
+      html += `
+
+
📚 Schema: ${schema}
+
+ ${group.count} tables
+
+
+
+ `;
+    });
+    container.innerHTML = html;
+
+    document.querySelectorAll('#psqlToPsql_tablesContainer .table-checkbox').forEach(checkbox => {
+      checkbox.addEventListener('change', (e) => {
+        const tableName = e.target.value;
+        if (e.target.checked) {
+          // Guard against double-insertion if the same box fires twice
+          if (!psqlToPsqlSelectedTables.includes(tableName)) {
+            psqlToPsqlSelectedTables.push(tableName);
+          }
+        } else {
+          psqlToPsqlSelectedTables = psqlToPsqlSelectedTables.filter(t => t !== tableName);
+        }
+      });
+    });
+  }
+
+  // Renders the source bucket cards for the S3→S3 tab and wires the
+  // "select bucket" buttons to update s3ToS3Config.source.bucket, the
+  // bucket input field and the summaries, then hides the list.
+  // NOTE(review): the card markup in the template literal was stripped in
+  // transit (the `.select-bucket-btn` elements the handlers target are
+  // missing) — restore it from the original source. TODO confirm.
+  function renderS3ToS3SourceBuckets() {
+    const container = document.getElementById('s3ToS3_sourceBucketsContainer');
+    container.innerHTML = s3ToS3SourceBuckets.map(bucket => `
+
+
${bucket.name}
+
+ Region: ${bucket.region}
+ Created: ${new Date(bucket.creation_date).toLocaleDateString()}
+ Objects: ${bucket.object_count}
+ Size: ${formatFileSize(bucket.total_size)}
+
+
+
+ `).join('');
+
+    document.querySelectorAll('#s3ToS3_sourceBucketsContainer .select-bucket-btn').forEach(btn => {
+      btn.addEventListener('click', (e) => {
+        const bucketName = e.target.dataset.bucket;
+        s3ToS3Config.source.bucket = bucketName;
+        document.getElementById('s3ToS3_sourceBucket').value = bucketName;
+        updateS3Summaries();
+        showNotification(`✅ Selected source bucket: ${bucketName}`, 'success');
+        document.getElementById('s3ToS3_sourceBucketsList').style.display = 'none';
+      });
+    });
+  }
+
+  // Mirror of renderS3ToS3SourceBuckets for the destination side: renders
+  // bucket cards and wires selection buttons to s3ToS3Config.dest.bucket.
+  // NOTE(review): the card markup in the template literal was stripped in
+  // transit — restore it from the original source. TODO confirm.
+  function renderS3ToS3DestBuckets() {
+    const container = document.getElementById('s3ToS3_destBucketsContainer');
+    container.innerHTML = s3ToS3DestBuckets.map(bucket => `
+
+
${bucket.name}
+
+ Region: ${bucket.region}
+ Created: ${new Date(bucket.creation_date).toLocaleDateString()}
+ Objects: ${bucket.object_count}
+ Size: ${formatFileSize(bucket.total_size)}
+
+
+
+ `).join('');
+
+    document.querySelectorAll('#s3ToS3_destBucketsContainer .select-bucket-btn').forEach(btn => {
+      btn.addEventListener('click', (e) => {
+        const bucketName = e.target.dataset.bucket;
+        s3ToS3Config.dest.bucket = bucketName;
+        document.getElementById('s3ToS3_destBucket').value = bucketName;
+        updateS3Summaries();
+        showNotification(`✅ Selected destination bucket: ${bucketName}`, 'success');
+        document.getElementById('s3ToS3_destBucketsList').style.display = 'none';
+      });
+    });
+  }
+
+  // Renders the migrations list into #migrationsContainer (or an empty-state
+  // message) and wires the per-row "view logs" buttons to setActiveMigration.
+  // NOTE(review): the HTML inside both template literals was stripped in
+  // transit — the empty-state string is even split across lines, which is a
+  // syntax error as-is. Restore the markup from the original source. TODO.
+  function renderMigrations() {
+    const container = document.getElementById('migrationsContainer');
+    if (!container) return;
+
+    if (migrations.length === 0) {
+      container.innerHTML = 'No migrations found
';
+      return;
+    }
+
+    container.innerHTML = migrations.map(migration => `
+
+
+
+
Status: ${migration.status}
+
Started: ${migration.started_at ? new Date(migration.started_at * 1000).toLocaleString() : 'N/A'}
+
+
+
+ ${migration.status === 'running' ? `
+
+ ` : ''}
+
+
+ `).join('');
+
+    document.querySelectorAll('.view-logs-btn').forEach(btn => {
+      btn.addEventListener('click', async (e) => {
+        const migrationId = e.target.dataset.id;
+        setActiveMigration(migrationId);
+      });
+    });
+  }
+
+  // Renders migrationLogs into #logsContainer, one timestamped line per
+  // entry, or an empty-state message when there are no logs.
+  // NOTE(review): the HTML in both template literals was stripped in
+  // transit (the empty-state string is split across lines — a syntax error
+  // as-is). Restore the markup from the original source. TODO confirm.
+  function renderMigrationLogs() {
+    const container = document.getElementById('logsContainer');
+    if (!container) return;
+
+    if (!migrationLogs || migrationLogs.length === 0) {
+      container.innerHTML = 'No logs available
';
+      return;
+    }
+
+    container.innerHTML = migrationLogs.map(log => `
+
+ ${log.timestamp ? new Date(log.timestamp).toLocaleTimeString() : ''}
+ ${log.message}
+
+ `).join('');
+  }
+
+  // Renders the four environment-config summary cards (PSQL source/dest,
+  // S3 source/dest) into #envConfigCards using formatPostgresConfig and
+  // formatS3Config. Note the S3 config keys are translated from the local
+  // camelCase shape to the snake_case shape formatS3Config expects.
+  // NOTE(review): the card markup in the template literal was stripped in
+  // transit (several heading lines lost their diff prefix) — restore it
+  // from the original source. TODO confirm upstream.
+  function renderEnvConfigCards() {
+    const container = document.getElementById('envConfigCards');
+    if (!container) return;
+
+    container.innerHTML = `
+
+
🐘 PSQL to PSQL Source
+
${formatPostgresConfig(psqlToPsqlConfig.source)}
+
+
+
🐘 PSQL to PSQL Dest
+
${formatPostgresConfig(psqlToPsqlConfig.dest)}
+
+
+
📤 S3 to S3 Source
+
${formatS3Config({
+ endpoint_url: s3ToS3Config.source.endpointUrl,
+ region: s3ToS3Config.source.region,
+ bucket: s3ToS3Config.source.bucket,
+ access_key_id: s3ToS3Config.source.accessKeyId,
+ secret_access_key: s3ToS3Config.source.secretAccessKey
+ }, 'source')}
+
+
+
📥 S3 to S3 Dest
+
${formatS3Config({
+ endpoint_url: s3ToS3Config.dest.endpointUrl,
+ region: s3ToS3Config.dest.region,
+ bucket: s3ToS3Config.dest.bucket,
+ access_key_id: s3ToS3Config.dest.accessKeyId,
+ secret_access_key: s3ToS3Config.dest.secretAccessKey
+ }, 'destination')}
+
+ `;
+  }
+
+ // ==================== Additional Handlers ====================
+
+ async function setActiveMigration(migrationId) {
+ activeMigration = migrationId;
+ setLoading(true);
+ try {
+ let result = await getPostgresMigrationStatus(migrationId);
+ if (!result.success) {
+ result = await getS3MigrationStatus(migrationId);
+ }
+ if (!result.success) {
+ result = await getPgToS3MigrationStatus(migrationId);
+ }
+
+ if (result.success && result.status) {
+ // Switch to environment tab where migrations are shown
+ document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
+ document.querySelectorAll('.tab-content').forEach(c => c.classList.remove('active'));
+ document.querySelector('[data-tab="environment"]').classList.add('active');
+ document.getElementById('tab-environment').classList.add('active');
+
+ if (result.status.logs && Array.isArray(result.status.logs)) {
+ migrationLogs = result.status.logs;
+ renderMigrationLogs();
+ document.getElementById('migrationLogs').style.display = 'block';
+ }
+
+ showNotification(`✅ Loaded logs for migration ${migrationId}`, 'success');
+ }
+ } catch (error) {
+ console.error('Error fetching migration logs:', error);
+ showNotification(`❌ Error loading migration logs: ${error.message}`, 'error');
+ }
+ setLoading(false);
+ }
+
+ // ==================== Initialize ====================
+  // One-time app bootstrap: wires event listeners, seeds the summary
+  // widgets, then kicks off the initial data loads (env vars, security
+  // status, status bar). Called once at the bottom of App().
+  function init() {
+    setupEventListeners();
+
+    // Set initial summaries
+    updatePostgresSummaries();
+    updateS3Summaries();
+
+    // Load initial data
+    loadCurrentEnvHandler();
+    loadSecurityStatus();
+    updateStatusInfo();
+  }
+
+ init();
+}
+
+// ==================== Start the app ====================
+document.addEventListener("DOMContentLoaded", App);
\ No newline at end of file
diff --git a/migrator.py b/migrator.py
new file mode 100644
index 0000000..06c55fa
--- /dev/null
+++ b/migrator.py
@@ -0,0 +1,4500 @@
+#!/usr/bin/env python3
+"""
+migrator_unified.py - Unified Migration Engine
+Combined migrator for PostgreSQL, S3 to S3, and PostgreSQL to S3 migrations
+"""
+
+import subprocess
+import signal
+import json
+import os
+import sys
+import time
+import threading
+import hashlib
+import io
+import csv
+import gzip
+from datetime import datetime
+from urllib.parse import urlparse
+import boto3
+from botocore.exceptions import ClientError, NoCredentialsError, EndpointConnectionError
+
+# ============================================================================
+# Part 1: PostgreSQL Migrator (PostgresMigrator)
+# ============================================================================
+
+class PostgresMigrator:
+ """PostgreSQL to PostgreSQL migration engine"""
+
+    def __init__(self):
+        # migration_id -> mutable migration-state dict (status, tracker, results)
+        self.migrations = {}
+        # Guards all reads/writes of self.migrations across worker threads
+        self._lock = threading.Lock()
+
+ def parse_postgres_uri(self, uri):
+ """Parse PostgreSQL URI and extract connection parameters"""
+ try:
+ if not uri.startswith(('postgresql://', 'postgres://')):
+ uri = 'postgresql://' + uri
+
+ parsed = urlparse(uri)
+
+ return {
+ 'host': parsed.hostname,
+ 'port': parsed.port or 5432,
+ 'user': parsed.username or '',
+ 'password': parsed.password or '',
+ 'database': parsed.path.lstrip('/') if parsed.path else 'postgres',
+ 'uri': uri
+ }
+ except Exception as e:
+ return None
+    def get_live_migration_progress(self, migration_id):
+        """Return live, detailed progress for a migration.
+
+        Dispatches (under the lock) to the appropriate snapshot builder:
+        completed stats, real tracker data, or a time-based estimate.
+        Returns {'success': False, ...} for unknown migration ids.
+        """
+        with self._lock:
+            if migration_id not in self.migrations:
+                return {'success': False, 'error': 'Migration not found'}
+
+            mig_data = self.migrations[migration_id]
+
+            # Migration already finished (completed or failed)
+            if mig_data.get('status') != 'running':
+                return self._get_detailed_completed_progress(migration_id)
+
+            # An active tracker exists: report real progress
+            if 'progress_tracker' in mig_data:
+                return self._get_live_tracker_progress(migration_id)
+
+            # Running without a tracker: fall back to a coarse estimate
+            return self._get_estimated_live_progress(migration_id)
+
+    def _get_live_tracker_progress(self, migration_id):
+        """Build a live progress snapshot from the active progress tracker.
+
+        NOTE: expects self._lock to already be held by the caller
+        (get_live_migration_progress) and a 'progress_tracker' record to
+        exist for migration_id.
+        """
+        mig_data = self.migrations[migration_id]
+        tracker = mig_data['progress_tracker']
+
+        current_time = time.time()
+        elapsed_time = current_time - tracker['start_time']
+
+        # Completion percentages by tables / bytes / rows (0 when totals unknown)
+        tables_percentage = (tracker['processed_tables'] / tracker['total_tables'] * 100) if tracker['total_tables'] > 0 else 0
+        size_percentage = (tracker['processed_size'] / tracker['total_size'] * 100) if tracker['total_size'] > 0 else 0
+        rows_percentage = (tracker['processed_rows'] / tracker['total_rows'] * 100) if tracker['total_rows'] > 0 else 0
+
+        # Current speed from samples recorded within the last 10 seconds;
+        # fall back to the overall average when no recent samples exist
+        recent_samples = [s for s in tracker.get('speed_samples', []) if current_time - s['time'] < 10]
+        if recent_samples:
+            recent_size = sum(s['size'] for s in recent_samples)
+            recent_time = recent_samples[-1]['time'] - recent_samples[0]['time'] if len(recent_samples) > 1 else 1
+            current_speed = recent_size / recent_time if recent_time > 0 else 0
+        else:
+            current_speed = tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0
+
+        # ETA based on the current speed and the bytes still outstanding
+        remaining_size = tracker['total_size'] - tracker['processed_size'] - tracker.get('failed_size', 0)
+        eta_seconds = remaining_size / current_speed if current_speed > 0 else 0
+
+        # Details of the table currently being migrated (if any)
+        current_table_info = None
+        if tracker.get('current_table'):
+            current_table_info = {
+                'name': tracker['current_table'],
+                'size': tracker.get('current_table_size', 0),
+                'size_formatted': self._format_size(tracker.get('current_table_size', 0)),
+                'rows_total': tracker.get('current_table_rows', 0),
+                'rows_processed': tracker.get('current_table_rows_processed', 0),
+                'rows_percentage': round((tracker.get('current_table_rows_processed', 0) / tracker.get('current_table_rows', 1)) * 100, 2) if tracker.get('current_table_rows', 0) > 0 else 0,
+                'estimated_remaining': tracker.get('current_table_remaining_time', 0)
+            }
+
+        return {
+            'success': True,
+            'migration_id': migration_id,
+            'status': 'running',
+            'type': 'live',
+            'timestamp': current_time,
+
+            # Overall totals
+            'total': {
+                'tables': tracker['total_tables'],
+                'size': tracker['total_size'],
+                'size_formatted': self._format_size(tracker['total_size']),
+                'rows': tracker.get('total_rows', 0)
+            },
+
+            # Completed so far
+            'processed': {
+                'tables': tracker['processed_tables'],
+                'size': tracker['processed_size'],
+                'size_formatted': self._format_size(tracker['processed_size']),
+                'rows': tracker.get('processed_rows', 0)
+            },
+
+            # Failed
+            'failed': {
+                'tables': tracker.get('failed_tables', 0),
+                'size': tracker.get('failed_size', 0),
+                'size_formatted': self._format_size(tracker.get('failed_size', 0)),
+                'rows': tracker.get('failed_rows', 0)
+            },
+
+            # Remaining
+            'remaining': {
+                'tables': tracker['total_tables'] - tracker['processed_tables'] - tracker.get('failed_tables', 0),
+                'size': tracker['total_size'] - tracker['processed_size'] - tracker.get('failed_size', 0),
+                'size_formatted': self._format_size(tracker['total_size'] - tracker['processed_size'] - tracker.get('failed_size', 0))
+            },
+
+            # Percentages
+            'percentages': {
+                'tables': round(tables_percentage, 2),
+                'size': round(size_percentage, 2),
+                'rows': round(rows_percentage, 2)
+            },
+
+            # Timing and ETA
+            'time': {
+                'elapsed': elapsed_time,
+                'elapsed_formatted': self._format_time(elapsed_time),
+                'eta': eta_seconds,
+                'eta_formatted': self._format_time(eta_seconds),
+                'estimated_completion': current_time + eta_seconds if eta_seconds > 0 else None
+            },
+
+            'speed': {
+                'current': current_speed,
+                'current_formatted': f"{self._format_size(current_speed)}/s",
+                'average': tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0,
+                'average_formatted': f"{self._format_size(tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0)}/s",
+                'peak': max((s['size'] for s in tracker.get('speed_samples', [])), default=0),
+                'peak_formatted': self._format_size(max((s['size'] for s in tracker.get('speed_samples', [])), default=0))
+            },
+
+            # Current table info
+            'current_table': current_table_info,
+
+            # Last 5 completed tables
+            'recent_tables': tracker.get('tables_details', [])[-5:],
+
+            # Textual progress bars
+            'progress_bars': {
+                'tables': self._format_progress_bar(tables_percentage),
+                'size': self._format_progress_bar(size_percentage),
+                'rows': self._format_progress_bar(rows_percentage) if tracker.get('total_rows', 0) > 0 else None
+            }
+        }
+
+    def _get_estimated_live_progress(self, migration_id):
+        """Estimate progress for a running migration that has no tracker.
+
+        Purely heuristic: assumes ~5 MB/s pg_dump throughput against the
+        caller-provided (or default) size estimate. Caller holds self._lock.
+        """
+        mig_data = self.migrations[migration_id]
+        elapsed_time = time.time() - mig_data['started_at']
+
+        # Fall back to caller-provided estimates when available
+        estimated_total_tables = mig_data.get('estimated_tables', 10)
+        estimated_total_size = mig_data.get('estimated_size', 100 * 1024 * 1024)
+
+        # Progress heuristic based on typical pg_dump throughput
+        # (pg_dump commonly takes 1-5 minutes per GB depending on speed)
+        estimated_duration = estimated_total_size / (5 * 1024 * 1024)  # assume ~5 MB/s
+        progress_percentage = min((elapsed_time / estimated_duration) * 100, 99) if estimated_duration > 0 else 0
+
+        tables_migrated = int(estimated_total_tables * progress_percentage / 100)
+        size_migrated = int(estimated_total_size * progress_percentage / 100)
+
+        return {
+            'success': True,
+            'migration_id': migration_id,
+            'status': 'running',
+            'type': 'estimated',
+            'timestamp': time.time(),
+            'estimated': True,
+
+            'total': {
+                'tables': estimated_total_tables,
+                'size': estimated_total_size,
+                'size_formatted': self._format_size(estimated_total_size)
+            },
+
+            'processed': {
+                'tables': tables_migrated,
+                'size': size_migrated,
+                'size_formatted': self._format_size(size_migrated)
+            },
+
+            'percentages': {
+                'tables': round(progress_percentage, 2),
+                'size': round(progress_percentage, 2)
+            },
+
+            'time': {
+                'elapsed': elapsed_time,
+                'elapsed_formatted': self._format_time(elapsed_time)
+            },
+
+            'progress_bars': {
+                'main': self._format_progress_bar(progress_percentage)
+            },
+
+            'note': 'This is an estimated progress. For accurate tracking, use migrate_tables_individually()'
+        }
+
+    def _get_detailed_completed_progress(self, migration_id):
+        """Return detailed statistics for a completed (or failed) migration.
+
+        Uses real tracker data when a 'progress_tracker' record exists,
+        otherwise falls back to rough per-table estimates derived from the
+        completion stats. Caller holds self._lock.
+        """
+        mig_data = self.migrations[migration_id]
+
+        if 'success' not in mig_data:
+            return {'success': False, 'error': 'Migration not completed'}
+
+        stats = mig_data.get('stats', {})
+        execution_time = mig_data.get('execution_time', 0)
+
+        # Prefer real tracker data when a progress tracker exists
+        if 'progress_tracker' in mig_data:
+            tracker = mig_data['progress_tracker']
+            total_size = tracker.get('total_size', 0)
+            processed_size = tracker.get('processed_size', 0)
+            total_tables = tracker.get('total_tables', 0)
+            processed_tables = tracker.get('processed_tables', 0)
+            tables_details = tracker.get('tables_details', [])
+        else:
+            # Otherwise fall back to rough estimates (10 MB per table)
+            total_tables = stats.get('tables_created', 0) + stats.get('tables_altered', 0)
+            processed_tables = stats.get('tables_created', 0)
+            estimated_size_per_table = 10 * 1024 * 1024
+            total_size = total_tables * estimated_size_per_table
+            processed_size = processed_tables * estimated_size_per_table
+            tables_details = []
+
+        return {
+            'success': True,
+            'migration_id': migration_id,
+            'status': 'completed' if mig_data.get('success') else 'failed',
+            'type': 'completed',
+            'timestamp': time.time(),
+
+            'total': {
+                'tables': total_tables,
+                'size': total_size,
+                'size_formatted': self._format_size(total_size)
+            },
+
+            'processed': {
+                'tables': processed_tables,
+                'size': processed_size,
+                'size_formatted': self._format_size(processed_size),
+                'tables_created': stats.get('tables_created', 0),
+                'tables_altered': stats.get('tables_altered', 0),
+                'indexes_created': stats.get('indexes_created', 0)
+            },
+
+            'percentages': {
+                'tables': 100.0 if processed_tables == total_tables else round((processed_tables / total_tables * 100) if total_tables > 0 else 0, 2),
+                'size': 100.0 if processed_size == total_size else round((processed_size / total_size * 100) if total_size > 0 else 0, 2)
+            },
+
+            'time': {
+                'total': execution_time,
+                'total_formatted': self._format_time(execution_time),
+                'average_speed': processed_size / execution_time if execution_time > 0 else 0,
+                'average_speed_formatted': f"{self._format_size(processed_size / execution_time if execution_time > 0 else 0)}/s"
+            },
+
+            'tables_details': tables_details[-10:],  # last 10 tables
+
+            'error': mig_data.get('error') if not mig_data.get('success') else None
+        }
+
+ def migrate_tables_individually(self, migration_id, source_uri, dest_uri, schemas=None, tables=None):
+ """ترحيل الجداول بشكل فردي مع تتبع دقيق للتقدم"""
+
+ # تهيئة متتبع التقدم
+ self._init_detailed_tracker(migration_id, source_uri, schemas, tables)
+
+ def migration_task():
+ try:
+ result = self._run_individual_migration(migration_id, source_uri, dest_uri, schemas, tables)
+ with self._lock:
+ self.migrations[migration_id].update(result)
+ self.migrations[migration_id]['status'] = 'completed' if result['success'] else 'failed'
+ self.migrations[migration_id]['completed_at'] = time.time()
+ except Exception as e:
+ with self._lock:
+ self.migrations[migration_id]['status'] = 'failed'
+ self.migrations[migration_id]['error'] = str(e)
+
+ thread = threading.Thread(target=migration_task, daemon=True)
+ thread.start()
+
+ return migration_id
+
+ def _init_detailed_tracker(self, migration_id, source_uri, schemas=None, tables=None):
+ """تهيئة متتبع تفصيلي مع بيانات دقيقة"""
+
+ # الحصول على قائمة الجداول وأحجامها
+ tables_list = []
+ total_size = 0
+ total_rows = 0
+
+ if tables:
+ for table in tables:
+ if '.' in table:
+ schema, name = table.split('.', 1)
+ else:
+ schema = 'public'
+ name = table
+
+ size = self._get_table_size(source_uri, schema, name) or 0
+ rows = self._get_table_row_count(source_uri, schema, name) or 0
+
+ tables_list.append({
+ 'schema': schema,
+ 'name': name,
+ 'size': size,
+ 'rows': rows
+ })
+ total_size += size
+ total_rows += rows
+
+ elif schemas:
+ for schema in schemas:
+ result = self.get_tables(source_uri, schema)
+ if result['success']:
+ for table_info in result['tables']:
+ size = self._get_table_size(source_uri, schema, table_info['name']) or 0
+ rows = self._get_table_row_count(source_uri, schema, table_info['name']) or 0
+
+ tables_list.append({
+ 'schema': schema,
+ 'name': table_info['name'],
+ 'size': size,
+ 'rows': rows
+ })
+ total_size += size
+ total_rows += rows
+
+ else:
+ result = self.get_tables(source_uri)
+ if result['success']:
+ for table_info in result['tables']:
+ size = self._get_table_size(source_uri, table_info['schema'], table_info['name']) or 0
+ rows = self._get_table_row_count(source_uri, table_info['schema'], table_info['name']) or 0
+
+ tables_list.append({
+ 'schema': table_info['schema'],
+ 'name': table_info['name'],
+ 'size': size,
+ 'rows': rows
+ })
+ total_size += size
+ total_rows += rows
+
+ with self._lock:
+ self.migrations[migration_id] = {
+ 'status': 'running',
+ 'started_at': time.time(),
+ 'source': source_uri,
+ 'destination': dest_uri,
+ 'schemas': schemas,
+ 'tables': tables,
+ 'progress_tracker': {
+ 'total_tables': len(tables_list),
+ 'total_size': total_size,
+ 'total_rows': total_rows,
+ 'processed_tables': 0,
+ 'processed_size': 0,
+ 'processed_rows': 0,
+ 'failed_tables': 0,
+ 'failed_size': 0,
+ 'failed_rows': 0,
+ 'current_table': None,
+ 'current_table_size': 0,
+ 'current_table_rows': 0,
+ 'current_table_rows_processed': 0,
+ 'current_table_start_time': None,
+ 'start_time': time.time(),
+ 'last_update': time.time(),
+ 'tables_list': tables_list,
+ 'tables_details': [],
+ 'speed_samples': []
+ }
+ }
+
+    def _run_individual_migration(self, migration_id, source_uri, dest_uri, schemas=None, tables=None):
+        """Run the migration table-by-table, updating the tracker per table.
+
+        Iterates over the tables collected by _init_detailed_tracker, copies
+        each via _migrate_table_with_row_tracking, and maintains the shared
+        progress tracker (always mutated under self._lock). Returns the
+        summary dict folded into the migration record by the caller.
+        """
+
+        logs = []
+        successful_tables = []
+        failed_tables = []
+
+        with self._lock:
+            tracker = self.migrations[migration_id]['progress_tracker']
+            tables_list = tracker['tables_list']
+
+        for i, table_info in enumerate(tables_list, 1):
+            table_name = f"{table_info['schema']}.{table_info['name']}"
+
+            # Publish which table is currently being worked on
+            with self._lock:
+                tracker = self.migrations[migration_id]['progress_tracker']
+                tracker['current_table'] = table_name
+                tracker['current_table_size'] = table_info['size']
+                tracker['current_table_rows'] = table_info['rows']
+                tracker['current_table_rows_processed'] = 0
+                tracker['current_table_start_time'] = time.time()
+
+            print(f"[{migration_id}] 📊 Migrating table {i}/{len(tables_list)}: {table_name} ({self._format_size(table_info['size'])})")
+
+            # Migrate the table with row tracking
+            table_result = self._migrate_table_with_row_tracking(
+                migration_id, source_uri, dest_uri,
+                table_info['schema'], table_info['name']
+            )
+
+            if table_result['success']:
+                # Record the success in the tracker
+                with self._lock:
+                    tracker = self.migrations[migration_id]['progress_tracker']
+                    tracker['processed_tables'] += 1
+                    tracker['processed_size'] += table_info['size']
+                    tracker['processed_rows'] += table_result.get('rows_migrated', 0)
+
+                    # Append a per-table detail entry
+                    tracker['tables_details'].append({
+                        'name': table_name,
+                        'size': table_info['size'],
+                        'size_formatted': self._format_size(table_info['size']),
+                        'rows': table_result.get('rows_migrated', 0),
+                        'time': table_result.get('migration_time', 0),
+                        'time_formatted': self._format_time(table_result.get('migration_time', 0)),
+                        'speed': table_result.get('speed', 0),
+                        'speed_formatted': f"{self._format_size(table_result.get('speed', 0))}/s",
+                        'timestamp': time.time()
+                    })
+
+                    # Record a speed sample for current-speed calculations
+                    tracker['speed_samples'].append({
+                        'time': time.time(),
+                        'size': table_info['size'],
+                        'tables': 1
+                    })
+
+                successful_tables.append(table_name)
+                print(f"[{migration_id}] ✅ Completed {table_name} in {self._format_time(table_result.get('migration_time', 0))}")
+
+            else:
+                # Record the failure in the tracker
+                with self._lock:
+                    tracker = self.migrations[migration_id]['progress_tracker']
+                    tracker['failed_tables'] += 1
+                    tracker['failed_size'] += table_info['size']
+
+                failed_tables.append({
+                    'table': table_name,
+                    'error': table_result.get('error')
+                })
+                print(f"[{migration_id}] ❌ Failed {table_name}: {table_result.get('error')}")
+
+        # NOTE(review): 'started_at' is read here without holding self._lock;
+        # benign in CPython but inconsistent with the accesses above — confirm.
+        execution_time = time.time() - self.migrations[migration_id]['started_at']
+
+        return {
+            'success': len(failed_tables) == 0,
+            'migration_id': migration_id,
+            'execution_time': execution_time,
+            'stats': {
+                'tables_created': len(successful_tables),
+                'tables_failed': len(failed_tables),
+                'total_tables': len(tables_list),
+                'total_size': sum(t['size'] for t in tables_list)
+            },
+            'successful_tables': successful_tables,
+            'failed_tables': failed_tables,
+            'logs': logs
+        }
+
+ def _migrate_table_with_row_tracking(self, migration_id, source_uri, dest_uri, schema, table):
+ """ترحيل جدول مع تتبع عدد الصفوف المنقولة"""
+ try:
+ start_time = time.time()
+
+ # الحصول على إجمالي الصفوف
+ total_rows = self._get_table_row_count(source_uri, schema, table) or 0
+
+ # استخدام COPY مع تتبع التقدم (يتطلب تعديل حسب الإمكانيات)
+ # هذا مثال مبسط - في الواقع قد تحتاج لاستخدام COPY مع callback
+
+ # تنفيذ أمر pg_dump للجدول الواحد
+ dump_cmd = [
+ 'pg_dump', '--dbname', source_uri,
+ '-t', f'"{schema}"."{table}"',
+ '--data-only', '--inserts'
+ ]
+
+ psql_cmd = ['psql', '--dbname', dest_uri]
+
+ full_cmd = ' '.join(dump_cmd) + ' | ' + ' '.join(psql_cmd)
+
+ result = subprocess.run(full_cmd, shell=True, capture_output=True, text=True, timeout=300)
+
+ migration_time = time.time() - start_time
+
+ if result.returncode == 0:
+ # تقدير عدد الصفوف المنقولة (يمكن تحسينه)
+ rows_migrated = total_rows
+
+ return {
+ 'success': True,
+ 'rows_migrated': rows_migrated,
+ 'migration_time': migration_time,
+ 'speed': (table_size or 0) / migration_time if migration_time > 0 else 0
+ }
+ else:
+ return {
+ 'success': False,
+ 'error': result.stderr[:500]
+ }
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def _get_table_row_count(self, uri, schema, table):
+ """الحصول على عدد صفوف الجدول"""
+ try:
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return None
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ sql = f'SELECT COUNT(*) FROM "{schema}"."{table}";'
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=30)
+
+ if result.returncode == 0:
+ count_line = result.stdout.strip()
+ if count_line and count_line.isdigit():
+ return int(count_line)
+
+ return None
+
+ except Exception:
+ return None
+
+    def format_live_status(self, migration_id):
+        """Format the live migration status as a human-readable text block.
+
+        Wraps get_live_migration_progress() and renders whichever sections
+        the snapshot contains (totals, size, time, speed, current table,
+        recent tables). Returns an error string for unknown migrations.
+        """
+        progress = self.get_live_migration_progress(migration_id)
+
+        if not progress.get('success', True):
+            return f"❌ Error: {progress.get('error', 'Unknown error')}"
+
+        lines = []
+        lines.append("=" * 60)
+        lines.append(f"🚀 Migration {migration_id} - {progress['status'].upper()}")
+        lines.append("=" * 60)
+
+        if progress['type'] == 'estimated':
+            lines.append("⚠️ ESTIMATED PROGRESS (not real-time)")
+
+        # Main progress bar (size-based when available, else the estimate bar)
+        if 'progress_bars' in progress:
+            if 'size' in progress['progress_bars']:
+                lines.append(f"\n📊 Overall Progress: {progress['progress_bars']['size']}")
+            elif 'main' in progress['progress_bars']:
+                lines.append(f"\n📊 Progress: {progress['progress_bars']['main']}")
+
+        # Table statistics
+        lines.append(f"\n📦 Tables:")
+        lines.append(f"   Total: {progress['total']['tables']} tables")
+        lines.append(f"   Processed: {progress['processed']['tables']} tables ({progress['percentages']['tables']}%)")
+        if progress.get('failed', {}).get('tables', 0) > 0:
+            lines.append(f"   ❌ Failed: {progress['failed']['tables']} tables")
+
+        # Size statistics
+        lines.append(f"\n💾 Size:")
+        lines.append(f"   Total: {progress['total']['size_formatted']}")
+        lines.append(f"   Transferred: {progress['processed']['size_formatted']} ({progress['percentages']['size']}%)")
+        if progress.get('remaining', {}).get('size', 0) > 0:
+            lines.append(f"   Remaining: {progress['remaining']['size_formatted']}")
+
+        # Time and speed
+        if 'time' in progress:
+            lines.append(f"\n⏱️ Time:")
+            lines.append(f"   Elapsed: {progress['time']['elapsed_formatted']}")
+            if 'eta_formatted' in progress['time']:
+                lines.append(f"   ETA: {progress['time']['eta_formatted']}")
+
+        if 'speed' in progress:
+            lines.append(f"\n⚡ Speed:")
+            lines.append(f"   Current: {progress['speed']['current_formatted']}")
+            lines.append(f"   Average: {progress['speed']['average_formatted']}")
+            if progress['speed']['peak'] > 0:
+                lines.append(f"   Peak: {progress['speed']['peak_formatted']}")
+
+        # Current table
+        if progress.get('current_table'):
+            ct = progress['current_table']
+            lines.append(f"\n📄 Current Table: {ct['name']}")
+            lines.append(f"   Size: {ct['size_formatted']}")
+            if ct.get('rows_total', 0) > 0:
+                lines.append(f"   Rows: {ct['rows_processed']}/{ct['rows_total']} ({ct['rows_percentage']}%)")
+
+        # Recently completed tables
+        if progress.get('recent_tables'):
+            lines.append(f"\n✅ Recently Completed:")
+            for table in progress['recent_tables'][-3:]:  # last 3 tables
+                lines.append(f"   • {table['name']} - {table['size_formatted']} in {table.get('time_formatted', 'N/A')}")
+
+        lines.append("\n" + "=" * 60)
+
+        return '\n'.join(lines)
+    def calculate_migration_progress(self, migration_id):
+        """Compute migration progress based on table count and size.
+
+        Older, coarser counterpart to get_live_migration_progress: returns
+        a time-based estimate for running migrations and summary stats for
+        finished ones. Unknown ids yield {'success': False, ...}.
+        """
+        with self._lock:
+            if migration_id not in self.migrations:
+                return {'success': False, 'error': 'Migration not found'}
+
+            mig_data = self.migrations[migration_id]
+
+            if mig_data.get('status') == 'running':
+                # Estimate progress for a still-running migration
+                return self._estimate_running_progress(migration_id)
+            else:
+                # Summary statistics for a finished migration
+                return self._get_completed_stats(migration_id)
+
+ def _estimate_running_progress(self, migration_id):
+ """تقدير تقدم الترحيل الجاري"""
+ mig_data = self.migrations[migration_id]
+ elapsed_time = time.time() - mig_data['started_at']
+
+ # تقدير عدد الجداول (إذا كانت متوفرة)
+ estimated_total_tables = mig_data.get('estimated_tables', 10)
+ estimated_total_size = mig_data.get('estimated_size', 100 * 1024 * 1024) # 100 MB افتراضي
+
+ # حساب التقدم بناءً على الوقت المنقضي (تقدير تقريبي)
+ # في الواقع العملي، ستحتاج لمراقبة خرج pg_dump
+ progress_percentage = min(elapsed_time / 60 * 10, 95) # 10% لكل دقيقة حتى 95%
+
+ return {
+ 'migration_id': migration_id,
+ 'status': 'running',
+ 'elapsed_time': elapsed_time,
+ 'elapsed_time_formatted': self._format_time(elapsed_time),
+ 'progress_percentage': round(progress_percentage, 2),
+ 'estimated': True,
+ 'estimated_total_tables': estimated_total_tables,
+ 'estimated_total_size': estimated_total_size,
+ 'estimated_total_size_formatted': self._format_size(estimated_total_size),
+ 'tables_migrated': int(estimated_total_tables * progress_percentage / 100),
+ 'size_migrated': int(estimated_total_size * progress_percentage / 100),
+ 'size_migrated_formatted': self._format_size(int(estimated_total_size * progress_percentage / 100))
+ }
+
+ def _get_completed_stats(self, migration_id):
+ """الحصول على إحصائيات الترحيل المكتمل"""
+ mig_data = self.migrations[migration_id]
+
+ if not isinstance(mig_data, dict) or 'success' not in mig_data:
+ return {'success': False, 'error': 'Migration not completed'}
+
+ execution_time = mig_data.get('execution_time', 0)
+ stats = mig_data.get('stats', {})
+
+ # حساب عدد الجداول المنقولة
+ tables_created = stats.get('tables_created', 0)
+ tables_altered = stats.get('tables_altered', 0)
+ indexes_created = stats.get('indexes_created', 0)
+
+ # تقدير حجم البيانات المنقولة (صعب حسابه بدقة مع pg_dump)
+ # نستخدم عدد الجداول كمؤشر تقريبي
+ estimated_size_per_table = 10 * 1024 * 1024 # 10 MB لكل جدول تقديري
+ estimated_total_size = tables_created * estimated_size_per_table
+
+ return {
+ 'migration_id': migration_id,
+ 'status': 'completed' if mig_data.get('success') else 'failed',
+ 'success': mig_data.get('success', False),
+ 'execution_time': execution_time,
+ 'execution_time_formatted': self._format_time(execution_time),
+ 'progress_percentage': 100.0 if mig_data.get('success') else 0,
+ 'tables_created': tables_created,
+ 'tables_altered': tables_altered,
+ 'indexes_created': indexes_created,
+ 'total_tables_affected': tables_created + tables_altered,
+ 'estimated_size_migrated': estimated_total_size,
+ 'estimated_size_migrated_formatted': self._format_size(estimated_total_size),
+ 'error': mig_data.get('error') if not mig_data.get('success') else None
+ }
+
+ def track_table_migration(self, table_name, total_rows=None, rows_processed=None):
+ """تتبع ترحيل جدول معين (للاستخدام مع ترحيل مخصص)"""
+ # هذه الدالة يمكن استخدامها إذا كنت تقوم بترحيل الجداول بشكل فردي
+ tracker_key = f"table_tracker_{table_name}"
+
+ if not hasattr(self, '_table_trackers'):
+ self._table_trackers = {}
+
+ if tracker_key not in self._table_trackers:
+ self._table_trackers[tracker_key] = {
+ 'table_name': table_name,
+ 'total_rows': total_rows,
+ 'rows_processed': 0,
+ 'start_time': time.time(),
+ 'status': 'in_progress'
+ }
+
+ tracker = self._table_trackers[tracker_key]
+
+ if rows_processed is not None:
+ tracker['rows_processed'] = rows_processed
+
+ # حساب التقدم
+ if tracker['total_rows'] and tracker['total_rows'] > 0:
+ progress = (tracker['rows_processed'] / tracker['total_rows']) * 100
+ else:
+ progress = 0
+
+ elapsed_time = time.time() - tracker['start_time']
+
+ return {
+ 'table_name': table_name,
+ 'total_rows': tracker['total_rows'],
+ 'rows_processed': tracker['rows_processed'],
+ 'progress_percentage': round(progress, 2),
+ 'elapsed_time': elapsed_time,
+ 'elapsed_time_formatted': self._format_time(elapsed_time),
+ 'status': tracker['status']
+ }
+
+ def estimate_migration_size(self, source_uri, schemas=None, tables=None):
+ """تقدير حجم الترحيل قبل البدء"""
+ try:
+ total_size = 0
+ table_count = 0
+ table_details = []
+
+ if tables:
+ # حساب حجم الجداول المحددة
+ for table in tables:
+ if '.' in table:
+ schema, table_name = table.split('.', 1)
+ else:
+ schema = 'public'
+ table_name = table
+
+ # الحصول على حجم الجدول من PostgreSQL
+ size = self._get_table_size(source_uri, schema, table_name)
+ if size:
+ total_size += size
+ table_count += 1
+ table_details.append({
+ 'table': f"{schema}.{table_name}",
+ 'size': size,
+ 'size_formatted': self._format_size(size)
+ })
+
+ elif schemas:
+ # حساب حجم جميع الجداول في المخططات المحددة
+ for schema in schemas:
+ tables_result = self.get_tables(source_uri, schema)
+ if tables_result['success']:
+ for table_info in tables_result['tables']:
+ size = self._get_table_size(source_uri, schema, table_info['name'])
+ if size:
+ total_size += size
+ table_count += 1
+ table_details.append({
+ 'table': f"{schema}.{table_info['name']}",
+ 'size': size,
+ 'size_formatted': self._format_size(size)
+ })
+ else:
+ # حساب حجم جميع الجداول
+ tables_result = self.get_tables(source_uri)
+ if tables_result['success']:
+ for table_info in tables_result['tables']:
+ size = self._get_table_size(source_uri, table_info['schema'], table_info['name'])
+ if size:
+ total_size += size
+ table_count += 1
+ table_details.append({
+ 'table': f"{table_info['schema']}.{table_info['name']}",
+ 'size': size,
+ 'size_formatted': self._format_size(size)
+ })
+
+ return {
+ 'success': True,
+ 'total_size': total_size,
+ 'total_size_formatted': self._format_size(total_size),
+ 'table_count': table_count,
+ 'table_details': table_details[:10], # أول 10 جداول فقط
+ 'estimated_transfer_time': self._estimate_transfer_time(total_size)
+ }
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def _get_table_size(self, uri, schema, table_name):
+ """الحصول على حجم جدول معين من PostgreSQL"""
+ try:
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return None
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ # استعلام لجلب حجم الجدول
+ sql = f"""
+ SELECT pg_total_relation_size('"{schema}"."{table_name}"') as size;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)
+
+ if result.returncode == 0:
+ size_line = result.stdout.strip()
+ if size_line and size_line.isdigit():
+ return int(size_line)
+
+ return None
+
+ except Exception:
+ return None
+
+ def _estimate_transfer_time(self, size_bytes):
+ """تقدير وقت النقل بناءً على حجم البيانات"""
+ # سرعات تقديرية للشبكة
+ speeds = {
+ 'slow': 1024 * 1024, # 1 MB/s
+ 'average': 5 * 1024 * 1024, # 5 MB/s
+ 'fast': 20 * 1024 * 1024, # 20 MB/s
+ 'very_fast': 100 * 1024 * 1024 # 100 MB/s
+ }
+
+ estimates = {}
+ for speed_name, speed_bps in speeds.items():
+ seconds = size_bytes / speed_bps if speed_bps > 0 else 0
+ estimates[speed_name] = {
+ 'seconds': seconds,
+ 'formatted': self._format_time(seconds)
+ }
+
+ return estimates
+
+ def get_migration_detailed_progress(self, migration_id):
+ """الحصول على تقدم مفصل للترحيل"""
+ progress = self.calculate_migration_progress(migration_id)
+
+ if not progress.get('success', True):
+ return progress
+
+ # إضافة معلومات إضافية
+ with self._lock:
+ if migration_id in self.migrations:
+ mig_data = self.migrations[migration_id]
+ progress['source'] = mig_data.get('source', 'Unknown')
+ progress['destination'] = mig_data.get('destination', 'Unknown')
+ progress['started_at'] = mig_data.get('started_at')
+
+ if 'schemas' in mig_data:
+ progress['selected_schemas'] = mig_data['schemas']
+ if 'tables' in mig_data:
+ progress['selected_tables'] = mig_data['tables']
+
+ # إضافة شريط تقدم نصي
+ percentage = progress.get('progress_percentage', 0)
+ progress['progress_bar'] = self._format_progress_bar(percentage)
+
+ return progress
+
+ def _format_progress_bar(self, percentage, width=30):
+ """تنسيق شريط التقدم كنص"""
+ filled = int(width * percentage / 100)
+ empty = width - filled
+ bar = '█' * filled + '░' * empty
+ return f"[{bar}] {percentage:.1f}%"
+
+ def _format_size(self, size_bytes):
+ """تنسيق حجم الملف"""
+ if size_bytes == 0:
+ return '0 B'
+
+ size_names = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+ i = 0
+ while size_bytes >= 1024 and i < len(size_names) - 1:
+ size_bytes /= 1024.0
+ i += 1
+
+ return f"{size_bytes:.2f} {size_names[i]}"
+
+ def _format_time(self, seconds):
+ """تنسيق الوقت"""
+ if seconds < 60:
+ return f"{seconds:.0f}s"
+ elif seconds < 3600:
+ minutes = seconds / 60
+ return f"{minutes:.1f}m"
+ elif seconds < 86400:
+ hours = seconds / 3600
+ return f"{hours:.1f}h"
+ else:
+ days = seconds / 86400
+ return f"{days:.1f}d"
+
+ def set_migration_estimates(self, migration_id, estimated_tables, estimated_size):
+ """تعيين التقديرات للترحيل الجاري"""
+ with self._lock:
+ if migration_id in self.migrations:
+ self.migrations[migration_id]['estimated_tables'] = estimated_tables
+ self.migrations[migration_id]['estimated_size'] = estimated_size
+
+ def compare_source_destination(self, migration_id):
+ """مقارنة المصدر والوجهة بعد الترحيل"""
+ with self._lock:
+ if migration_id not in self.migrations:
+ return {'success': False, 'error': 'Migration not found'}
+
+ mig_data = self.migrations[migration_id]
+
+ if not mig_data.get('success'):
+ return {'success': False, 'error': 'Migration not completed successfully'}
+
+ # مقارنة بسيطة (يمكن توسيعها)
+ source_tables = mig_data.get('stats', {}).get('tables_created', 0)
+
+ return {
+ 'success': True,
+ 'source_tables_count': source_tables,
+ 'destination_tables_count': source_tables, # نفس العدد إذا نجح الترحيل
+ 'match': True,
+ 'verification_time': datetime.utcnow().isoformat()
+ }
+ def parse_uri_to_env(self, uri, prefix=''):
+ """Parse URI to environment variables format"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return None
+
+ env_vars = {
+ f'{prefix}DB_HOST': parsed['host'],
+ f'{prefix}DB_PORT': str(parsed['port']),
+ f'{prefix}DB_USER': parsed['user'],
+ f'{prefix}DB_PASSWORD': parsed['password'],
+ f'{prefix}DB_NAME': parsed['database'],
+ f'{prefix}DB_URI': parsed['uri']
+ }
+
+ # Create connection string variants
+ env_vars[f'{prefix}DB_CONNECTION_STRING'] = f"host={parsed['host']} port={parsed['port']} dbname={parsed['database']} user={parsed['user']} password={parsed['password']}"
+ env_vars[f'{prefix}DB_URL'] = parsed['uri']
+
+ return env_vars
+
    def test_connection(self, uri_input):
        """Test a PostgreSQL connection via the `psql` CLI.

        Args:
            uri_input: a URI string, or a dict carrying the URI under a
                'uri' or 'url' key (tolerated for JSON callers).

        Returns:
            dict with 'success'; on success adds server 'version',
            'database' and a 'connection' summary; on failure adds 'error'
            and, when the URI parsed, 'connection_details'.
        """

        # Step 1: handle JSON-style input (a dict) by extracting the URI string.
        if isinstance(uri_input, dict):
            print(f"⚠️ Received JSON object, extracting URI string...")

            # Extract the URI from the 'uri' or 'url' key.
            if 'uri' in uri_input:
                uri = uri_input['uri']
                print(f"✅ Extracted URI from 'uri' key")
            elif 'url' in uri_input:
                uri = uri_input['url']
                print(f"✅ Extracted URI from 'url' key")
            else:
                # No recognizable key: report which keys were received.
                error_msg = f'JSON must contain "uri" or "url" key. Received keys: {list(uri_input.keys())}'
                print(f"❌ {error_msg}")
                return {'success': False, 'error': error_msg}
        else:
            # Input is already a plain string.
            uri = uri_input

        # Guard: by this point the URI must be a string.
        if not isinstance(uri, str):
            error_msg = f'URI must be a string. Got type: {type(uri)}'
            print(f"❌ {error_msg}")
            return {'success': False, 'error': error_msg}

        print(f"🔍 Testing connection to: {uri}")
        parsed = self.parse_postgres_uri(uri)

        if not parsed:
            error_msg = f'Invalid URI format: {uri}'
            print(f"❌ {error_msg}")
            return {'success': False, 'error': error_msg}

        # Keep connection fields handy for the failure diagnostics below.
        host = parsed['host']
        port = parsed['port']
        user = parsed['user']
        database = parsed['database']

        env = os.environ.copy()
        env['PGPASSWORD'] = parsed['password']

        cmd = [
            'psql',
            '-h', host,
            '-p', str(port),
            '-U', user,
            '-d', database,
            '-c', 'SELECT version(); SELECT current_database();',
            '-t'
        ]

        try:
            print(f" 🔗 Attempting connection to host: {host}:{port}")
            print(f" 👤 User: {user}, Database: {database}")

            result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)

            if result.returncode == 0:
                # With -t, psql prints one bare line per SELECT.
                lines = result.stdout.strip().split('\n')
                version = lines[0] if len(lines) > 0 else ''
                db_name = lines[1] if len(lines) > 1 else ''

                print(f"✅ Connection successful to {host}:{port}")

                return {
                    'success': True,
                    'message': 'Connection successful',
                    'version': version,
                    'database': db_name,
                    'connection': {
                        'host': host,
                        'port': port,
                        'user': user,
                        'database': database
                    }
                }
            else:
                # Failure: log details including the host for quick triage.
                error_output = result.stderr.strip()
                print(f"❌ Connection FAILED to host: {host}:{port}")
                print(f" 🔸 User: {user}")
                print(f" 🔸 Database: {database}")
                print(f" 🔸 Error details: {error_output[:200]}")  # first 200 chars only

                return {
                    'success': False,
                    'error': error_output,
                    'connection_details': {
                        'host': host,
                        'port': port,
                        'user': user,
                        'database': database
                    }
                }

        except subprocess.TimeoutExpired:
            # Server did not respond within the 10-second timeout above.
            print(f"❌ Connection TIMEOUT to host: {host}:{port}")
            print(f" 🔸 Server not responding after 10 seconds")
            print(f" 🔸 Please check: firewall, network, server status")

            return {
                'success': False,
                'error': 'Connection timeout',
                'connection_details': {
                    'host': host,
                    'port': port,
                    'user': user,
                    'database': database
                }
            }

        except FileNotFoundError:
            # The psql binary itself is missing from PATH.
            print(f"❌ PSQL CLIENT NOT FOUND")
            print(f" 🔸 The 'psql' command-line tool is not installed")
            print(f" 🔸 Install PostgreSQL client tools and try again")

            return {
                'success': False,
                'error': 'psql command not found. Install PostgreSQL client.',
                'connection_details': {
                    'host': host,
                    'port': port,
                    'user': user,
                    'database': database
                }
            }

        except Exception as e:
            # Catch-all: surface the exception type and message to the caller.
            print(f"❌ UNEXPECTED ERROR connecting to host: {host}:{port}")
            print(f" 🔸 Error type: {type(e).__name__}")
            print(f" 🔸 Error message: {str(e)}")

            return {
                'success': False,
                'error': str(e),
                'connection_details': {
                    'host': host,
                    'port': port,
                    'user': user,
                    'database': database
                }
            }
+
+ def get_schemas(self, uri):
+ """Get list of schemas from PostgreSQL database"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return {'success': False, 'error': 'Invalid URI format'}
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ sql = """
+ SELECT schema_name
+ FROM information_schema.schemata
+ WHERE schema_name NOT IN ('information_schema', 'pg_catalog', 'pg_toast')
+ ORDER BY schema_name;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ try:
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)
+
+ if result.returncode == 0:
+ schemas = [s.strip() for s in result.stdout.splitlines() if s.strip()]
+ return {
+ 'success': True,
+ 'schemas': schemas,
+ 'count': len(schemas)
+ }
+ else:
+ return {'success': False, 'error': result.stderr.strip()}
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def get_tables(self, uri, schema=''):
+ """Get list of tables from PostgreSQL database"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return {'success': False, 'error': 'Invalid URI fformat'}
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ if schema:
+ sql = f"""
+ SELECT table_name, table_type
+ FROM information_schema.tables
+ WHERE table_schema = '{schema}'
+ ORDER BY table_name;
+ """
+ else:
+ sql = """
+ SELECT table_schema, table_name, table_type
+ FROM information_schema.tables
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog', 'pg_toast')
+ ORDER BY table_schema, table_name;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ try:
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)
+
+ if result.returncode == 0:
+ lines = [line.strip() for line in result.stdout.splitlines() if line.strip()]
+ tables = []
+
+ if schema:
+ for line in lines:
+ if '|' in line:
+ table_name, table_type = line.split('|')
+ tables.append({
+ 'name': table_name.strip(),
+ 'type': table_type.strip(),
+ 'schema': schema
+ })
+ else:
+ for line in lines:
+ if '|' in line:
+ parts = line.split('|')
+ if len(parts) >= 3:
+ table_schema, table_name, table_type = parts[:3]
+ tables.append({
+ 'schema': table_schema.strip(),
+ 'name': table_name.strip(),
+ 'type': table_type.strip()
+ })
+
+ return {
+ 'success': True,
+ 'tables': tables,
+ 'count': len(tables)
+ }
+ else:
+ return {'success': False, 'error': result.stderr.strip()}
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def get_table_counts(self, uri, schema=None):
+ """Get row counts for tables"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return {}
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ if schema:
+ sql = f"""
+ SELECT table_name,
+ (SELECT COUNT(*) FROM "{schema}"."{table_name}") as row_count
+ FROM information_schema.tables
+ WHERE table_schema = '{schema}'
+ ORDER BY table_name;
+ """
+ else:
+ sql = """
+ SELECT table_schema, table_name,
+ (SELECT COUNT(*) FROM information_schema.tables t2
+ WHERE t2.table_schema = t1.table_schema
+ AND t2.table_name = t1.table_name) as row_count
+ FROM information_schema.tables t1
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog', 'pg_toast')
+ ORDER BY table_schema, table_name;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ try:
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=30)
+
+ table_counts = {}
+ if result.returncode == 0:
+ for line in result.stdout.splitlines():
+ if line.strip() and '|' in line:
+ parts = line.split('|')
+ if len(parts) >= 3:
+ table_schema, table_name, count = parts[:3]
+ key = f"{table_schema.strip()}.{table_name.strip()}"
+ table_counts[key] = int(count.strip()) if count.strip().isdigit() else 0
+ return table_counts
+
+ except Exception:
+ return {}
+
    def run_migration(self, migration_id, source_uri, dest_uri, schemas=None, tables=None):
        """Run a `pg_dump | psql` migration synchronously and return results.

        Args:
            migration_id: identifier used to tag log lines and the result.
            source_uri: source PostgreSQL connection URI.
            dest_uri: destination PostgreSQL connection URI.
            schemas: optional list of schema names to migrate.
            tables: optional list of 'schema.table' names; takes precedence
                over `schemas` in pg_dump's filters.

        Returns:
            dict with 'success', 'migration_id', 'logs', plus
            'execution_time'/'stats'/'environment_variables' on success or
            'error' on failure.
        """
        logs = []

        def log(msg, level='info'):
            # Collect a structured log entry and echo it to stdout.
            log_entry = {
                'timestamp': time.time(),
                'message': msg,
                'level': level
            }
            logs.append(log_entry)
            print(f"[{migration_id}] {msg}")

        try:
            log(f"🔧 Starting PostgreSQL Migration {migration_id}", 'info')
            log(f"Source: {source_uri}", 'info')
            log(f"Destination: {dest_uri}", 'info')

            if schemas:
                log(f"Selected schemas: {', '.join(schemas)}", 'info')
            if tables:
                log(f"Selected tables: {', '.join(tables)}", 'info')

            # Build pg_dump command (--clean --if-exists drops existing
            # objects first; --no-owner skips ownership statements).
            dump_cmd = ['pg_dump', '--dbname', source_uri, '--clean', '--if-exists', '--no-owner']

            # Add schema/table filters; explicit tables win over schemas.
            if tables:
                for table in tables:
                    if '.' in table:
                        dump_cmd.extend(['-t', table])
                    else:
                        log(f"⚠️ Table '{table}' should include schema (e.g., public.table)", 'warning')
            elif schemas:
                for schema in schemas:
                    dump_cmd.extend(['-n', schema])

            # Build psql command (--single-transaction = all-or-nothing restore).
            psql_cmd = ['psql', '--dbname', dest_uri, '--single-transaction']

            # Create pipe command.
            # NOTE(review): shell=True with string-joined URIs is
            # shell-injection prone if URIs are untrusted — confirm callers
            # sanitize them before reaching this point.
            full_cmd = ' '.join(dump_cmd) + ' | ' + ' '.join(psql_cmd)

            log(f"Executing: {full_cmd[:100]}...", 'info')

            # Run the migration (hard 5-minute timeout below).
            start_time = time.time()
            result = subprocess.run(full_cmd, shell=True, capture_output=True, text=True, timeout=300)
            end_time = time.time()

            execution_time = end_time - start_time

            if result.returncode == 0:
                log(f"✅ Migration completed successfully!", 'success')
                log(f"⏱️ Execution time: {execution_time:.2f} seconds", 'info')

                # Derive rough statistics by counting psql's echoed commands.
                create_tables = result.stdout.count('CREATE TABLE')
                if create_tables > 0:
                    log(f"📊 Tables created: {create_tables}", 'info')

                alter_tables = result.stdout.count('ALTER TABLE')
                create_indexes = result.stdout.count('CREATE INDEX')

                if alter_tables > 0:
                    log(f"🔄 Tables altered: {alter_tables}", 'info')
                if create_indexes > 0:
                    log(f"📈 Indexes created: {create_indexes}", 'info')

                # Extract environment variables from URIs for the caller.
                source_env = self.parse_uri_to_env(source_uri, 'SRC_')
                dest_env = self.parse_uri_to_env(dest_uri, 'DEST_')
                all_env = {}
                if source_env:
                    all_env.update(source_env)
                if dest_env:
                    all_env.update(dest_env)

                return {
                    'success': True,
                    'migration_id': migration_id,
                    'execution_time': execution_time,
                    'stats': {
                        'tables_created': create_tables,
                        'tables_altered': alter_tables,
                        'indexes_created': create_indexes
                    },
                    'environment_variables': all_env,
                    'logs': logs
                }
            else:
                # Truncate stderr to keep result payloads bounded.
                error_msg = result.stderr[:1000] if result.stderr else 'Unknown error'
                log(f"❌ Migration failed: {error_msg}", 'error')

                return {
                    'success': False,
                    'migration_id': migration_id,
                    'error': error_msg,
                    'logs': logs
                }

        except subprocess.TimeoutExpired:
            log("❌ Migration timeout (5 minutes)", 'error')
            return {
                'success': False,
                'migration_id': migration_id,
                'error': 'Migration timeout',
                'logs': logs
            }
        except Exception as e:
            log(f"❌ Error: {str(e)}", 'error')
            return {
                'success': False,
                'migration_id': migration_id,
                'error': str(e),
                'logs': logs
            }
+
+ def start_migration_async(self, migration_id, source_uri, dest_uri, schemas=None, tables=None):
+ """Start migration in background thread"""
+ def migration_task():
+ result = self.run_migration(migration_id, source_uri, dest_uri, schemas, tables)
+ with self._lock:
+ self.migrations[migration_id] = result
+
+ thread = threading.Thread(target=migration_task, daemon=True)
+ thread.start()
+
+ # Initialize migration entry
+ with self._lock:
+ self.migrations[migration_id] = {
+ 'status': 'running',
+ 'started_at': time.time(),
+ 'source': source_uri,
+ 'destination': dest_uri
+ }
+
+ return migration_id
+
+ def get_migration_status(self, migration_id):
+ """Get status of a migration"""
+ with self._lock:
+ return self.migrations.get(migration_id)
+
+ def list_migrations(self):
+ """List all migrations"""
+ with self._lock:
+ migrations_list = []
+ for mig_id, mig_data in self.migrations.items():
+ if isinstance(mig_data, dict) and 'success' in mig_data:
+ status = 'completed' if mig_data['success'] else 'failed'
+ else:
+ status = 'running'
+
+ migrations_list.append({
+ 'id': mig_id,
+ 'status': status,
+ 'data': mig_data
+ })
+
+ return migrations_list
+
+
+# ============================================================================
+# Part 2: S3 to S3 Migrator (S3ToS3Migrator)
+# ============================================================================
+
+class S3ToS3Migrator:
+ """S3 to S3 migration engine"""
+
    def __init__(self):
        # Migration records keyed by migration id; guarded by _lock.
        self.migrations = {}
        self._lock = threading.Lock()
        # Cached boto3 clients keyed by credential/endpoint fingerprint.
        self.source_clients = {}
        self.dest_clients = {}
+
+ # ==================== S3 Client Management ====================
+
    def get_source_client(self, access_key_id, secret_access_key, region='us-east-1',
                          endpoint_url=None, session_token=None):
        """Get or create a cached boto3 S3 client for the source side.

        Returns the client, or None when client construction fails.
        NOTE(review): the cache key embeds the plaintext secret key — fine
        in-process, but it must never be logged.
        """
        cache_key = f"src:{access_key_id}:{secret_access_key}:{region}:{endpoint_url}"

        if cache_key in self.source_clients:
            return self.source_clients[cache_key]

        try:
            s3_config = {
                'aws_access_key_id': access_key_id,
                'aws_secret_access_key': secret_access_key,
                'region_name': region
            }

            if session_token:
                s3_config['aws_session_token'] = session_token

            if endpoint_url:
                s3_config['endpoint_url'] = endpoint_url
                # Disable TLS verification for local services (e.g. MinIO
                # on localhost) that use self-signed certificates.
                if 'localhost' in endpoint_url or '127.0.0.1' in endpoint_url:
                    s3_config['verify'] = False

            client = boto3.client('s3', **s3_config)
            self.source_clients[cache_key] = client
            return client

        except Exception as e:
            print(f"❌ Failed to create source S3 client: {str(e)}")
            return None
+ def calculate_migration_progress(self, migration_id):
+ """حساب نسبة التقدم في الترحيل بناءً على عدد وحجم الملفات"""
+ with self._lock:
+ if migration_id not in self.migrations:
+ return {'success': False, 'error': 'Migration not found'}
+
+ mig_data = self.migrations[migration_id]
+
+ if mig_data.get('status') == 'running':
+ return self._get_running_progress(migration_id)
+ else:
+ return self._get_completed_progress(migration_id)
+
    def _get_running_progress(self, migration_id):
        """Build a live progress snapshot for a running S3 migration.

        Reads (and lazily initializes) the 'progress_tracker' dict on the
        migration entry. Caller is expected to hold self._lock.
        """
        mig_data = self.migrations[migration_id]

        # Lazily create the tracker if update_progress() has not run yet.
        if 'progress_tracker' not in mig_data:
            mig_data['progress_tracker'] = {
                'total_objects': mig_data.get('total_objects', 0),
                'total_size': mig_data.get('total_size', 0),
                'processed_objects': 0,
                'processed_size': 0,
                'failed_objects': 0,
                'failed_size': 0,
                'current_object': None,
                'current_object_size': 0,
                'current_object_progress': 0,
                'start_time': mig_data.get('started_at', time.time()),
                'last_update': time.time(),
                'objects_details': [],
                'speed_samples': [],
                'estimated_time_remaining': 0
            }

        tracker = mig_data['progress_tracker']
        elapsed_time = time.time() - tracker['start_time']

        # Percentages (guarded against unknown/zero totals).
        objects_percentage = (tracker['processed_objects'] / tracker['total_objects'] * 100) if tracker['total_objects'] > 0 else 0
        size_percentage = (tracker['processed_size'] / tracker['total_size'] * 100) if tracker['total_size'] > 0 else 0

        # Current speed: average over samples from the last 10 seconds
        # (stale samples are pruned in place).
        current_time = time.time()
        tracker['speed_samples'] = [s for s in tracker.get('speed_samples', []) if current_time - s['time'] < 10]

        if tracker['speed_samples']:
            recent_size = sum(s['size'] for s in tracker['speed_samples'])
            recent_time = tracker['speed_samples'][-1]['time'] - tracker['speed_samples'][0]['time'] if len(tracker['speed_samples']) > 1 else 1
            current_speed = recent_size / recent_time if recent_time > 0 else 0
        else:
            # No recent samples: fall back to the run-wide average.
            current_speed = tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0

        # Estimated time remaining at the current speed.
        remaining_size = tracker['total_size'] - tracker['processed_size'] - tracker['failed_size']
        if current_speed > 0:
            eta_seconds = remaining_size / current_speed
        else:
            eta_seconds = 0

        # Only the 10 most recently completed objects are surfaced.
        recent_objects = tracker['objects_details'][-10:] if tracker['objects_details'] else []

        return {
            'migration_id': migration_id,
            'status': 'running',
            'timestamp': time.time(),
            'total_objects': tracker['total_objects'],
            'total_size': tracker['total_size'],
            'total_size_formatted': self._format_size(tracker['total_size']),
            'processed_objects': tracker['processed_objects'],
            'processed_size': tracker['processed_size'],
            'processed_size_formatted': self._format_size(tracker['processed_size']),
            'failed_objects': tracker['failed_objects'],
            'failed_size': tracker['failed_size'],
            'failed_size_formatted': self._format_size(tracker['failed_size']),
            'remaining_objects': tracker['total_objects'] - tracker['processed_objects'] - tracker['failed_objects'],
            'remaining_size': tracker['total_size'] - tracker['processed_size'] - tracker['failed_size'],
            'remaining_size_formatted': self._format_size(tracker['total_size'] - tracker['processed_size'] - tracker['failed_size']),
            'objects_percentage': round(objects_percentage, 2),
            'size_percentage': round(size_percentage, 2),
            'elapsed_time': elapsed_time,
            'elapsed_time_formatted': self._format_time(elapsed_time),
            'current_speed': current_speed,
            'current_speed_formatted': f"{self._format_size(current_speed)}/s",
            'average_speed': tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0,
            'average_speed_formatted': f"{self._format_size(tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0)}/s",
            'eta_seconds': eta_seconds,
            'eta_formatted': self._format_time(eta_seconds),
            'current_object': tracker['current_object'],
            'current_object_size': tracker['current_object_size'],
            'current_object_size_formatted': self._format_size(tracker['current_object_size']),
            'current_object_progress': tracker['current_object_progress'],
            'recent_objects': recent_objects,
            'progress_bar_objects': self._format_progress_bar(objects_percentage),
            'progress_bar_size': self._format_progress_bar(size_percentage),
            'estimated_completion_time': time.time() + eta_seconds if eta_seconds > 0 else None
        }
+
    def _get_completed_progress(self, migration_id):
        """Build final statistics for a completed (or failed) S3 migration.

        Caller is expected to hold self._lock.
        """
        mig_data = self.migrations[migration_id]

        stats = mig_data.get('stats', {})
        successful = mig_data.get('successful', [])
        failed = mig_data.get('failed', [])

        total_objects = stats.get('total_objects', 0)
        migrated = stats.get('migrated', 0)
        failed_count = stats.get('failed', 0)
        total_size = stats.get('total_size', 0)
        migrated_size = stats.get('migrated_size', 0)

        # Total duration; falls back to "now" when timestamps are missing.
        completion_time = mig_data.get('completed_at', time.time())
        start_time = mig_data.get('started_at', completion_time)
        total_time = completion_time - start_time

        return {
            'migration_id': migration_id,
            'status': 'completed' if mig_data.get('success') else 'failed',
            'success': mig_data.get('success', False),
            'timestamp': time.time(),
            'total_objects': total_objects,
            'total_size': total_size,
            'total_size_formatted': self._format_size(total_size),
            'migrated_objects': migrated,
            'migrated_size': migrated_size,
            'migrated_size_formatted': self._format_size(migrated_size),
            'failed_objects': failed_count,
            'failed_size': total_size - migrated_size,
            'failed_size_formatted': self._format_size(total_size - migrated_size),
            'objects_percentage': 100.0 if migrated == total_objects else round((migrated / total_objects * 100) if total_objects > 0 else 0, 2),
            'size_percentage': 100.0 if migrated_size == total_size else round((migrated_size / total_size * 100) if total_size > 0 else 0, 2),
            'total_time': total_time,
            'total_time_formatted': self._format_time(total_time),
            'average_speed': migrated_size / total_time if total_time > 0 else 0,
            'average_speed_formatted': f"{self._format_size(migrated_size / total_time if total_time > 0 else 0)}/s",
            'successful_objects': successful[:10] if successful else [],
            'failed_objects_list': failed[:10] if failed else [],
            'progress_bar': self._format_progress_bar(100.0 if mig_data.get('success') else 0)
        }
+
    def update_progress(self, migration_id, processed_objects=1, processed_size=0,
                        failed_objects=0, failed_size=0, current_object=None,
                        object_details=None):
        """Update the live progress of a running migration.

        Intended to be called by the worker as objects are transferred.
        Counters are deltas added to the tracker, not absolute values.
        Returns True on success, False when the migration id is unknown.
        """
        with self._lock:
            if migration_id not in self.migrations:
                return False

            mig_data = self.migrations[migration_id]

            # Lazily create the tracker on first update.
            if 'progress_tracker' not in mig_data:
                mig_data['progress_tracker'] = {
                    'total_objects': mig_data.get('total_objects', 0),
                    'total_size': mig_data.get('total_size', 0),
                    'processed_objects': 0,
                    'processed_size': 0,
                    'failed_objects': 0,
                    'failed_size': 0,
                    'start_time': mig_data.get('started_at', time.time()),
                    'speed_samples': []
                }

            tracker = mig_data['progress_tracker']

            # Accumulate the delta counters.
            tracker['processed_objects'] += processed_objects
            tracker['processed_size'] += processed_size
            tracker['failed_objects'] += failed_objects
            tracker['failed_size'] += failed_size
            tracker['last_update'] = time.time()

            # Record the object currently being transferred.
            if current_object:
                tracker['current_object'] = current_object

            # Append a speed sample; _get_running_progress prunes samples
            # older than 10 seconds.
            tracker['speed_samples'].append({
                'time': time.time(),
                'size': processed_size
            })

            # Attach per-object details when provided.
            if object_details:
                if 'objects_details' not in tracker:
                    tracker['objects_details'] = []
                tracker['objects_details'].append({
                    'timestamp': time.time(),
                    **object_details
                })

            return True
+
+ def get_live_progress_stream(self, migration_id, interval=1):
+ """الحصول على تدفق مباشر لتقدم الترحيل (للاستخدام مع WebSockets)"""
+ import time
+
+ while True:
+ progress = self.calculate_migration_progress(migration_id)
+ if progress.get('status') != 'running':
+ yield progress
+ break
+
+ yield progress
+ time.sleep(interval)
+
+ def format_live_status(self, migration_id):
+ """تنسيق الحالة المباشرة كنص مقروء"""
+ progress = self.calculate_migration_progress(migration_id)
+
+ if not progress.get('success', True) and progress.get('status') != 'running':
+ return f"❌ Error: {progress.get('error', 'Unknown error')}"
+
+ if progress['status'] == 'running':
+ status_lines = [
+ f"🚀 Migration {migration_id} - {progress['status'].upper()}",
+ f"📊 Progress: {progress['progress_bar_objects']}",
+ f"📦 Objects: {progress['processed_objects']}/{progress['total_objects']} ({progress['objects_percentage']}%)",
+ f"💾 Size: {progress['processed_size_formatted']}/{progress['total_size_formatted']} ({progress['size_percentage']}%)",
+ f"⚡ Speed: {progress['current_speed_formatted']} (Avg: {progress['average_speed_formatted']})",
+ f"⏱️ Elapsed: {progress['elapsed_time_formatted']} | ETA: {progress['eta_formatted']}",
+ ]
+
+ if progress['current_object']:
+ status_lines.append(f"📄 Current: {progress['current_object']} ({progress['current_object_size_formatted']})")
+
+ if progress['failed_objects'] > 0:
+ status_lines.append(f"❌ Failed: {progress['failed_objects']} objects ({progress['failed_size_formatted']})")
+
+ return '\n'.join(status_lines)
+
+ else:
+ # عرض ملخص الترحيل المكتمل
+ status_lines = [
+ f"{'✅' if progress['success'] else '❌'} Migration {migration_id} - {progress['status'].upper()}",
+ f"📊 Summary:",
+ f"📦 Objects: {progress['migrated_objects']}/{progress['total_objects']} ({progress['objects_percentage']}%)",
+ f"💾 Size: {progress['migrated_size_formatted']}/{progress['total_size_formatted']} ({progress['size_percentage']}%)",
+ f"⏱️ Total time: {progress['total_time_formatted']}",
+ f"⚡ Average speed: {progress['average_speed_formatted']}",
+ ]
+
+ if progress.get('failed_objects', 0) > 0:
+ status_lines.append(f"❌ Failed: {progress['failed_objects']} objects")
+
+ return '\n'.join(status_lines)
+
+ def estimate_transfer_time(self, total_size, current_speed=None):
+ """تقدير وقت النقل بناءً على حجم البيانات"""
+ # سرعات تقديرية للشبكة (بالبايت في الثانية)
+ speed_profiles = {
+ 'slow': 1 * 1024 * 1024, # 1 MB/s
+ 'average': 5 * 1024 * 1024, # 5 MB/s
+ 'fast': 20 * 1024 * 1024, # 20 MB/s
+ 'very_fast': 100 * 1024 * 1024, # 100 MB/s
+ 'gigabit': 125 * 1024 * 1024, # 1 Gbps ~ 125 MB/s
+ }
+
+ estimates = {}
+ for profile, speed in speed_profiles.items():
+ seconds = total_size / speed if speed > 0 else 0
+ estimates[profile] = {
+ 'speed': speed,
+ 'speed_formatted': f"{self._format_size(speed)}/s",
+ 'seconds': seconds,
+ 'formatted': self._format_time(seconds)
+ }
+
+ # إذا كان لدينا سرعة حالية، أضف تقدير بناءً عليها
+ if current_speed and current_speed > 0:
+ seconds = total_size / current_speed
+ estimates['current'] = {
+ 'speed': current_speed,
+ 'speed_formatted': f"{self._format_size(current_speed)}/s",
+ 'seconds': seconds,
+ 'formatted': self._format_time(seconds)
+ }
+
+ return estimates
+
+ def _format_progress_bar(self, percentage, width=30):
+ """تنسيق شريط التقدم كنص"""
+ filled = int(width * percentage / 100)
+ empty = width - filled
+ bar = '█' * filled + '░' * empty
+ return f"[{bar}] {percentage:.1f}%"
+
+ def _format_time(self, seconds):
+ """تنسيق الوقت"""
+ if seconds < 60:
+ return f"{seconds:.0f}s"
+ elif seconds < 3600:
+ minutes = seconds / 60
+ return f"{minutes:.1f}m"
+ elif seconds < 86400:
+ hours = seconds / 3600
+ return f"{hours:.1f}h"
+ else:
+ days = seconds / 86400
+ return f"{days:.1f}d"
+
+ def get_migration_speed_history(self, migration_id):
+ """الحصول على تاريخ السرعة للترحيل"""
+ with self._lock:
+ if migration_id not in self.migrations:
+ return {'success': False, 'error': 'Migration not found'}
+
+ mig_data = self.migrations[migration_id]
+ tracker = mig_data.get('progress_tracker', {})
+ speed_samples = tracker.get('speed_samples', [])
+
+ return {
+ 'success': True,
+ 'migration_id': migration_id,
+ 'speed_samples': speed_samples,
+ 'average_speed': sum(s['size'] for s in speed_samples) / len(speed_samples) if speed_samples else 0,
+ 'peak_speed': max((s['size'] for s in speed_samples), default=0),
+ 'samples_count': len(speed_samples)
+ }
+
+ def compare_source_destination(self, migration_id):
+ """مقارنة المصدر والوجهة بعد الترحيل"""
+ with self._lock:
+ if migration_id not in self.migrations:
+ return {'success': False, 'error': 'Migration not found'}
+
+ mig_data = self.migrations[migration_id]
+
+ if not mig_data.get('success'):
+ return {'success': False, 'error': 'Migration not completed successfully'}
+
+ stats = mig_data.get('stats', {})
+
+ return {
+ 'success': True,
+ 'source_objects': stats.get('total_objects', 0),
+ 'destination_objects': stats.get('migrated', 0),
+ 'source_size': stats.get('total_size', 0),
+ 'destination_size': stats.get('migrated_size', 0),
+ 'match': stats.get('total_objects', 0) == stats.get('migrated', 0),
+ 'verification_time': datetime.utcnow().isoformat()
+ }
+ def get_destination_client(self, access_key_id, secret_access_key, region='us-east-1',
+ endpoint_url=None, session_token=None):
+ """Get or create destination S3 client"""
+ cache_key = f"dst:{access_key_id}:{secret_access_key}:{region}:{endpoint_url}"
+
+ if cache_key in self.dest_clients:
+ return self.dest_clients[cache_key]
+
+ try:
+ s3_config = {
+ 'aws_access_key_id': access_key_id,
+ 'aws_secret_access_key': secret_access_key,
+ 'region_name': region
+ }
+
+ if session_token:
+ s3_config['aws_session_token'] = session_token
+
+ if endpoint_url:
+ s3_config['endpoint_url'] = endpoint_url
+ if 'localhost' in endpoint_url or '127.0.0.1' in endpoint_url:
+ s3_config['verify'] = False
+
+ client = boto3.client('s3', **s3_config)
+ self.dest_clients[cache_key] = client
+ return client
+
+ except Exception as e:
+ print(f"❌ Failed to create destination S3 client: {str(e)}")
+ return None
+
+ # ==================== Connection Testing ====================
+
+ def test_source_connection(self, access_key_id, secret_access_key, region='us-east-1',
+ endpoint_url=None, session_token=None):
+ """Test source S3 connection"""
+ return self._test_connection('Source', access_key_id, secret_access_key,
+ region, endpoint_url, session_token)
+
+ def test_destination_connection(self, access_key_id, secret_access_key, region='us-east-1',
+ endpoint_url=None, session_token=None):
+ """Test destination S3 connection"""
+ return self._test_connection('Destination', access_key_id, secret_access_key,
+ region, endpoint_url, session_token)
+
    def _test_connection(self, conn_type, access_key_id, secret_access_key, region,
                         endpoint_url, session_token):
        """Internal method to test S3 connection.

        Builds the source or destination client (selected by `conn_type`) and
        probes the endpoint with list_buckets(). A 404 ClientError is treated
        as success, on the assumption that the credentials are valid but lack
        the list_buckets permission.

        Args:
            conn_type: 'Source' or 'Destination'; selects the client factory
                and labels the console/error messages.
            access_key_id, secret_access_key: credentials (both required).
            region: AWS region name.
            endpoint_url: custom endpoint URL, or None for AWS S3.
            session_token: optional STS session token.

        Returns:
            dict with 'success' plus either connection details (bucket names,
            response time) or an 'error' description.
        """
        # Fail fast when either credential is missing.
        if not access_key_id or not secret_access_key:
            print(f"❌ {conn_type} AWS credentials are required")
            return {
                'success': False,
                'error': f'{conn_type} AWS credentials are required',
                'details': {
                    'access_key_id_provided': bool(access_key_id),
                    'secret_key_provided': bool(secret_access_key)
                }
            }

        print(f"🔍 Testing {conn_type} S3 connection...")
        # Only the first 10 characters of the key are echoed.
        print(f" 🔸 Access Key ID: {access_key_id[:10]}...")
        print(f" 🔸 Region: {region}")
        print(f" 🔸 Endpoint URL: {endpoint_url or 'AWS S3 (default)'}")

        try:
            # Reuse the cached client factories.
            if conn_type == 'Source':
                client = self.get_source_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
            else:
                client = self.get_destination_client(access_key_id, secret_access_key, region, endpoint_url, session_token)

            if not client:
                return {
                    'success': False,
                    'error': f'Failed to create {conn_type} S3 client'
                }

            # Test connection by listing buckets, measuring round-trip time.
            start_time = time.time()
            response = client.list_buckets()
            end_time = time.time()

            response_time = end_time - start_time
            buckets = [bucket['Name'] for bucket in response.get('Buckets', [])]

            print(f"✅ {conn_type} S3 Connection Successful!")
            print(f" 🔸 Response time: {response_time:.2f} seconds")
            print(f" 🔸 Available buckets: {len(buckets)}")

            return {
                'success': True,
                'message': f'{conn_type} S3 connection successful',
                'buckets': buckets[:10],  # First 10 buckets only
                'bucket_count': len(buckets),
                'response_time': response_time,
                'connection_details': {
                    'type': conn_type,
                    'region': region,
                    'endpoint': endpoint_url or 'AWS S3 (default)',
                    'has_credentials': True
                }
            }

        except NoCredentialsError:
            error_msg = f"{conn_type} No credentials provided or credentials are invalid"
            print(f"❌ {error_msg}")
            return {'success': False, 'error': error_msg}

        except EndpointConnectionError as e:
            # Network-level failure: endpoint unreachable.
            error_msg = f"{conn_type} Cannot connect to endpoint: {endpoint_url}"
            print(f"❌ {error_msg}")
            return {'success': False, 'error': error_msg, 'details': str(e)}

        except ClientError as e:
            error_code = e.response['Error']['Code']
            error_msg = e.response['Error']['Message']
            print(f"❌ {conn_type} AWS Error: {error_code} - {error_msg}")

            # 404 usually means list_buckets permission denied, but connection works
            if error_code == '404':
                return {
                    'success': True,
                    'message': f'{conn_type} S3 connection successful (credentials valid, but list_buckets not allowed)',
                    'connection_details': {
                        'type': conn_type,
                        'region': region,
                        'endpoint': endpoint_url or 'AWS S3 (default)',
                        'note': '404: Not Found - This is usually permission denied for list_buckets'
                    }
                }

            return {
                'success': False,
                'error': f"{error_code}: {error_msg}",
                'aws_error': error_code
            }

        except Exception as e:
            # Catch-all so a connectivity test never raises to the caller.
            error_msg = f"{conn_type} Unexpected error: {str(e)}"
            print(f"❌ {error_msg}")
            return {'success': False, 'error': error_msg}
+
+ # ==================== Bucket Operations ====================
+
+ def list_buckets(self, access_key_id, secret_access_key, region='us-east-1',
+ endpoint_url=None, session_token=None, is_source=True):
+ """List all S3 buckets with details"""
+ try:
+ if is_source:
+ client = self.get_source_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+ else:
+ client = self.get_destination_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+
+ if not client:
+ return {'success': False, 'error': 'Failed to create S3 client'}
+
+ response = client.list_buckets()
+ buckets_info = []
+
+ for bucket in response.get('Buckets', []):
+ bucket_name = bucket['Name']
+ creation_date = bucket['CreationDate']
+
+ # Get bucket location (region)
+ try:
+ location = client.get_bucket_location(Bucket=bucket_name)
+ bucket_region = location.get('LocationConstraint', 'us-east-1')
+ if not bucket_region:
+ bucket_region = 'us-east-1'
+ except:
+ bucket_region = region
+
+ # Get bucket size and object count (optional, may be slow)
+ try:
+ size_response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
+ object_count = len(size_response.get('Contents', []))
+ total_size = sum(obj.get('Size', 0) for obj in size_response.get('Contents', []))
+ except:
+ object_count = 0
+ total_size = 0
+
+ buckets_info.append({
+ 'name': bucket_name,
+ 'creation_date': creation_date.isoformat() if hasattr(creation_date, 'isoformat') else str(creation_date),
+ 'region': bucket_region,
+ 'object_count': object_count,
+ 'total_size': total_size
+ })
+
+ return {
+ 'success': True,
+ 'buckets': buckets_info,
+ 'count': len(buckets_info)
+ }
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def list_objects(self, bucket_name, prefix='', max_keys=1000, is_source=True,
+ access_key_id=None, secret_access_key=None, region='us-east-1',
+ endpoint_url=None, session_token=None):
+ """List objects in a bucket with pagination support"""
+ try:
+ if is_source:
+ client = self.get_source_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+ else:
+ client = self.get_destination_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+
+ if not client:
+ return {'success': False, 'error': 'Failed to create S3 client'}
+
+ all_objects = []
+ continuation_token = None
+
+ while True:
+ list_kwargs = {
+ 'Bucket': bucket_name,
+ 'Prefix': prefix,
+ 'MaxKeys': max_keys
+ }
+
+ if continuation_token:
+ list_kwargs['ContinuationToken'] = continuation_token
+
+ response = client.list_objects_v2(**list_kwargs)
+
+ for obj in response.get('Contents', []):
+ all_objects.append({
+ 'key': obj['Key'],
+ 'size': obj['Size'],
+ 'last_modified': obj['LastModified'].isoformat() if hasattr(obj['LastModified'], 'isoformat') else str(obj['LastModified']),
+ 'etag': obj['ETag'].strip('"')
+ })
+
+ if response.get('IsTruncated'):
+ continuation_token = response.get('NextContinuationToken')
+ else:
+ break
+
+ return {
+ 'success': True,
+ 'objects': all_objects,
+ 'count': len(all_objects),
+ 'bucket': bucket_name,
+ 'prefix': prefix,
+ 'total_size': sum(obj['size'] for obj in all_objects)
+ }
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def bucket_exists(self, bucket_name, is_source=True, access_key_id=None,
+ secret_access_key=None, region='us-east-1', endpoint_url=None,
+ session_token=None):
+ """Check if bucket exists and is accessible"""
+ try:
+ if is_source:
+ client = self.get_source_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+ else:
+ client = self.get_destination_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+
+ if not client:
+ return {'success': False, 'error': 'Failed to create S3 client'}
+
+ client.head_bucket(Bucket=bucket_name)
+
+ # Get bucket location
+ try:
+ location = client.get_bucket_location(Bucket=bucket_name)
+ bucket_region = location.get('LocationConstraint', 'us-east-1')
+ if not bucket_region:
+ bucket_region = 'us-east-1'
+ except:
+ bucket_region = region
+
+ return {
+ 'success': True,
+ 'exists': True,
+ 'bucket': bucket_name,
+ 'region': bucket_region
+ }
+
+ except ClientError as e:
+ error_code = e.response['Error']['Code']
+ if error_code == '404':
+ return {'success': True, 'exists': False, 'bucket': bucket_name}
+ elif error_code == '403':
+ return {'success': False, 'error': 'Access denied to bucket', 'exists': False}
+ else:
+ return {'success': False, 'error': str(e)}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def create_bucket(self, bucket_name, region='us-east-1', endpoint_url=None,
+ access_key_id=None, secret_access_key=None, session_token=None):
+ """Create a new S3 bucket in destination"""
+ try:
+ client = self.get_destination_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+ if not client:
+ return {'success': False, 'error': 'Failed to create S3 client'}
+
+ # Check if bucket already exists
+ exists_check = self.bucket_exists(bucket_name, is_source=False,
+ access_key_id=access_key_id,
+ secret_access_key=secret_access_key,
+ region=region, endpoint_url=endpoint_url,
+ session_token=session_token)
+
+ if exists_check.get('exists'):
+ return {
+ 'success': True,
+ 'message': f'Bucket already exists: {bucket_name}',
+ 'bucket': bucket_name,
+ 'created': False
+ }
+
+ # Create bucket
+ create_kwargs = {'Bucket': bucket_name}
+
+ # Special handling for us-east-1
+ if region != 'us-east-1':
+ create_kwargs['CreateBucketConfiguration'] = {
+ 'LocationConstraint': region
+ }
+
+ client.create_bucket(**create_kwargs)
+
+ return {
+ 'success': True,
+ 'message': f'Bucket created successfully: {bucket_name}',
+ 'bucket': bucket_name,
+ 'region': region,
+ 'created': True
+ }
+
+ except ClientError as e:
+ error_code = e.response['Error']['Code']
+ error_msg = e.response['Error']['Message']
+ return {
+ 'success': False,
+ 'error': f"{error_code}: {error_msg}",
+ 'bucket': bucket_name
+ }
+ except Exception as e:
+ return {'success': False, 'error': str(e), 'bucket': bucket_name}
+
+ # ==================== Object Migration ====================
+
+ def migrate_object(self, source_bucket, source_key, dest_bucket, dest_key=None,
+ source_credentials=None, dest_credentials=None,
+ preserve_metadata=True, preserve_acl=False,
+ metadata=None, storage_class='STANDARD'):
+ """
+ Migrate a single object from source S3 to destination S3
+ """
+ logs = []
+
+ def log(msg, level='info'):
+ log_entry = {
+ 'timestamp': time.time(),
+ 'message': msg,
+ 'level': level
+ }
+ logs.append(log_entry)
+ print(f"[Migration] {msg}")
+
+ try:
+ if not dest_key:
+ dest_key = source_key
+
+ log(f"📦 Migrating: s3://{source_bucket}/{source_key} → s3://{dest_bucket}/{dest_key}")
+
+ # Get source client
+ source_client = self.get_source_client(
+ source_credentials.get('access_key_id'),
+ source_credentials.get('secret_access_key'),
+ source_credentials.get('region', 'us-east-1'),
+ source_credentials.get('endpoint_url'),
+ source_credentials.get('session_token')
+ )
+
+ if not source_client:
+ return {
+ 'success': False,
+ 'error': 'Failed to create source S3 client',
+ 'logs': logs
+ }
+
+ # Get destination client
+ dest_client = self.get_destination_client(
+ dest_credentials.get('access_key_id'),
+ dest_credentials.get('secret_access_key'),
+ dest_credentials.get('region', 'us-east-1'),
+ dest_credentials.get('endpoint_url'),
+ dest_credentials.get('session_token')
+ )
+
+ if not dest_client:
+ return {
+ 'success': False,
+ 'error': 'Failed to create destination S3 client',
+ 'logs': logs
+ }
+
+ # Get object metadata from source
+ log("🔍 Getting source object metadata...")
+ head_response = source_client.head_object(
+ Bucket=source_bucket,
+ Key=source_key
+ )
+
+ source_size = head_response['ContentLength']
+ source_etag = head_response['ETag'].strip('"')
+ source_last_modified = head_response['LastModified']
+
+ log(f" 📊 Size: {self._format_size(source_size)}")
+ log(f" 🏷️ ETag: {source_etag[:16]}...")
+
+ # Prepare metadata
+ final_metadata = {}
+ if preserve_metadata:
+ final_metadata = head_response.get('Metadata', {}).copy()
+
+ if metadata:
+ final_metadata.update(metadata)
+
+ # Add migration metadata
+ final_metadata['migration_source_bucket'] = source_bucket
+ final_metadata['migration_source_key'] = source_key
+ final_metadata['migration_timestamp'] = datetime.utcnow().isoformat()
+ final_metadata['migration_tool'] = 'S3ToS3Migrator'
+
+ # Check if destination bucket exists, create if not
+ dest_bucket_check = self.bucket_exists(
+ dest_bucket, is_source=False,
+ access_key_id=dest_credentials.get('access_key_id'),
+ secret_access_key=dest_credentials.get('secret_access_key'),
+ region=dest_credentials.get('region', 'us-east-1'),
+ endpoint_url=dest_credentials.get('endpoint_url'),
+ session_token=dest_credentials.get('session_token')
+ )
+
+ if not dest_bucket_check.get('exists'):
+ log(f"⚠️ Destination bucket '{dest_bucket}' does not exist. Creating...")
+ create_result = self.create_bucket(
+ dest_bucket,
+ region=dest_credentials.get('region', 'us-east-1'),
+ endpoint_url=dest_credentials.get('endpoint_url'),
+ access_key_id=dest_credentials.get('access_key_id'),
+ secret_access_key=dest_credentials.get('secret_access_key'),
+ session_token=dest_credentials.get('session_token')
+ )
+
+ if not create_result['success']:
+ return {
+ 'success': False,
+ 'error': f"Failed to create destination bucket: {create_result['error']}",
+ 'logs': logs
+ }
+ log(f"✅ Created destination bucket: {dest_bucket}")
+
+ # Perform copy operation
+ log("⬆️ Copying object to destination...")
+ copy_start = time.time()
+
+ copy_source = {
+ 'Bucket': source_bucket,
+ 'Key': source_key
+ }
+
+ copy_kwargs = {
+ 'Bucket': dest_bucket,
+ 'Key': dest_key,
+ 'CopySource': copy_source,
+ 'Metadata': final_metadata,
+ 'MetadataDirective': 'REPLACE' if preserve_metadata else 'COPY'
+ }
+
+ if storage_class:
+ copy_kwargs['StorageClass'] = storage_class
+
+ dest_client.copy_object(**copy_kwargs)
+
+ copy_time = time.time() - copy_start
+
+ # Verify copy
+ log("🔍 Verifying destination object...")
+ dest_head = dest_client.head_object(
+ Bucket=dest_bucket,
+ Key=dest_key
+ )
+
+ dest_size = dest_head['ContentLength']
+ dest_etag = dest_head['ETag'].strip('"')
+
+ # Generate URL
+ if dest_credentials.get('endpoint_url'):
+ dest_url = f"{dest_credentials['endpoint_url']}/{dest_bucket}/{dest_key}"
+ else:
+ dest_url = f"https://{dest_bucket}.s3.{dest_credentials.get('region', 'us-east-1')}.amazonaws.com/{dest_key}"
+
+ log(f"✅ Migration completed in {copy_time:.2f} seconds")
+ log(f" 📍 Destination: {dest_url}")
+
+ return {
+ 'success': True,
+ 'message': 'Object migrated successfully',
+ 'source': {
+ 'bucket': source_bucket,
+ 'key': source_key,
+ 'size': source_size,
+ 'etag': source_etag,
+ 'last_modified': source_last_modified.isoformat() if hasattr(source_last_modified, 'isoformat') else str(source_last_modified)
+ },
+ 'destination': {
+ 'bucket': dest_bucket,
+ 'key': dest_key,
+ 'size': dest_size,
+ 'etag': dest_etag,
+ 'url': dest_url
+ },
+ 'copy_time': copy_time,
+ 'metadata': final_metadata,
+ 'logs': logs
+ }
+
+ except ClientError as e:
+ error_code = e.response['Error']['Code']
+ error_msg = e.response['Error']['Message']
+ log(f"❌ AWS Error: {error_code} - {error_msg}", 'error')
+ return {
+ 'success': False,
+ 'error': f"{error_code}: {error_msg}",
+ 'aws_error': error_code,
+ 'logs': logs
+ }
+ except Exception as e:
+ log(f"❌ Error: {str(e)}", 'error')
+ return {
+ 'success': False,
+ 'error': str(e),
+ 'logs': logs
+ }
+
+ def migrate_objects_batch(self, objects, source_bucket, dest_bucket,
+ source_credentials, dest_credentials,
+ preserve_metadata=True, storage_class='STANDARD',
+ max_concurrent=5):
+ """
+ Migrate multiple objects in batch
+ """
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+
+ logs = []
+ results = {
+ 'successful': [],
+ 'failed': [],
+ 'total_size': 0,
+ 'total_time': 0
+ }
+
+ def log(msg, level='info'):
+ log_entry = {
+ 'timestamp': time.time(),
+ 'message': msg,
+ 'level': level
+ }
+ logs.append(log_entry)
+ print(f"[Batch] {msg}")
+
+ log(f"🚀 Starting batch migration of {len(objects)} objects")
+ log(f" 📦 Source: s3://{source_bucket}")
+ log(f" 📦 Destination: s3://{dest_bucket}")
+
+ start_time = time.time()
+
+ def migrate_single(obj):
+ source_key = obj['key'] if isinstance(obj, dict) else obj
+ dest_key = obj.get('dest_key', source_key) if isinstance(obj, dict) else source_key
+
+ result = self.migrate_object(
+ source_bucket=source_bucket,
+ source_key=source_key,
+ dest_bucket=dest_bucket,
+ dest_key=dest_key,
+ source_credentials=source_credentials,
+ dest_credentials=dest_credentials,
+ preserve_metadata=preserve_metadata,
+ storage_class=storage_class
+ )
+
+ return {
+ 'source_key': source_key,
+ 'dest_key': dest_key,
+ 'result': result
+ }
+
+ # Execute migrations in parallel
+ with ThreadPoolExecutor(max_workers=max_concurrent) as executor:
+ futures = [executor.submit(migrate_single, obj) for obj in objects]
+
+ for future in as_completed(futures):
+ try:
+ migration = future.result()
+ if migration['result']['success']:
+ results['successful'].append(migration)
+ results['total_size'] += migration['result']['destination']['size']
+ else:
+ results['failed'].append(migration)
+ except Exception as e:
+ results['failed'].append({
+ 'source_key': 'unknown',
+ 'dest_key': 'unknown',
+ 'error': str(e)
+ })
+
+ results['total_time'] = time.time() - start_time
+
+ log(f"✅ Batch migration completed in {results['total_time']:.2f} seconds")
+ log(f" ✅ Successful: {len(results['successful'])}")
+ log(f" ❌ Failed: {len(results['failed'])}")
+ log(f" 📦 Total size: {self._format_size(results['total_size'])}")
+
+ return {
+ 'success': len(results['failed']) == 0,
+ 'message': f"Migrated {len(results['successful'])}/{len(objects)} objects",
+ 'results': results,
+ 'logs': logs
+ }
+
+ # ==================== Full Migration ====================
+
+ def start_migration(self, migration_id, source_config, dest_config,
+ source_bucket, dest_bucket, prefix='',
+ include_patterns=None, exclude_patterns=None,
+ preserve_metadata=True, storage_class='STANDARD',
+ create_dest_bucket=True, max_concurrent=5):
+ """
+ Start full S3 to S3 migration in background
+ """
+
+ def migration_task():
+ result = self.migrate_bucket(
+ migration_id=migration_id,
+ source_config=source_config,
+ dest_config=dest_config,
+ source_bucket=source_bucket,
+ dest_bucket=dest_bucket,
+ prefix=prefix,
+ include_patterns=include_patterns,
+ exclude_patterns=exclude_patterns,
+ preserve_metadata=preserve_metadata,
+ storage_class=storage_class,
+ create_dest_bucket=create_dest_bucket,
+ max_concurrent=max_concurrent
+ )
+ with self._lock:
+ self.migrations[migration_id] = result
+
+ thread = threading.Thread(target=migration_task, daemon=True)
+ thread.start()
+
+ # Initialize migration entry
+ with self._lock:
+ self.migrations[migration_id] = {
+ 'status': 'running',
+ 'started_at': time.time(),
+ 'source_bucket': source_bucket,
+ 'dest_bucket': dest_bucket,
+ 'prefix': prefix,
+ 'source_config': {k: v[:10] + '...' if k in ['access_key_id', 'secret_access_key'] and v else None
+ for k, v in source_config.items()},
+ 'dest_config': {k: v[:10] + '...' if k in ['access_key_id', 'secret_access_key'] and v else None
+ for k, v in dest_config.items()}
+ }
+
+ return migration_id
+
+ def migrate_bucket(self, migration_id, source_config, dest_config,
+ source_bucket, dest_bucket, prefix='',
+ include_patterns=None, exclude_patterns=None,
+ preserve_metadata=True, storage_class='STANDARD',
+ create_dest_bucket=True, max_concurrent=5):
+ """
+ Migrate entire bucket from source to destination
+ """
+ logs = []
+
+ def log(msg, level='info'):
+ log_entry = {
+ 'timestamp': time.time(),
+ 'message': msg,
+ 'level': level
+ }
+ logs.append(log_entry)
+ print(f"[{migration_id}] {msg}")
+
+ try:
+ log(f"🔧 Starting S3 to S3 Migration {migration_id}")
+ log(f" 📤 Source: s3://{source_bucket}/{prefix}")
+ log(f" 📥 Destination: s3://{dest_bucket}/{prefix}")
+ log(f" ⚙️ Preserve metadata: {preserve_metadata}, Storage class: {storage_class}")
+
+ # Create destination bucket if needed
+ if create_dest_bucket:
+ dest_client = self.get_destination_client(
+ dest_config.get('access_key_id'),
+ dest_config.get('secret_access_key'),
+ dest_config.get('region', 'us-east-1'),
+ dest_config.get('endpoint_url'),
+ dest_config.get('session_token')
+ )
+
+ if dest_client:
+ exists_check = self.bucket_exists(
+ dest_bucket, is_source=False,
+ access_key_id=dest_config.get('access_key_id'),
+ secret_access_key=dest_config.get('secret_access_key'),
+ region=dest_config.get('region', 'us-east-1'),
+ endpoint_url=dest_config.get('endpoint_url'),
+ session_token=dest_config.get('session_token')
+ )
+
+ if not exists_check.get('exists'):
+ log(f"📦 Creating destination bucket: {dest_bucket}")
+ create_result = self.create_bucket(
+ dest_bucket,
+ region=dest_config.get('region', 'us-east-1'),
+ endpoint_url=dest_config.get('endpoint_url'),
+ access_key_id=dest_config.get('access_key_id'),
+ secret_access_key=dest_config.get('secret_access_key'),
+ session_token=dest_config.get('session_token')
+ )
+
+ if not create_result['success']:
+ log(f"❌ Failed to create destination bucket: {create_result['error']}", 'error')
+ return {
+ 'success': False,
+ 'migration_id': migration_id,
+ 'error': f"Failed to create destination bucket: {create_result['error']}",
+ 'logs': logs
+ }
+ log(f"✅ Destination bucket created/verified")
+
+ # List objects from source
+ log(f"🔍 Listing objects from source bucket: {source_bucket}/{prefix}")
+ list_result = self.list_objects(
+ bucket_name=source_bucket,
+ prefix=prefix,
+ max_keys=1000,
+ is_source=True,
+ access_key_id=source_config.get('access_key_id'),
+ secret_access_key=source_config.get('secret_access_key'),
+ region=source_config.get('region', 'us-east-1'),
+ endpoint_url=source_config.get('endpoint_url'),
+ session_token=source_config.get('session_token')
+ )
+
+ if not list_result['success']:
+ log(f"❌ Failed to list source bucket: {list_result['error']}", 'error')
+ return {
+ 'success': False,
+ 'migration_id': migration_id,
+ 'error': f"Failed to list source bucket: {list_result['error']}",
+ 'logs': logs
+ }
+
+ objects = list_result['objects']
+ total_objects = len(objects)
+ total_size = list_result['total_size']
+
+ log(f"📊 Found {total_objects} objects, total size: {self._format_size(total_size)}")
+
+ if total_objects == 0:
+ log(f"⚠️ No objects to migrate")
+ return {
+ 'success': True,
+ 'migration_id': migration_id,
+ 'message': 'No objects to migrate',
+ 'stats': {
+ 'total_objects': 0,
+ 'migrated': 0,
+ 'failed': 0,
+ 'total_size': 0
+ },
+ 'logs': logs
+ }
+
+ # Filter objects if patterns provided
+ if include_patterns or exclude_patterns:
+ import fnmatch
+ filtered_objects = []
+
+ for obj in objects:
+ key = obj['key']
+ include = True
+
+ if include_patterns:
+ include = any(fnmatch.fnmatch(key, pattern) for pattern in include_patterns)
+
+ if exclude_patterns:
+ include = include and not any(fnmatch.fnmatch(key, pattern) for pattern in exclude_patterns)
+
+ if include:
+ filtered_objects.append(obj)
+
+ objects = filtered_objects
+ log(f"🎯 After filtering: {len(objects)} objects")
+
+ # Migrate objects in batches
+ successful = []
+ failed = []
+ total_bytes = 0
+
+ log(f"🚀 Starting migration of {len(objects)} objects...")
+
+ # Process in chunks
+ chunk_size = max_concurrent * 10
+ for i in range(0, len(objects), chunk_size):
+ chunk = objects[i:i + chunk_size]
+ log(f"📦 Processing chunk {i//chunk_size + 1}/{(len(objects)-1)//chunk_size + 1}")
+
+ batch_result = self.migrate_objects_batch(
+ objects=chunk,
+ source_bucket=source_bucket,
+ dest_bucket=dest_bucket,
+ source_credentials=source_config,
+ dest_credentials=dest_config,
+ preserve_metadata=preserve_metadata,
+ storage_class=storage_class,
+ max_concurrent=max_concurrent
+ )
+
+ for success in batch_result['results']['successful']:
+ successful.append(success)
+ total_bytes += success['result']['destination']['size']
+
+ for fail in batch_result['results']['failed']:
+ failed.append(fail)
+
+ log(f" ✅ Progress: {len(successful)}/{len(objects)} objects, "
+ f"📦 {self._format_size(total_bytes)}")
+
+ # Summary
+ log(f"📊 Migration Summary:")
+ log(f" ✅ Successful: {len(successful)} objects")
+ log(f" ❌ Failed: {len(failed)} objects")
+ log(f" 📦 Total size: {self._format_size(total_size)}")
+ log(f" ⏱️ Total time: {time.time() - self.migrations[migration_id]['started_at']:.2f} seconds")
+
+ return {
+ 'success': len(failed) == 0,
+ 'migration_id': migration_id,
+ 'message': f"Migration completed: {len(successful)}/{total_objects} objects migrated",
+ 'stats': {
+ 'total_objects': total_objects,
+ 'migrated': len(successful),
+ 'failed': len(failed),
+ 'total_size': total_size,
+ 'migrated_size': total_bytes
+ },
+ 'successful': successful[:100], # First 100 only
+ 'failed': failed[:100], # First 100 only
+ 'logs': logs
+ }
+
+ except Exception as e:
+ log(f"❌ Migration failed: {str(e)}", 'error')
+ return {
+ 'success': False,
+ 'migration_id': migration_id,
+ 'error': str(e),
+ 'logs': logs
+ }
+
+ # ==================== Migration Status ====================
+
+ def get_migration_status(self, migration_id):
+ """Get status of a migration"""
+ with self._lock:
+ return self.migrations.get(migration_id)
+ """List all migrations"""
+ with self._lock:
+ migrations_list = []
+ for mig_id, mig_data in self.migrations.items():
+ if isinstance(mig_data, dict) and 'success' in mig_data:
+ status = 'completed' if mig_data['success'] else 'failed'
+ elif isinstance(mig_data, dict) and mig_data.get('status') == 'running':
+ status = 'running'
+ else:
+ status = 'unknown'
+
+ migrations_list.append({
+ 'id': mig_id,
+ 'status': status,
+ 'started_at': mig_data.get('started_at') if isinstance(mig_data, dict) else None,
+ 'source_bucket': mig_data.get('source_bucket') if isinstance(mig_data, dict) else None,
+ 'dest_bucket': mig_data.get('dest_bucket') if isinstance(mig_data, dict) else None,
+ 'data': mig_data
+ })
+
+ # Sort by started_at descending
+ migrations_list.sort(key=lambda x: x.get('started_at', 0), reverse=True)
+ return migrations_list
+ def list_migrations(self):
+ """List all migrations"""
+ with self._lock:
+ migrations_list = []
+ for mig_id, mig_data in self.migrations.items():
+ if isinstance(mig_data, dict) and 'success' in mig_data:
+ status = 'completed' if mig_data['success'] else 'failed'
+ elif isinstance(mig_data, dict) and mig_data.get('status') == 'running':
+ status = 'running'
+ else:
+ status = 'unknown'
+
+ migrations_list.append({
+ 'id': mig_id,
+ 'status': status,
+ 'started_at': mig_data.get('started_at') if isinstance(mig_data, dict) else None,
+ 'source_bucket': mig_data.get('source_bucket') if isinstance(mig_data, dict) else None,
+ 'dest_bucket': mig_data.get('dest_bucket') if isinstance(mig_data, dict) else None,
+ 'data': mig_data
+ })
+
+ # Sort by started_at descending
+ migrations_list.sort(key=lambda x: x.get('started_at', 0), reverse=True)
+ return migrations_list
+ def cancel_migration(self, migration_id):
+ """Cancel a running migration"""
+ with self._lock:
+ if migration_id in self.migrations:
+ mig_data = self.migrations[migration_id]
+ if isinstance(mig_data, dict) and mig_data.get('status') == 'running':
+ mig_data['status'] = 'cancelled'
+ mig_data['cancelled_at'] = time.time()
+ return {'success': True, 'message': 'Migration cancelled'}
+ return {'success': False, 'error': 'Migration not found or not running'}
+
+ # ==================== Helper Functions ====================
+
+ def _format_size(self, size_bytes):
+ """Format file size in human-readable format"""
+ if size_bytes == 0:
+ return '0 B'
+
+ size_names = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+ i = 0
+ while size_bytes >= 1024 and i < len(size_names) - 1:
+ size_bytes /= 1024.0
+ i += 1
+
+ return f"{size_bytes:.2f} {size_names[i]}"
+
+ def parse_s3_uri(self, s3_uri):
+ """Parse S3 URI into bucket and key"""
+ try:
+ if not s3_uri.startswith('s3://'):
+ s3_uri = 's3://' + s3_uri
+
+ parsed = urlparse(s3_uri)
+ bucket = parsed.netloc
+ key = parsed.path.lstrip('/') if parsed.path else ''
+
+ return {
+ 'bucket': bucket,
+ 'key': key,
+ 'uri': s3_uri
+ }
+ except Exception as e:
+ return None
+
+ def generate_presigned_url(self, bucket, key, expiration=3600, is_source=True,
+ access_key_id=None, secret_access_key=None,
+ region='us-east-1', endpoint_url=None,
+ session_token=None):
+ """Generate presigned URL for temporary access"""
+ try:
+ if is_source:
+ client = self.get_source_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+ else:
+ client = self.get_destination_client(access_key_id, secret_access_key, region, endpoint_url, session_token)
+
+ if not client:
+ return {'success': False, 'error': 'Failed to create S3 client'}
+
+ url = client.generate_presigned_url(
+ 'get_object',
+ Params={'Bucket': bucket, 'Key': key},
+ ExpiresIn=expiration
+ )
+
+ return {
+ 'success': True,
+ 'url': url,
+ 'expires_in': expiration,
+ 'bucket': bucket,
+ 'key': key
+ }
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+
+# ============================================================================
+# Part 3: PostgreSQL to S3 Migrator (PostgresToS3Migrator)
+# ============================================================================
+
+class PostgresToS3Migrator:
+ """PostgreSQL to S3 migration engine"""
+
+    def __init__(self):
+        # Migration records keyed by migration id; guarded by _lock.
+        self.migrations = {}
+        # Lock protecting self.migrations across the worker thread and callers.
+        self._lock = threading.Lock()
+        # Cache of S3 clients keyed by credential/region/endpoint tuple.
+        self.s3_clients = {}
+
+ def parse_postgres_uri(self, uri):
+ """Parse PostgreSQL URI and extract connection parameters"""
+ try:
+ if not uri.startswith(('postgresql://', 'postgres://')):
+ uri = 'postgresql://' + uri
+
+ parsed = urlparse(uri)
+
+ return {
+ 'host': parsed.hostname,
+ 'port': parsed.port or 5432,
+ 'user': parsed.username or '',
+ 'password': parsed.password or '',
+ 'database': parsed.path.lstrip('/') if parsed.path else 'postgres',
+ 'uri': uri
+ }
+ except Exception as e:
+ return None
+
+ def parse_uri_to_env(self, uri, prefix=''):
+ """Parse URI to environment variables format"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return None
+
+ env_vars = {
+ f'{prefix}DB_HOST': parsed['host'],
+ f'{prefix}DB_PORT': str(parsed['port']),
+ f'{prefix}DB_USER': parsed['user'],
+ f'{prefix}DB_PASSWORD': parsed['password'],
+ f'{prefix}DB_NAME': parsed['database'],
+ f'{prefix}DB_URI': parsed['uri']
+ }
+
+ # Create connection string variants
+ env_vars[f'{prefix}DB_CONNECTION_STRING'] = f"host={parsed['host']} port={parsed['port']} dbname={parsed['database']} user={parsed['user']} password={parsed['password']}"
+ env_vars[f'{prefix}DB_URL'] = parsed['uri']
+
+ return env_vars
+ def get_live_migration_progress(self, migration_id):
+ """الحصول على التقدم المباشر للترحيل من PostgreSQL إلى S3"""
+ with self._lock:
+ if migration_id not in self.migrations:
+ return {'success': False, 'error': 'Migration not found'}
+
+ mig_data = self.migrations[migration_id]
+
+ if mig_data.get('status') != 'running':
+ return self._get_completed_migration_stats(migration_id)
+
+ if 'progress_tracker' in mig_data:
+ return self._get_live_tracker_progress(migration_id)
+
+ return self._get_estimated_progress(migration_id)
+
+    def _get_live_tracker_progress(self, migration_id):
+        """Build a detailed progress payload from the live tracker.
+
+        Called by get_live_migration_progress while it holds self._lock, so
+        tracker state is read consistently. Returns a dict with totals,
+        processed/failed/remaining counters, percentages, timing, speed,
+        the current table, recent exports, S3 info and text progress bars.
+        """
+        mig_data = self.migrations[migration_id]
+        tracker = mig_data['progress_tracker']
+
+        current_time = time.time()
+        elapsed_time = current_time - tracker['start_time']
+
+        # Percentages (guard against zero totals).
+        tables_percentage = (tracker['processed_tables'] / tracker['total_tables'] * 100) if tracker['total_tables'] > 0 else 0
+        size_percentage = (tracker['processed_size'] / tracker['total_size'] * 100) if tracker['total_size'] > 0 else 0
+        rows_percentage = (tracker['processed_rows'] / tracker['total_rows'] * 100) if tracker['total_rows'] > 0 else 0
+
+        # Current speed from samples recorded in the last 10 seconds; fall back
+        # to the overall average when no recent samples exist.
+        recent_samples = [s for s in tracker.get('speed_samples', []) if current_time - s['time'] < 10]
+        if recent_samples:
+            recent_size = sum(s['size'] for s in recent_samples)
+            recent_time = recent_samples[-1]['time'] - recent_samples[0]['time'] if len(recent_samples) > 1 else 1
+            current_speed = recent_size / recent_time if recent_time > 0 else 0
+        else:
+            current_speed = tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0
+
+        # ETA derived from the current speed over the remaining byte count.
+        remaining_size = tracker['total_size'] - tracker['processed_size'] - tracker.get('failed_size', 0)
+        eta_seconds = remaining_size / current_speed if current_speed > 0 else 0
+
+        # Details of the table currently being exported, if any.
+        current_table_info = None
+        if tracker.get('current_table'):
+            ct = tracker['current_table']
+            current_table_info = {
+                'name': ct['name'],
+                'size': ct.get('size', 0),
+                'size_formatted': self._format_size(ct.get('size', 0)),
+                'rows': ct.get('rows', 0),
+                'rows_processed': ct.get('rows_processed', 0),
+                'rows_percentage': round((ct.get('rows_processed', 0) / max(ct.get('rows', 1), 1)) * 100, 2),
+                'stage': ct.get('stage', 'querying'),  # querying, compressing, uploading
+                'progress': ct.get('progress', 0),
+                'elapsed': current_time - ct.get('start_time', current_time),
+                'elapsed_formatted': self._format_time(current_time - ct.get('start_time', current_time))
+            }
+
+        # Assemble the full payload.
+        return {
+            'success': True,
+            'migration_id': migration_id,
+            'status': 'running',
+            'type': 'live',
+            'timestamp': current_time,
+
+            'total': {
+                'tables': tracker['total_tables'],
+                'size': tracker['total_size'],
+                'size_formatted': self._format_size(tracker['total_size']),
+                'rows': tracker.get('total_rows', 0)
+            },
+
+            'processed': {
+                'tables': tracker['processed_tables'],
+                'size': tracker['processed_size'],
+                'size_formatted': self._format_size(tracker['processed_size']),
+                'rows': tracker.get('processed_rows', 0)
+            },
+
+            'failed': {
+                'tables': tracker.get('failed_tables', 0),
+                'size': tracker.get('failed_size', 0),
+                'size_formatted': self._format_size(tracker.get('failed_size', 0)),
+                'rows': tracker.get('failed_rows', 0)
+            },
+
+            'remaining': {
+                'tables': tracker['total_tables'] - tracker['processed_tables'] - tracker.get('failed_tables', 0),
+                'size': tracker['total_size'] - tracker['processed_size'] - tracker.get('failed_size', 0),
+                'size_formatted': self._format_size(tracker['total_size'] - tracker['processed_size'] - tracker.get('failed_size', 0))
+            },
+
+            'percentages': {
+                'tables': round(tables_percentage, 2),
+                'size': round(size_percentage, 2),
+                'rows': round(rows_percentage, 2) if tracker.get('total_rows', 0) > 0 else 0
+            },
+
+            'time': {
+                'elapsed': elapsed_time,
+                'elapsed_formatted': self._format_time(elapsed_time),
+                'eta': eta_seconds,
+                'eta_formatted': self._format_time(eta_seconds)
+            },
+
+            'speed': {
+                'current': current_speed,
+                'current_formatted': f"{self._format_size(current_speed)}/s",
+                'average': tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0,
+                'average_formatted': f"{self._format_size(tracker['processed_size'] / elapsed_time if elapsed_time > 0 else 0)}/s",
+                'peak': max((s['size'] for s in tracker.get('speed_samples', [])), default=0),
+                'peak_formatted': self._format_size(max((s['size'] for s in tracker.get('speed_samples', [])), default=0))
+            },
+
+            'current_table': current_table_info,
+
+            # Tail of the per-table export log (last five entries).
+            'recent_exports': tracker.get('tables_details', [])[-5:],
+
+            's3_info': {
+                'bucket': mig_data.get('s3_bucket'),
+                'prefix': mig_data.get('s3_prefix'),
+                'region': mig_data.get('region', 'us-east-1'),
+                'endpoint': mig_data.get('endpoint_url')
+            },
+
+            'format': {
+                'type': tracker.get('format', 'csv'),
+                'compressed': tracker.get('compress', True)
+            },
+
+            'progress_bars': {
+                'tables': self._format_progress_bar(tables_percentage),
+                'size': self._format_progress_bar(size_percentage),
+                'rows': self._format_progress_bar(rows_percentage) if tracker.get('total_rows', 0) > 0 else None
+            }
+        }
+
+    def _get_estimated_progress(self, migration_id):
+        """Estimate progress for a running migration that has no live tracker.
+
+        Uses the record's estimated table count/size and an assumed 5 MB/s
+        average throughput to extrapolate progress, capped at 99%.
+        """
+        mig_data = self.migrations[migration_id]
+        elapsed_time = time.time() - mig_data['started_at']
+
+        # Estimated table count and total size (defaults: 10 tables, 100 MB).
+        estimated_total_tables = mig_data.get('estimated_tables', 10)
+        estimated_total_size = mig_data.get('estimated_size', 100 * 1024 * 1024)
+
+        # Extrapolate progress assuming 5 MB/s average throughput.
+        estimated_duration = estimated_total_size / (5 * 1024 * 1024)
+        progress_percentage = min((elapsed_time / estimated_duration) * 100, 99) if estimated_duration > 0 else 0
+
+        tables_exported = int(estimated_total_tables * progress_percentage / 100)
+        size_exported = int(estimated_total_size * progress_percentage / 100)
+
+        return {
+            'success': True,
+            'migration_id': migration_id,
+            'status': 'running',
+            'type': 'estimated',
+            'timestamp': time.time(),
+            'estimated': True,
+
+            'total': {
+                'tables': estimated_total_tables,
+                'size': estimated_total_size,
+                'size_formatted': self._format_size(estimated_total_size)
+            },
+
+            'processed': {
+                'tables': tables_exported,
+                'size': size_exported,
+                'size_formatted': self._format_size(size_exported)
+            },
+
+            'percentages': {
+                'tables': round(progress_percentage, 2),
+                'size': round(progress_percentage, 2)
+            },
+
+            'time': {
+                'elapsed': elapsed_time,
+                'elapsed_formatted': self._format_time(elapsed_time)
+            },
+
+            'progress_bars': {
+                'main': self._format_progress_bar(progress_percentage)
+            },
+
+            # User-facing note (Arabic): "rough estimate - gathering accurate table info".
+            'note': 'تقدير تقريبي - يتم جمع معلومات دقيقة عن الجداول...'
+        }
+
+    def _get_completed_migration_stats(self, migration_id):
+        """Summarize a finished (completed or failed) migration.
+
+        Reads the stored 'stats' and export lists from the migration record.
+        NOTE(review): the 'failed' size is reported as 0 and size percentage as
+        100.0 regardless of failures — confirm that is intended.
+        """
+        mig_data = self.migrations[migration_id]
+
+        stats = mig_data.get('stats', {})
+        successful_exports = mig_data.get('successful_exports', [])
+        failed_exports = mig_data.get('failed_exports', [])
+
+        total_tables = stats.get('total_tables', 0)
+        successful = stats.get('successful_exports', 0)
+        failed = stats.get('failed_exports', 0)
+        total_size = stats.get('total_size', 0)
+
+        # Prefer the recorded execution time; fall back to wall-clock delta.
+        execution_time = mig_data.get('execution_time', time.time() - mig_data.get('started_at', time.time()))
+
+        return {
+            'success': True,
+            'migration_id': migration_id,
+            'status': 'completed' if mig_data.get('success') else 'failed',
+            'type': 'completed',
+            'timestamp': time.time(),
+
+            'total': {
+                'tables': total_tables,
+                'size': total_size,
+                'size_formatted': self._format_size(total_size)
+            },
+
+            'processed': {
+                'tables': successful,
+                'size': total_size,
+                'size_formatted': self._format_size(total_size)
+            },
+
+            'failed': {
+                'tables': failed,
+                'size': 0,
+                'size_formatted': '0 B'
+            },
+
+            'percentages': {
+                'tables': 100.0 if successful == total_tables else round((successful / total_tables * 100) if total_tables > 0 else 0, 2),
+                'size': 100.0
+            },
+
+            'time': {
+                'total': execution_time,
+                'total_formatted': self._format_time(execution_time),
+                'average_speed': total_size / execution_time if execution_time > 0 else 0,
+                'average_speed_formatted': f"{self._format_size(total_size / execution_time if execution_time > 0 else 0)}/s"
+            },
+
+            # Last ten successes; all failures.
+            'successful_exports': successful_exports[-10:],
+            'failed_exports': failed_exports,
+
+            's3_config': mig_data.get('s3_config', {})
+        }
+
+    def start_migration_with_progress(self, migration_id, postgres_uri, s3_bucket, s3_prefix='',
+                                      schemas=None, tables=None, compress=True, format='csv',
+                                      access_key_id=None, secret_access_key=None,
+                                      region='us-east-1', endpoint_url=None):
+        """Start a migration in a background daemon thread with live tracking.
+
+        Initializes the progress tracker synchronously, then runs the actual
+        migration in a thread; the record's status/completed_at (or error) is
+        updated under the lock when the thread finishes.
+
+        Returns:
+            migration_id (immediately; the migration runs asynchronously).
+
+        NOTE(review): s3_bucket/s3_prefix are not passed to
+        _init_progress_tracker here — verify the tracker record ends up with
+        the S3 destination populated.
+        """
+        self._init_progress_tracker(migration_id, postgres_uri, schemas, tables,
+                                    compress, format, region, endpoint_url)
+
+        def migration_task():
+            # Runs off-thread; all shared-state updates happen under self._lock.
+            try:
+                result = self._run_migration_with_progress(
+                    migration_id, postgres_uri, s3_bucket, s3_prefix,
+                    schemas, tables, compress, format,
+                    access_key_id, secret_access_key, region, endpoint_url
+                )
+                with self._lock:
+                    self.migrations[migration_id].update(result)
+                    self.migrations[migration_id]['status'] = 'completed' if result['success'] else 'failed'
+                    self.migrations[migration_id]['completed_at'] = time.time()
+            except Exception as e:
+                with self._lock:
+                    self.migrations[migration_id]['status'] = 'failed'
+                    self.migrations[migration_id]['error'] = str(e)
+
+        thread = threading.Thread(target=migration_task, daemon=True)
+        thread.start()
+
+        return migration_id
+
+ def _init_progress_tracker(self, migration_id, postgres_uri, schemas, tables, compress, format, region, endpoint_url):
+ """تهيئة متتبع التقدم بالبيانات الفعلية"""
+
+ # الحصول على قائمة الجداول وأحجامها
+ tables_list = []
+ total_size = 0
+ total_rows = 0
+
+ if tables:
+ for table in tables:
+ if '.' in table:
+ schema, name = table.split('.', 1)
+ else:
+ schema = 'public'
+ name = table
+
+ # تقدير حجم الجدول (صعب مع PostgreSQL)
+ size = 10 * 1024 * 1024 # تقدير افتراضي 10MB
+ rows = self._get_table_row_count(postgres_uri, schema, name) or 0
+
+ tables_list.append({
+ 'schema': schema,
+ 'name': name,
+ 'size': size,
+ 'rows': rows,
+ 'status': 'pending'
+ })
+ total_size += size
+ total_rows += rows
+
+ elif schemas:
+ for schema in schemas:
+ result = self.get_tables(postgres_uri, schema)
+ if result['success']:
+ for table_info in result['tables']:
+ rows = self._get_table_row_count(postgres_uri, schema, table_info['name']) or 0
+ tables_list.append({
+ 'schema': schema,
+ 'name': table_info['name'],
+ 'size': 10 * 1024 * 1024, # تقدير افتراضي
+ 'rows': rows,
+ 'status': 'pending'
+ })
+ total_size += 10 * 1024 * 1024
+ total_rows += rows
+
+ else:
+ result = self.get_tables(postgres_uri)
+ if result['success']:
+ for table_info in result['tables']:
+ rows = self._get_table_row_count(postgres_uri, table_info['schema'], table_info['name']) or 0
+ tables_list.append({
+ 'schema': table_info['schema'],
+ 'name': table_info['name'],
+ 'size': 10 * 1024 * 1024, # تقدير افتراضي
+ 'rows': rows,
+ 'status': 'pending'
+ })
+ total_size += 10 * 1024 * 1024
+ total_rows += rows
+
+ with self._lock:
+ self.migrations[migration_id] = {
+ 'status': 'running',
+ 'started_at': time.time(),
+ 'postgres_uri': postgres_uri,
+ 's3_bucket': s3_bucket,
+ 's3_prefix': s3_prefix,
+ 'region': region,
+ 'endpoint_url': endpoint_url,
+ 'estimated_tables': len(tables_list),
+ 'estimated_size': total_size,
+ 'progress_tracker': {
+ 'total_tables': len(tables_list),
+ 'total_size': total_size,
+ 'total_rows': total_rows,
+ 'processed_tables': 0,
+ 'processed_size': 0,
+ 'processed_rows': 0,
+ 'failed_tables': 0,
+ 'failed_size': 0,
+ 'failed_rows': 0,
+ 'current_table': None,
+ 'start_time': time.time(),
+ 'last_update': time.time(),
+ 'tables_list': tables_list,
+ 'tables_details': [],
+ 'speed_samples': [],
+ 'format': format,
+ 'compress': compress
+ }
+ }
+
+    def _run_migration_with_progress(self, migration_id, postgres_uri, s3_bucket, s3_prefix,
+                                     schemas, tables, compress, format,
+                                     access_key_id, secret_access_key, region, endpoint_url):
+        """Run the migration, exporting each tracked table and updating progress.
+
+        Iterates the tables_list prepared by _init_progress_tracker, exporting
+        each table to S3 and updating the shared tracker (counters, per-table
+        details, speed samples) under self._lock after every table.
+
+        Returns:
+            A result dict with success flag, stats, export lists and s3_config;
+            success is True only when no table failed.
+        """
+        successful_exports = []
+        failed_exports = []
+
+        with self._lock:
+            tracker = self.migrations[migration_id]['progress_tracker']
+            tables_list = tracker['tables_list']
+
+        for i, table_info in enumerate(tables_list, 1):
+            table_name = f"{table_info['schema']}.{table_info['name']}"
+
+            # Publish the table being worked on so live progress can show it.
+            current_table = {
+                'name': table_name,
+                'size': table_info['size'],
+                'rows': table_info['rows'],
+                'rows_processed': 0,
+                'stage': 'querying',
+                'progress': 0,
+                'start_time': time.time()
+            }
+
+            with self._lock:
+                self.migrations[migration_id]['progress_tracker']['current_table'] = current_table
+
+            print(f"[{migration_id}] 📊 Exporting table {i}/{len(tables_list)}: {table_name}")
+
+            # Export the table with stage/progress reporting.
+            result = self._export_table_with_progress(
+                migration_id, postgres_uri,
+                table_info['schema'], table_info['name'],
+                s3_bucket, s3_prefix, compress, format,
+                access_key_id, secret_access_key, region, endpoint_url,
+                table_info['rows']
+            )
+
+            if result['success']:
+                # Update success counters and per-table detail under the lock.
+                with self._lock:
+                    tracker = self.migrations[migration_id]['progress_tracker']
+                    tracker['processed_tables'] += 1
+                    tracker['processed_size'] += result['file_size']
+                    tracker['processed_rows'] += result.get('rows_exported', 0)
+
+                    # Record export details for the recent-exports view.
+                    tracker['tables_details'].append({
+                        'name': table_name,
+                        'size': result['file_size'],
+                        'size_formatted': self._format_size(result['file_size']),
+                        'rows': result.get('rows_exported', 0),
+                        'query_time': result.get('query_time', 0),
+                        'upload_time': result.get('upload_time', 0),
+                        'total_time': result.get('query_time', 0) + result.get('upload_time', 0),
+                        'speed': result['file_size'] / (result.get('upload_time', 1) or 1),
+                        'compressed': result.get('compressed', False),
+                        'format': format,
+                        'timestamp': time.time()
+                    })
+
+                    # Add a speed sample used by the live speed calculation.
+                    tracker['speed_samples'].append({
+                        'time': time.time(),
+                        'size': result['file_size'],
+                        'tables': 1
+                    })
+
+                successful_exports.append({
+                    'table': table_name,
+                    's3_key': result['s3_key'],
+                    'size': result['file_size'],
+                    's3_url': result['s3_url']
+                })
+
+                print(f"[{migration_id}] ✅ Exported {table_name} - {self._format_size(result['file_size'])}")
+
+            else:
+                # Update failure counters under the lock.
+                with self._lock:
+                    tracker = self.migrations[migration_id]['progress_tracker']
+                    tracker['failed_tables'] += 1
+                    tracker['failed_size'] += table_info['size']
+
+                failed_exports.append({
+                    'table': table_name,
+                    'error': result.get('error', 'Unknown error')
+                })
+
+                print(f"[{migration_id}] ❌ Failed {table_name}: {result.get('error')}")
+
+        # Final statistics.
+        total_size = sum(e['size'] for e in successful_exports)
+
+        return {
+            'success': len(failed_exports) == 0,
+            'migration_id': migration_id,
+            'execution_time': time.time() - self.migrations[migration_id]['started_at'],
+            'stats': {
+                'total_tables': len(tables_list),
+                'successful_exports': len(successful_exports),
+                'failed_exports': len(failed_exports),
+                'total_size': total_size
+            },
+            'successful_exports': successful_exports,
+            'failed_exports': failed_exports,
+            's3_config': {
+                'bucket': s3_bucket,
+                'prefix': s3_prefix,
+                'region': region,
+                'endpoint': endpoint_url
+            }
+        }
+
+ def _export_table_with_progress(self, migration_id, postgres_uri, schema, table,
+ s3_bucket, s3_prefix, compress, format,
+ access_key_id, secret_access_key, region, endpoint_url,
+ estimated_rows=0):
+ """تصدير جدول مع تتبع التقدم"""
+
+ try:
+ start_time = time.time()
+
+ # تحديث المرحلة: الاستعلام
+ self._update_table_stage(migration_id, 'querying', 10)
+
+ # تصدير الجدول
+ result = self.export_table_to_s3(
+ postgres_uri=postgres_uri,
+ schema=schema,
+ table=table,
+ s3_bucket=s3_bucket,
+ s3_key=f"{s3_prefix}{schema}_{table}_{int(time.time())}.{format}{'.gz' if compress else ''}",
+ compress=compress,
+ format=format,
+ access_key_id=access_key_id,
+ secret_access_key=secret_access_key,
+ region=region,
+ endpoint_url=endpoint_url
+ )
+
+ if result['success']:
+ result['rows_exported'] = estimated_rows
+ return result
+ else:
+ return result
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def _update_table_stage(self, migration_id, stage, progress):
+ """تحديث مرحلة تصدير الجدول الحالي"""
+ with self._lock:
+ if migration_id in self.migrations:
+ tracker = self.migrations[migration_id].get('progress_tracker')
+ if tracker and tracker.get('current_table'):
+ tracker['current_table']['stage'] = stage
+ tracker['current_table']['progress'] = progress
+ tracker['last_update'] = time.time()
+
+ def _get_table_row_count(self, uri, schema, table):
+ """الحصول على عدد صفوف الجدول"""
+ try:
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return None
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ sql = f'SELECT COUNT(*) FROM "{schema}"."{table}";'
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=30)
+
+ if result.returncode == 0:
+ count_line = result.stdout.strip()
+ if count_line and count_line.isdigit():
+ return int(count_line)
+
+ return None
+
+ except Exception:
+ return None
+
+ def format_live_status(self, migration_id):
+ """تنسيق الحالة المباشرة كنص مقروء"""
+ progress = self.get_live_migration_progress(migration_id)
+
+ if not progress.get('success', True):
+ return f"❌ Error: {progress.get('error', 'Unknown error')}"
+
+ lines = []
+ lines.append("=" * 70)
+ lines.append(f"🚀 PostgreSQL to S3 Migration {migration_id} - {progress['status'].upper()}")
+ lines.append("=" * 70)
+
+ if progress.get('type') == 'estimated':
+ lines.append("⚠️ تقدير تقريبي - يتم جمع معلومات دقيقة...")
+
+ # معلومات S3
+ if 's3_info' in progress:
+ s3 = progress['s3_info']
+ lines.append(f"\n📦 S3 Destination: s3://{s3['bucket']}/{s3['prefix']}")
+ lines.append(f" Region: {s3['region']}")
+
+ # شريط التقدم الرئيسي
+ if 'progress_bars' in progress:
+ if 'size' in progress['progress_bars']:
+ lines.append(f"\n📊 Progress: {progress['progress_bars']['size']}")
+
+ # إحصائيات الجداول
+ lines.append(f"\n📋 Tables:")
+ lines.append(f" Total: {progress['total']['tables']} tables")
+ lines.append(f" Exported: {progress['processed']['tables']} tables ({progress['percentages']['tables']}%)")
+ if progress.get('failed', {}).get('tables', 0) > 0:
+ lines.append(f" ❌ Failed: {progress['failed']['tables']} tables")
+
+ # إحصائيات الحجم
+ lines.append(f"\n💾 Size:")
+ lines.append(f" Total: {progress['total']['size_formatted']}")
+ lines.append(f" Exported: {progress['processed']['size_formatted']} ({progress['percentages']['size']}%)")
+ if progress.get('remaining', {}).get('size', 0) > 0:
+ lines.append(f" Remaining: {progress['remaining']['size_formatted']}")
+
+ # معلومات الصفوف
+ if progress['total'].get('rows', 0) > 0:
+ lines.append(f"\n📊 Rows:")
+ lines.append(f" Total: {progress['total']['rows']:,} rows")
+ lines.append(f" Exported: {progress['processed']['rows']:,} rows ({progress['percentages']['rows']}%)")
+
+ # معلومات السرعة والوقت
+ lines.append(f"\n⏱️ Time:")
+ lines.append(f" Elapsed: {progress['time']['elapsed_formatted']}")
+ if 'eta' in progress['time']:
+ lines.append(f" ETA: {progress['time']['eta_formatted']}")
+
+ lines.append(f"\n⚡ Speed:")
+ lines.append(f" Current: {progress['speed']['current_formatted']}")
+ lines.append(f" Average: {progress['speed']['average_formatted']}")
+
+ # الجدول الحالي
+ if progress.get('current_table'):
+ ct = progress['current_table']
+ lines.append(f"\n📄 Current Table: {ct['name']}")
+ lines.append(f" Size: {ct['size_formatted']}")
+ lines.append(f" Stage: {ct['stage'].upper()}")
+ if ct.get('rows', 0) > 0:
+ lines.append(f" Rows: {ct['rows_processed']}/{ct['rows']} ({ct['rows_percentage']}%)")
+ lines.append(f" Elapsed: {ct['elapsed_formatted']}")
+
+ # آخر التصديرات الناجحة
+ if progress.get('recent_exports'):
+ lines.append(f"\n✅ Recent Exports:")
+ for exp in progress['recent_exports'][-3:]:
+ lines.append(f" • {exp['name']} - {exp['size_formatted']}")
+
+ lines.append("\n" + "=" * 70)
+
+ return '\n'.join(lines)
+
+ def _format_size(self, size_bytes):
+ """تنسيق حجم الملف"""
+ if size_bytes == 0:
+ return '0 B'
+
+ size_names = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+ i = 0
+ while size_bytes >= 1024 and i < len(size_names) - 1:
+ size_bytes /= 1024.0
+ i += 1
+
+ return f"{size_bytes:.2f} {size_names[i]}"
+
+ def _format_time(self, seconds):
+ """تنسيق الوقت"""
+ if seconds < 60:
+ return f"{seconds:.0f}s"
+ elif seconds < 3600:
+ minutes = seconds / 60
+ return f"{minutes:.1f}m"
+ elif seconds < 86400:
+ hours = seconds / 3600
+ return f"{hours:.1f}h"
+ else:
+ days = seconds / 86400
+ return f"{days:.1f}d"
+
+ def _format_progress_bar(self, percentage, width=30):
+ """تنسيق شريط التقدم كنص"""
+ filled = int(width * percentage / 100)
+ empty = width - filled
+ bar = '█' * filled + '░' * empty
+ return f"[{bar}] {percentage:.1f}%"
+ def get_s3_client(self, access_key_id, secret_access_key, region='us-east-1', endpoint_url=None):
+ """Get or create S3 client with given credentials"""
+ cache_key = f"{access_key_id}:{secret_access_key}:{region}:{endpoint_url}"
+
+ if cache_key in self.s3_clients:
+ return self.s3_clients[cache_key]
+
+ try:
+ # Create S3 client configuration
+ s3_config = {
+ 'aws_access_key_id': access_key_id,
+ 'aws_secret_access_key': secret_access_key,
+ 'region_name': region
+ }
+
+ # Add endpoint if provided (for S3-compatible services like MinIO)
+ if endpoint_url:
+ s3_config['endpoint_url'] = endpoint_url
+
+ # Create S3 client
+ client = boto3.client('s3', **s3_config)
+
+ # Cache the client
+ self.s3_clients[cache_key] = client
+ return client
+
+ except Exception as e:
+ print(f"❌ Failed to create S3 client: {str(e)}")
+ return None
+
+ def test_s3_connection(self, access_key_id, secret_access_key, region='us-east-1', endpoint_url=None):
+ """Test S3 connection with the four parameters"""
+ # التحقق من بيانات الاعتماد
+ if not access_key_id or not secret_access_key:
+ print(f"❌ AWS credentials are required")
+ print(f" 🔸 Access Key ID: {'Missing' if not access_key_id else 'Provided'}")
+ print(f" 🔸 Secret Key: {'Missing' if not secret_access_key else 'Provided'}")
+ return {
+ 'success': False,
+ 'error': 'AWS credentials are required',
+ 'details': {
+ 'access_key_id_provided': bool(access_key_id),
+ 'secret_key_provided': bool(secret_access_key)
+ }
+ }
+
+ print(f"🔍 Testing S3 connection...")
+ print(f" 🔸 Access Key ID: {access_key_id[:10]}...")
+ print(f" 🔸 Secret Key: {secret_access_key[:10]}...")
+ print(f" 🔸 Region: {region}")
+ print(f" 🔸 Endpoint URL: {endpoint_url or 'AWS S3 (default)'}")
+
+ try:
+ # Get S3 client
+ s3_client = self.get_s3_client(access_key_id, secret_access_key, region, endpoint_url)
+ if not s3_client:
+ return {
+ 'success': False,
+ 'error': 'Failed to create S3 client'
+ }
+
+ # Test connection by listing buckets
+ start_time = time.time()
+ response = s3_client.list_buckets()
+ end_time = time.time()
+
+ response_time = end_time - start_time
+
+ buckets = [bucket['Name'] for bucket in response.get('Buckets', [])]
+
+ print(f"✅ S3 Connection Successful!")
+ print(f" 🔸 Response time: {response_time:.2f} seconds")
+ print(f" 🔸 Available buckets: {len(buckets)}")
+
+ if buckets:
+ print(f" 🔸 Buckets: {', '.join(buckets[:5])}{'...' if len(buckets) > 5 else ''}")
+
+ return {
+ 'success': True,
+ 'message': 'S3 connection successful',
+ 'buckets': buckets,
+ 'bucket_count': len(buckets),
+ 'response_time': response_time,
+ 'connection_details': {
+ 'region': region,
+ 'endpoint': endpoint_url or 'AWS S3 (default)',
+ 'has_credentials': bool(access_key_id and secret_access_key)
+ }
+ }
+
+ except NoCredentialsError:
+ error_msg = "No credentials provided or credentials are invalid"
+ print(f"❌ {error_msg}")
+ return {
+ 'success': False,
+ 'error': error_msg
+ }
+
+ except EndpointConnectionError as e:
+ error_msg = f"Cannot connect to endpoint: {endpoint_url}"
+ print(f"❌ {error_msg}")
+ return {
+ 'success': False,
+ 'error': error_msg,
+ 'details': str(e)
+ }
+
+ except ClientError as e:
+ error_code = e.response['Error']['Code']
+ error_msg = e.response['Error']['Message']
+ print(f"❌ AWS Error: {error_code} - {error_msg}")
+ return {
+ 'success': False,
+ 'error': f"{error_code}: {error_msg}",
+ 'aws_error': error_code
+ }
+
+ except Exception as e:
+ error_msg = f"Unexpected error: {str(e)}"
+ print(f"❌ {error_msg}")
+ return {
+ 'success': False,
+ 'error': error_msg
+ }
+
+ def list_s3_buckets(self, access_key_id, secret_access_key, region='us-east-1', endpoint_url=None):
+ """List all S3 buckets with details"""
+ try:
+ s3_client = self.get_s3_client(access_key_id, secret_access_key, region, endpoint_url)
+ if not s3_client:
+ return {
+ 'success': False,
+ 'error': 'Failed to create S3 client'
+ }
+
+ response = s3_client.list_buckets()
+ buckets_info = []
+
+ for bucket in response.get('Buckets', []):
+ bucket_name = bucket['Name']
+ creation_date = bucket['CreationDate']
+
+ # Get bucket location (region)
+ try:
+ location = s3_client.get_bucket_location(Bucket=bucket_name)
+ bucket_region = location.get('LocationConstraint', 'us-east-1')
+ if not bucket_region:
+ bucket_region = 'us-east-1'
+ except:
+ bucket_region = region
+
+ buckets_info.append({
+ 'name': bucket_name,
+ 'creation_date': creation_date.isoformat() if hasattr(creation_date, 'isoformat') else str(creation_date),
+ 'region': bucket_region
+ })
+
+ return {
+ 'success': True,
+ 'buckets': buckets_info,
+ 'count': len(buckets_info)
+ }
+
+ except Exception as e:
+ return {
+ 'success': False,
+ 'error': str(e)
+ }
+
+ def test_postgres_connection(self, uri_input):
+ """Test PostgreSQL connection with detailed error info"""
+
+ if isinstance(uri_input, dict):
+ print(f"⚠️ Received JSON object, extracting URI string...")
+
+ if 'uri' in uri_input:
+ uri = uri_input['uri']
+ print(f"✅ Extracted URI from 'uri' key")
+ elif 'url' in uri_input:
+ uri = uri_input['url']
+ print(f"✅ Extracted URI from 'url' key")
+ else:
+ error_msg = f'JSON must contain "uri" or "url" key. Received keys: {list(uri_input.keys())}'
+ print(f"❌ {error_msg}")
+ return {'success': False, 'error': error_msg}
+ else:
+ uri = uri_input
+
+ if not isinstance(uri, str):
+ error_msg = f'URI must be a string. Got type: {type(uri)}'
+ print(f"❌ {error_msg}")
+ return {'success': False, 'error': error_msg}
+
+ print(f"🔍 Testing PostgreSQL connection to: {uri}")
+ parsed = self.parse_postgres_uri(uri)
+
+ if not parsed:
+ error_msg = f'Invalid URI format: {uri}'
+ print(f"❌ {error_msg}")
+ return {'success': False, 'error': error_msg}
+
+ host = parsed['host']
+ port = parsed['port']
+ user = parsed['user']
+ database = parsed['database']
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ cmd = [
+ 'psql',
+ '-h', host,
+ '-p', str(port),
+ '-U', user,
+ '-d', database,
+ '-c', 'SELECT version(); SELECT current_database();',
+ '-t'
+ ]
+
+ try:
+ print(f" 🔗 Attempting connection to host: {host}:{port}")
+ print(f" 👤 User: {user}, Database: {database}")
+
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)
+
+ if result.returncode == 0:
+ lines = result.stdout.strip().split('\n')
+ version = lines[0] if len(lines) > 0 else ''
+ db_name = lines[1] if len(lines) > 1 else ''
+
+ print(f"✅ Connection successful to {host}:{port}")
+
+ return {
+ 'success': True,
+ 'message': 'Connection successful',
+ 'version': version,
+ 'database': db_name,
+ 'connection': {
+ 'host': host,
+ 'port': port,
+ 'user': user,
+ 'database': database
+ }
+ }
+ else:
+ error_output = result.stderr.strip()
+ print(f"❌ Connection FAILED to host: {host}:{port}")
+ print(f" 🔸 User: {user}")
+ print(f" 🔸 Database: {database}")
+ print(f" 🔸 Error details: {error_output[:200]}")
+
+ return {
+ 'success': False,
+ 'error': error_output,
+ 'connection_details': {
+ 'host': host,
+ 'port': port,
+ 'user': user,
+ 'database': database
+ }
+ }
+
+ except subprocess.TimeoutExpired:
+ print(f"❌ Connection TIMEOUT to host: {host}:{port}")
+ print(f" 🔸 Server not responding after 10 seconds")
+ print(f" 🔸 Please check: firewall, network, server status")
+
+ return {
+ 'success': False,
+ 'error': 'Connection timeout',
+ 'connection_details': {
+ 'host': host,
+ 'port': port,
+ 'user': user,
+ 'database': database
+ }
+ }
+
+ except FileNotFoundError:
+ print(f"❌ PSQL CLIENT NOT FOUND")
+ print(f" 🔸 The 'psql' command-line tool is not installed")
+ print(f" 🔸 Install PostgreSQL client tools and try again")
+
+ return {
+ 'success': False,
+ 'error': 'psql command not found. Install PostgreSQL client.',
+ 'connection_details': {
+ 'host': host,
+ 'port': port,
+ 'user': user,
+ 'database': database
+ }
+ }
+
+ except Exception as e:
+ print(f"❌ UNEXPECTED ERROR connecting to host: {host}:{port}")
+ print(f" 🔸 Error type: {type(e).__name__}")
+ print(f" 🔸 Error message: {str(e)}")
+
+ return {
+ 'success': False,
+ 'error': str(e),
+ 'connection_details': {
+ 'host': host,
+ 'port': port,
+ 'user': user,
+ 'database': database
+ }
+ }
+
+ def get_schemas(self, uri):
+ """Get list of schemas from PostgreSQL database"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return {'success': False, 'error': 'Invalid URI format'}
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ sql = """
+ SELECT schema_name
+ FROM information_schema.schemata
+ WHERE schema_name NOT IN ('information_schema', 'pg_catalog', 'pg_toast')
+ ORDER BY schema_name;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ try:
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)
+
+ if result.returncode == 0:
+ schemas = [s.strip() for s in result.stdout.splitlines() if s.strip()]
+ return {
+ 'success': True,
+ 'schemas': schemas,
+ 'count': len(schemas)
+ }
+ else:
+ return {'success': False, 'error': result.stderr.strip()}
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def get_tables(self, uri, schema=''):
+ """Get list of tables from PostgreSQL database"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return {'success': False, 'error': 'Invalid URI format'}
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ if schema:
+ sql = f"""
+ SELECT table_name, table_type
+ FROM information_schema.tables
+ WHERE table_schema = '{schema}'
+ ORDER BY table_name;
+ """
+ else:
+ sql = """
+ SELECT table_schema, table_name, table_type
+ FROM information_schema.tables
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog', 'pg_toast')
+ ORDER BY table_schema, table_name;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ try:
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=10)
+
+ if result.returncode == 0:
+ lines = [line.strip() for line in result.stdout.splitlines() if line.strip()]
+ tables = []
+
+ if schema:
+ for line in lines:
+ if '|' in line:
+ table_name, table_type = line.split('|')
+ tables.append({
+ 'name': table_name.strip(),
+ 'type': table_type.strip(),
+ 'schema': schema
+ })
+ else:
+ for line in lines:
+ if '|' in line:
+ parts = line.split('|')
+ if len(parts) >= 3:
+ table_schema, table_name, table_type = parts[:3]
+ tables.append({
+ 'schema': table_schema.strip(),
+ 'name': table_name.strip(),
+ 'type': table_type.strip()
+ })
+
+ return {
+ 'success': True,
+ 'tables': tables,
+ 'count': len(tables)
+ }
+ else:
+ return {'success': False, 'error': result.stderr.strip()}
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ def get_table_counts(self, uri, schema=None):
+ """Get row counts for tables"""
+ parsed = self.parse_postgres_uri(uri)
+ if not parsed:
+ return {}
+
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ if schema:
+ sql = f"""
+ SELECT table_name,
+ (SELECT COUNT(*) FROM "{schema}"."{table_name}") as row_count
+ FROM information_schema.tables
+ WHERE table_schema = '{schema}'
+ ORDER BY table_name;
+ """
+ else:
+ sql = """
+ SELECT table_schema, table_name,
+ (SELECT COUNT(*) FROM information_schema.tables t2
+ WHERE t2.table_schema = t1.table_schema
+ AND t2.table_name = t1.table_name) as row_count
+ FROM information_schema.tables t1
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog', 'pg_toast')
+ ORDER BY table_schema, table_name;
+ """
+
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ try:
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=30)
+
+ table_counts = {}
+ if result.returncode == 0:
+ for line in result.stdout.splitlines():
+ if line.strip() and '|' in line:
+ parts = line.split('|')
+ if len(parts) >= 3:
+ table_schema, table_name, count = parts[:3]
+ key = f"{table_schema.strip()}.{table_name.strip()}"
+ table_counts[key] = int(count.strip()) if count.strip().isdigit() else 0
+ return table_counts
+
+ except Exception:
+ return {}
+
+ def export_table_to_s3(self, postgres_uri, schema, table, s3_bucket, s3_key,
+ compress=True, format='csv',
+ access_key_id=None, secret_access_key=None,
+ region='us-east-1', endpoint_url=None):
+ """Export a single PostgreSQL table to S3"""
+ logs = []
+
+ def log(msg, level='info'):
+ log_entry = {
+ 'timestamp': time.time(),
+ 'message': msg,
+ 'level': level
+ }
+ logs.append(log_entry)
+ print(f"[Export] {msg}")
+
+ try:
+ log(f"🔧 Starting export of {schema}.{table} to S3", 'info')
+ log(f"PostgreSQL: {postgres_uri}", 'info')
+ log(f"S3 Destination: s3://{s3_bucket}/{s3_key}", 'info')
+ log(f"Format: {format}, Compress: {compress}", 'info')
+
+ # Parse PostgreSQL URI
+ parsed = self.parse_postgres_uri(postgres_uri)
+ if not parsed:
+ return {
+ 'success': False,
+ 'error': 'Invalid PostgreSQL URI',
+ 'logs': logs
+ }
+
+ # Set environment for psql
+ env = os.environ.copy()
+ env['PGPASSWORD'] = parsed['password']
+
+ # Build SQL command based on format
+ if format.lower() == 'csv':
+ sql = f"COPY (SELECT * FROM \"{schema}\".\"{table}\") TO STDOUT WITH CSV HEADER"
+ else: # json
+ sql = f"""
+ SELECT json_agg(row_to_json(t))
+ FROM (SELECT * FROM "{schema}"."{table}") t
+ """
+
+ # Build psql command
+ cmd = [
+ 'psql',
+ '-h', parsed['host'],
+ '-p', str(parsed['port']),
+ '-U', parsed['user'],
+ '-d', parsed['database'],
+ '-t',
+ '-c', sql
+ ]
+
+ # Execute query and capture output
+ log(f"📊 Querying PostgreSQL table...", 'info')
+ start_time = time.time()
+ result = subprocess.run(cmd, env=env, capture_output=True, text=True, timeout=300)
+
+ if result.returncode != 0:
+ error_msg = result.stderr[:500]
+ log(f"❌ Failed to query table: {error_msg}", 'error')
+ return {
+ 'success': False,
+ 'error': f"Query failed: {error_msg}",
+ 'logs': logs
+ }
+
+ query_time = time.time() - start_time
+ log(f"✅ Query completed in {query_time:.2f} seconds", 'success')
+
+ # Get data
+ data = result.stdout
+
+ # Prepare data for upload
+ if format.lower() == 'csv':
+ file_content = data.encode('utf-8')
+ content_type = 'text/csv'
+ file_extension = 'csv'
+ else: # json
+ file_content = json.dumps(json.loads(data), indent=2).encode('utf-8') if data.strip() else b'[]'
+ content_type = 'application/json'
+ file_extension = 'json'
+
+ # Compress if requested
+ if compress:
+ log(f"📦 Compressing data...", 'info')
+ buffer = io.BytesIO()
+ with gzip.GzipFile(fileobj=buffer, mode='wb') as f:
+ f.write(file_content)
+ file_content = buffer.getvalue()
+ file_extension = f"{file_extension}.gz"
+ content_type = 'application/gzip'
+ log(f"✅ Compression complete: {len(data)} → {len(file_content)} bytes", 'success')
+
+ # Upload to S3
+ log(f"⬆️ Uploading to S3...", 'info')
+
+ # Get S3 client
+ s3_client = self.get_s3_client(access_key_id, secret_access_key, region, endpoint_url)
+ if not s3_client:
+ return {
+ 'success': False,
+ 'error': 'Failed to create S3 client',
+ 'logs': logs
+ }
+
+ # Upload file
+ upload_start = time.time()
+ s3_client.put_object(
+ Bucket=s3_bucket,
+ Key=s3_key,
+ Body=file_content,
+ ContentType=content_type,
+ Metadata={
+ 'source_schema': schema,
+ 'source_table': table,
+ 'export_time': datetime.utcnow().isoformat(),
+ 'compressed': str(compress),
+ 'format': format
+ }
+ )
+ upload_time = time.time() - upload_start
+
+ # Generate S3 URL
+ if endpoint_url:
+ s3_url = f"{endpoint_url}/{s3_bucket}/{s3_key}"
+ else:
+ s3_url = f"https://{s3_bucket}.s3.{region}.amazonaws.com/{s3_key}"
+
+ log(f"✅ Upload completed in {upload_time:.2f} seconds", 'success')
+ log(f"🔗 S3 URL: {s3_url}", 'info')
+
+ return {
+ 'success': True,
+ 'message': f"Exported {schema}.{table} to S3 successfully",
+ 's3_url': s3_url,
+ 's3_bucket': s3_bucket,
+ 's3_key': s3_key,
+ 'file_size': len(file_content),
+ 'original_size': len(data) if compress else None,
+ 'compressed': compress,
+ 'format': format,
+ 'upload_time': upload_time,
+ 'query_time': query_time,
+ 'logs': logs
+ }
+
+ except subprocess.TimeoutExpired:
+ log("❌ Query timeout (5 minutes)", 'error')
+ return {
+ 'success': False,
+ 'error': 'Query timeout',
+ 'logs': logs
+ }
+ except Exception as e:
+ log(f"❌ Error: {str(e)}", 'error')
+ return {
+ 'success': False,
+ 'error': str(e),
+ 'logs': logs
+ }
+
+ def migrate_to_s3(self, migration_id, postgres_uri, s3_bucket, s3_prefix='',
+ schemas=None, tables=None, compress=True, format='csv',
+ access_key_id=None, secret_access_key=None,
+ region='us-east-1', endpoint_url=None):
+ """Migrate PostgreSQL data to S3"""
+ logs = []
+
+ def log(msg, level='info'):
+ log_entry = {
+ 'timestamp': time.time(),
+ 'message': msg,
+ 'level': level
+ }
+ logs.append(log_entry)
+ print(f"[{migration_id}] {msg}")
+
+ try:
+ log(f"🔧 Starting PostgreSQL to S3 Migration {migration_id}", 'info')
+ log(f"PostgreSQL: {postgres_uri}", 'info')
+ log(f"S3 Destination: s3://{s3_bucket}/{s3_prefix}", 'info')
+ log(f"Format: {format}, Compress: {compress}", 'info')
+ log(f"S3 Region: {region}, Endpoint: {endpoint_url or 'AWS S3 (default)'}", 'info')
+
+ # Get tables to export
+ tables_to_export = []
+ if tables:
+ tables_to_export = tables
+ log(f"📋 Selected tables: {len(tables_to_export)} tables", 'info')
+ elif schemas:
+ # Get all tables from selected schemas
+ for schema in schemas:
+ result = self.get_tables(postgres_uri, schema)
+ if result['success']:
+ for table_info in result['tables']:
+ tables_to_export.append(f"{table_info['schema']}.{table_info['name']}")
+ log(f"📋 Selected schemas: {', '.join(schemas)} → {len(tables_to_export)} tables", 'info')
+ else:
+ # Get all tables
+ result = self.get_tables(postgres_uri)
+ if not result['success']:
+ return {
+ 'success': False,
+ 'migration_id': migration_id,
+ 'error': f"Failed to get tables: {result['error']}",
+ 'logs': logs
+ }
+ for table_info in result['tables']:
+ tables_to_export.append(f"{table_info['schema']}.{table_info['name']}")
+ log(f"📋 All tables: {len(tables_to_export)} tables", 'info')
+
+ if not tables_to_export:
+ return {
+ 'success': False,
+ 'migration_id': migration_id,
+ 'error': 'No tables to export',
+ 'logs': logs
+ }
+
+ # Export each table
+ successful_exports = []
+ failed_exports = []
+
+ total_tables = len(tables_to_export)
+ log(f"🚀 Starting export of {total_tables} tables...", 'info')
+
+ for i, table_ref in enumerate(tables_to_export, 1):
+ if '.' in table_ref:
+ schema, table = table_ref.split('.', 1)
+ else:
+ schema = 'public'
+ table = table_ref
+
+ # Create S3 key
+ timestamp = datetime.utcnow().strftime('%Y%m%d_%H%M%S')
+ extension = 'csv.gz' if compress else 'csv' if format == 'csv' else 'json'
+ s3_key = f"{s3_prefix}{schema}_{table}_{timestamp}.{extension}" if s3_prefix else f"{schema}_{table}_{timestamp}.{extension}"
+
+ log(f"📊 Exporting table {i}/{total_tables}: {schema}.{table}", 'info')
+
+ # Export table
+ result = self.export_table_to_s3(
+ postgres_uri=postgres_uri,
+ schema=schema,
+ table=table,
+ s3_bucket=s3_bucket,
+ s3_key=s3_key,
+ compress=compress,
+ format=format,
+ access_key_id=access_key_id,
+ secret_access_key=secret_access_key,
+ region=region,
+ endpoint_url=endpoint_url
+ )
+
+ if result['success']:
+ successful_exports.append({
+ 'table': f"{schema}.{table}",
+ 's3_key': s3_key,
+ 'size': result.get('file_size', 0),
+ 's3_url': result.get('s3_url')
+ })
+ log(f"✅ Successfully exported {schema}.{table}", 'success')
+ else:
+ failed_exports.append({
+ 'table': f"{schema}.{table}",
+ 'error': result.get('error', 'Unknown error')
+ })
+ log(f"❌ Failed to export {schema}.{table}: {result.get('error')}", 'error')
+
+ # Calculate statistics
+ total_size = sum(export['size'] for export in successful_exports)
+
+ log(f"📊 Migration Summary:", 'info')
+ log(f" ✅ Successful: {len(successful_exports)} tables", 'success')
+ log(f" ❌ Failed: {len(failed_exports)} tables", 'error' if failed_exports else 'info')
+ log(f" 📦 Total size: {total_size:,} bytes", 'info')
+
+ # Generate S3 configuration
+ s3_config = {
+ 'access_key_id': access_key_id[:10] + '...' if access_key_id else None,
+ 'secret_access_key': '***' if secret_access_key else None,
+ 'region': region,
+ 'endpoint_url': endpoint_url,
+ 'bucket': s3_bucket,
+ 'prefix': s3_prefix
+ }
+
+ return {
+ 'success': True,
+ 'migration_id': migration_id,
+ 'message': f"Migration completed: {len(successful_exports)}/{total_tables} tables exported",
+ 'stats': {
+ 'total_tables': total_tables,
+ 'successful_exports': len(successful_exports),
+ 'failed_exports': len(failed_exports),
+ 'total_size': total_size
+ },
+ 'successful_exports': successful_exports,
+ 'failed_exports': failed_exports,
+ 's3_config': s3_config,
+ 'logs': logs
+ }
+
+ except Exception as e:
+ log(f"❌ Migration failed: {str(e)}", 'error')
+ return {
+ 'success': False,
+ 'migration_id': migration_id,
+ 'error': str(e),
+ 'logs': logs
+ }
+
+ def start_migration_async(self, migration_id, postgres_uri, s3_bucket, s3_prefix='',
+ schemas=None, tables=None, compress=True, format='csv',
+ access_key_id=None, secret_access_key=None,
+ region='us-east-1', endpoint_url=None):
+ """Start migration in background thread"""
+ def migration_task():
+ result = self.migrate_to_s3(
+ migration_id=migration_id,
+ postgres_uri=postgres_uri,
+ s3_bucket=s3_bucket,
+ s3_prefix=s3_prefix,
+ schemas=schemas,
+ tables=tables,
+ compress=compress,
+ format=format,
+ access_key_id=access_key_id,
+ secret_access_key=secret_access_key,
+ region=region,
+ endpoint_url=endpoint_url
+ )
+ with self._lock:
+ self.migrations[migration_id] = result
+
+ thread = threading.Thread(target=migration_task, daemon=True)
+ thread.start()
+
+ # Initialize migration entry
+ with self._lock:
+ self.migrations[migration_id] = {
+ 'status': 'running',
+ 'started_at': time.time(),
+ 'postgres_uri': postgres_uri,
+ 's3_bucket': s3_bucket,
+ 's3_prefix': s3_prefix
+ }
+
+ return migration_id
+
+ def get_migration_status(self, migration_id):
+ """Get status of a migration"""
+ with self._lock:
+ return self.migrations.get(migration_id)
+
+ def list_migrations(self):
+ """List all migrations"""
+ with self._lock:
+ migrations_list = []
+ for mig_id, mig_data in self.migrations.items():
+ if isinstance(mig_data, dict) and 'success' in mig_data:
+ status = 'completed' if mig_data['success'] else 'failed'
+ else:
+ status = 'running'
+
+ migrations_list.append({
+ 'id': mig_id,
+ 'status': status,
+ 'data': mig_data
+ })
+
+ return migrations_list
+
+
+# ============================================================================
+# Global instances for backward compatibility
+# ============================================================================
+
+# Shared, ready-made instance of each migrator type so callers can import an
+# object directly instead of constructing their own.
+postgres_migrator = PostgresMigrator()
+s3_to_s3_migrator = S3ToS3Migrator()
+postgres_to_s3_migrator = PostgresToS3Migrator()
+
+# For backward compatibility with existing code that imports 'migrator':
+# uncomment exactly one of the aliases below, depending on which migration
+# direction the legacy code expects.
+# migrator = postgres_migrator # For PostgreSQL to PostgreSQL
+# migrator = s3_to_s3_migrator # For S3 to S3
+# migrator = postgres_to_s3_migrator # For PostgreSQL to S3
+
+# Public API of this module: the three classes plus their shared instances.
+__all__ = [
+ 'PostgresMigrator',
+ 'S3ToS3Migrator',
+ 'PostgresToS3Migrator',
+ 'postgres_migrator',
+ 's3_to_s3_migrator',
+ 'postgres_to_s3_migrator'
+]
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..e5f2d13
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+Flask==2.3.3
+Flask-CORS==4.0.0
+boto3==1.28.25
+psycopg2-binary==2.9.7
+PyMySQL==1.1.0
+cryptography
\ No newline at end of file
diff --git a/unified_migration.env b/unified_migration.env
index fa2b106..79aa77a 100644
--- a/unified_migration.env
+++ b/unified_migration.env
@@ -1,25 +1,11 @@
-# ==================== Source MySQL Configuration ====================
-SOURCE_MYSQL_HOST=localhost
-SOURCE_MYSQL_PORT=3306
-SOURCE_MYSQL_USER=root
-SOURCE_MYSQL_PASSWORD=secret123
-SOURCE_MYSQL_DATABASE=sourcedb
-SOURCE_MYSQL_CHARSET=utf8mb4
+SRC_DB_HOST=localhost
+SRC_DB_PORT=5432
+SRC_DB_USER=postgres
+SRC_DB_PASSWORD=secret123
+SRC_DB_NAME=sourcedb
+SRC_DB_URI=postgresql://postgres:secret123@localhost:5432/sourcedb
-# Source SSL (اختياري)
-# SOURCE_MYSQL_SSL_CA=/path/to/ca.pem
-# SOURCE_MYSQL_SSL_CERT=/path/to/cert.pem
-# SOURCE_MYSQL_SSL_KEY=/path/to/key.pem
-
-# ==================== Destination MySQL Configuration ====================
-DEST_MYSQL_HOST=192.168.1.100
-DEST_MYSQL_PORT=3306
-DEST_MYSQL_USER=admin
-DEST_MYSQL_PASSWORD=admin456
-DEST_MYSQL_DATABASE=destdb
-DEST_MYSQL_CHARSET=utf8mb4
-
-# Destination SSL (اختياري)
-# DEST_MYSQL_SSL_CA=/path/to/ca.pem
-# DEST_MYSQL_SSL_CERT=/path/to/cert.pem
-# DEST_MYSQL_SSL_KEY=/path/to/key.pem
\ No newline at end of file
+DEST_AWS_ACCESS_KEY_ID=your-access-key-id
+DEST_AWS_SECRET_ACCESS_KEY=your-secret-access-key
+DEST_AWS_REGION=us-east-1
+DEST_AWS_ENDPOINT_URL=https://s3.us-east-1.amazonaws.com