diff --git a/README.md b/README.md
index e7015f3..2883081 100644
--- a/README.md
+++ b/README.md
@@ -1,96 +1,96 @@
-# Ceph Max Storage Rechner
+# Ceph Max Storage Calculator

-Eine moderne Web-Anwendung zur Berechnung der maximal zulässigen Speichernutzung in einem Ceph-Cluster.
+A modern web application for calculating the maximum allowed storage usage in a Ceph cluster.

-## Funktionen
+## Features

-- Berechnung der maximalen Speichernutzung basierend auf Ceph-Dokumentation
-- Unterstützung für Replikation und Erasure Coding
-- Dynamisches Hinzufügen und Entfernen von Nodes und OSDs
-- Benutzerfreundliche Oberfläche mit modernem Design
-- Dark/Light Mode Unterstützung
-- Export/Import von Konfigurationen
-- PDF-Report Generierung
-- Responsive Design für alle Geräte
-- Ausfalltoleranz-Analyse für Nodes und OSDs
+- Calculation of maximum storage usage based on Ceph documentation
+- Support for Replication and Erasure Coding
+- Dynamic addition and removal of nodes and OSDs
+- User-friendly interface with modern design
+- Dark/Light Mode support
+- Export/Import of configurations
+- PDF report generation
+- Responsive design for all devices
+- Fault tolerance analysis for nodes and OSDs

 ## Installation

-1. Repository klonen:
+1. Clone repository:
 ```
 git clone
 cd ceph-calculator
 ```

-2. Python-Umgebung einrichten:
+2. Set up Python environment:
 ```
 python -m venv venv
 venv\Scripts\activate  # Windows
 source venv/bin/activate  # Linux/Mac
 ```

-3. Abhängigkeiten installieren:
+3. Install dependencies:
 ```
 pip install -r requirements.txt
 ```

-4. Anwendung starten:
+4. Start application:
 ```
 python run.py
 ```

-5. Öffnen Sie in einem Browser: `http://localhost:5000`
+5. Open in a browser: `http://localhost:5000`

-## Verwendung
+## Usage

-1. Wählen Sie den Replikationstyp: Replikation oder Erasure Coding
-2. Geben Sie die entsprechenden Parameter ein:
-   - Bei Replikation: Anzahl der Replikate und min_size
-   - Bei Erasure Coding: k (Datenchunks) und m (Codierungschunks)
-3. Fügen Sie Nodes hinzu und konfigurieren Sie deren OSDs mit entsprechenden Speichergrößen
-4. Wählen Sie die gewünschte Speichereinheit (GB/TB)
-5. Klicken Sie auf "Kapazität berechnen", um das Ergebnis zu sehen
-6. Optional: Exportieren Sie die Konfiguration oder generieren Sie einen PDF-Report
+1. Select the replication type: Replication or Erasure Coding
+2. Enter the corresponding parameters:
+   - For Replication: Number of replicas and min_size
+   - For Erasure Coding: k (data chunks) and m (coding chunks)
+3. Add nodes and configure their OSDs with appropriate storage sizes
+4. Select the desired storage unit (GB/TB)
+5. Click on "Calculate Capacity" to see the result
+6. Optional: Export the configuration or generate a PDF report

-## Technologie
+## Technology

 - Backend: Flask
 - Frontend:
-  - Alpine.js für reaktive Benutzeroberfläche
-  - Tailwind CSS für modernes Design
-  - HTMX für interaktive Elemente
-- Dark/Light Mode mit Tailwind CSS
-- PDF-Generierung mit ReportLab
-- Responsive Design für alle Geräte
+  - Alpine.js for reactive user interface
+  - Tailwind CSS for modern design
+  - HTMX for interactive elements
+- Dark/Light Mode with Tailwind CSS
+- PDF generation with ReportLab
+- Responsive design for all devices

-## Features im Detail
+## Detailed Features

-### Replikation
-- Konfigurierbare Anzahl von Replikaten (1-10)
-- Einstellbare min_size für I/O-Operationen
-- Automatische Berechnung der Ausfalltoleranz
+### Replication
+- Configurable number of replicas (1-10)
+- Adjustable min_size for I/O operations
+- Automatic calculation of fault tolerance

 ### Erasure Coding
-- Konfigurierbare k/m-Werte
-- Optimierte Speichernutzung
-- Ausfalltoleranz-Analyse
+- Configurable k/m values
+- Optimized storage usage
+- Fault tolerance analysis

-### Ausfalltoleranz-Analyse
-- Node-Ausfalltoleranz
-- OSD-Ausfalltoleranz
-- Multi-Node Ausfalltoleranz
-- Detaillierte Informationen zur Speichernutzung nach Ausfällen
+### Fault Tolerance Analysis
+- Node fault tolerance
+- OSD fault tolerance
+- Multi-node fault tolerance
+- Detailed information about storage usage after failures

-### Benutzerfreundlichkeit
-- Intuitive Benutzeroberfläche
+### User-Friendliness
+- Intuitive user interface
 - Dark/Light Mode
-- Tooltips mit Erklärungen
-- Responsive Design
-- Export/Import von Konfigurationen
-- PDF-Report Generierung
+- Tooltips with explanations
+- Responsive design
+- Export/Import of configurations
+- PDF report generation

-## Hinweis
+## Note

-Die Berechnungen basieren auf den allgemeinen Empfehlungen aus der Ceph-Dokumentation und
-dienen als Richtwert. Für genaue Kapazitätsplanung sollten Sie die Ceph-Dokumentation konsultieren
-und Ihre spezifischen Clusteranforderungen berücksichtigen.
\ No newline at end of file
+The calculations are based on general recommendations from the Ceph documentation and
+serve as a guideline. For precise capacity planning, you should consult the Ceph documentation
+and consider your specific cluster requirements.
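[Editor's aside, not part of the diff: once the app is running, the `/calculate` endpoint documented above can also be exercised directly. A minimal sketch, assuming the `requests` package is installed; the payload fields mirror what app/routes/main.py validates, and if CSRF protection is enabled the request additionally needs the token header used by the front-end code:]

```python
# Illustrative request against a locally running instance (hypothetical usage,
# not part of this change). Fields match the validation in app/routes/main.py.
import requests

payload = {
    "replication_type": "replication",   # or "erasure_coding" with k and m set
    "replicas": 3,
    "min_size": 2,
    "nodes": [
        {"osd_count": 4, "osd_size_gb": 1000},
        {"osd_count": 4, "osd_size_gb": 1000},
        {"osd_count": 4, "osd_size_gb": 1000},
    ],
    "storage_unit": "GB",
}
resp = requests.post("http://localhost:5000/calculate", json=payload)
print(resp.status_code, resp.json())
```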
\ No newline at end of file diff --git a/app/routes/__pycache__/main.cpython-313.pyc b/app/routes/__pycache__/main.cpython-313.pyc index e883777..a669a15 100644 Binary files a/app/routes/__pycache__/main.cpython-313.pyc and b/app/routes/__pycache__/main.cpython-313.pyc differ diff --git a/app/routes/main.py b/app/routes/main.py index df465b8..ba0fb21 100644 --- a/app/routes/main.py +++ b/app/routes/main.py @@ -17,30 +17,44 @@ def calculate(): try: data = request.json if not data: - return jsonify({"error": "Keine Daten empfangen"}), 400 + return jsonify({"error": "No data received"}), 400 - replication_type = data.get('replication_type') # 'replication' oder 'erasure_coding' + replication_type = data.get('replication_type') # 'replication' or 'erasure_coding' if not replication_type: - return jsonify({"error": "Replikationstyp fehlt"}), 400 + return jsonify({"error": "Replication type missing"}), 400 - replicas = int(data.get('replicas', 3)) # Standardwert: 3 Replikate + # Basic parameter validation + try: + replicas = int(data.get('replicas', 3)) # Default value: 3 replicas + min_size = int(data.get('min_size', 2)) # Default value: 2 + + # For Erasure Coding + k = int(data.get('k', 0)) if replication_type == 'erasure_coding' else 0 + m = int(data.get('m', 0)) if replication_type == 'erasure_coding' else 0 + + # Nodes and OSDs + nodes = data.get('nodes', []) + if not nodes: + return jsonify({"error": "No nodes defined"}), 400 + + # Validate node data format + for node in nodes: + if 'osd_count' not in node or 'osd_size_gb' not in node: + return jsonify({"error": "Invalid node data format. Each node must have osd_count and osd_size_gb properties."}), 400 + + # Simple validity check + if int(node.get('osd_count', 0)) <= 0 or float(node.get('osd_size_gb', 0)) <= 0: + return jsonify({"error": "Invalid OSD data. 
Both count and size must be greater than 0."}), 400 + + # Storage unit + storage_unit = data.get('storage_unit', 'GB') + if storage_unit not in ['GB', 'TB']: + storage_unit = 'GB' # Default fallback + + except (ValueError, TypeError) as e: + return jsonify({"error": f"Invalid parameter: {str(e)}"}), 400 - # Für Erasure Coding - k = int(data.get('k', 0)) # Datenchunks - m = int(data.get('m', 0)) # Codierungschunks - - # Minimale Replikate für I/O-Operationen - min_size = int(data.get('min_size', 2)) # Standardwert: 2 - - # Nodes und OSDs - nodes = data.get('nodes', []) - if not nodes: - return jsonify({"error": "Keine Nodes definiert"}), 400 - - # Speichereinheit - storage_unit = data.get('storage_unit', 'GB') - - # Berechnung durchführen + # Perform calculation result = calculate_ceph_capacity( replication_type=replication_type, replicas=replicas, @@ -51,39 +65,40 @@ def calculate(): storage_unit=storage_unit ) + logger.info(f"Calculation performed successfully: {replication_type}, replicas={replicas}, nodes={len(nodes)}") return jsonify(result) except ValueError as e: - logger.error(f"Validierungsfehler bei der Berechnung: {str(e)}") + logger.error(f"Validation error during calculation: {str(e)}") return jsonify({"error": str(e)}), 400 except Exception as e: - logger.error(f"Fehler bei der Berechnung: {str(e)}\n{traceback.format_exc()}") - return jsonify({"error": "Ein unerwarteter Fehler ist aufgetreten"}), 500 + logger.error(f"Error during calculation: {str(e)}\n{traceback.format_exc()}") + return jsonify({"error": "An unexpected error occurred"}), 500 @bp.route('/generate-pdf', methods=['POST']) def generate_pdf(): try: data = request.get_json() if not data: - return jsonify({"error": "Keine Daten empfangen"}), 400 + return jsonify({"error": "No data received"}), 400 - # Validiere die Daten + # Validate data if not data.get('replication_type'): - return jsonify({"error": "Replikationstyp fehlt"}), 400 + return jsonify({"error": "Replication type missing"}), 400 if not data.get('nodes') or not isinstance(data['nodes'], list): - return jsonify({"error": "Ungültige Node-Daten"}), 400 + return jsonify({"error": "Invalid node data"}), 400 if not data.get('result') or not isinstance(data['result'], dict): - return jsonify({"error": "Ungültige Ergebnisdaten"}), 400 + return jsonify({"error": "Invalid result data"}), 400 if not data['result'].get('raw_total'): - return jsonify({"error": "Keine Berechnungsergebnisse vorhanden"}), 400 + return jsonify({"error": "No calculation results available"}), 400 - # Generiere PDF + # Generate PDF pdf_bytes = generate_pdf_report(data) if not pdf_bytes: - return jsonify({"error": "PDF-Generierung fehlgeschlagen"}), 500 + return jsonify({"error": "PDF generation failed"}), 500 return send_file( BytesIO(pdf_bytes), @@ -92,8 +107,8 @@ def generate_pdf(): download_name='ceph-report.pdf' ) except ValueError as e: - logger.error(f"Validierungsfehler bei der PDF-Generierung: {str(e)}") + logger.error(f"Validation error during PDF generation: {str(e)}") return jsonify({"error": str(e)}), 400 except Exception as e: - logger.error(f"Fehler bei der PDF-Generierung: {str(e)}\n{traceback.format_exc()}") - return jsonify({"error": "Ein unerwarteter Fehler ist aufgetreten"}), 500 \ No newline at end of file + logger.error(f"Error during PDF generation: {str(e)}\n{traceback.format_exc()}") + return jsonify({"error": "An unexpected error occurred"}), 500 \ No newline at end of file diff --git a/app/static/img/tk-logo.png b/app/static/img/tk-logo.png new file mode 100644 
index 0000000..d4f8d79 Binary files /dev/null and b/app/static/img/tk-logo.png differ diff --git a/app/static/js/script.js.bak b/app/static/js/script.js.bak deleted file mode 100644 index aca56c4..0000000 --- a/app/static/js/script.js.bak +++ /dev/null @@ -1,167 +0,0 @@ -function cephCalculator() { - return { - replicationType: 'replication', - replicas: 3, - minSize: 2, - k: 4, - m: 2, - storageUnit: 'TB', - nodes: [ - { - osd_count: 4, - osd_size_gb: 1000 - } - ], - result: { - max_usage_percent: 0, - max_usage_gb: 0, - max_usage_tb: 0, - raw_total: 0 - }, - isCalculating: false, - - init() { - const savedConfig = localStorage.getItem('lastConfig'); - if (savedConfig) { - const config = JSON.parse(savedConfig); - Object.assign(this, config); - } - }, - - addNode() { - this.nodes.push({ - osd_count: 4, - osd_size_gb: this.storageUnit === 'TB' ? 1000 : 1 - }); - }, - - removeNode(nodeIndex) { - this.nodes.splice(nodeIndex, 1); - if (this.nodes.length === 0) { - this.addNode(); - } - }, - - getOsdSizeInGB(size) { - return this.storageUnit === 'TB' ? size * 1024 : size; - }, - - calculateCapacity() { - const data = { - replication_type: this.replicationType, - replicas: parseInt(this.replicas), - k: parseInt(this.k), - m: parseInt(this.m), - nodes: this.nodes.map(node => ({ - osd_count: parseInt(node.osd_count), - osd_size_gb: this.getOsdSizeInGB(parseFloat(node.osd_size_gb)) - })), - min_size: parseInt(this.minSize), - storage_unit: this.storageUnit - }; - - this.isCalculating = true; - - fetch('/calculate', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-CSRF-Token': document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') - }, - body: JSON.stringify(data) - }) - .then(response => response.json()) - .then(data => { - this.result = data; - }) - .catch(error => { - console.error('Fehler bei der Berechnung:', error); - alert('Es ist ein Fehler bei der Berechnung aufgetreten. Bitte versuchen Sie es erneut.'); - }) - .finally(() => { - this.isCalculating = false; - }); - }, - - exportConfig() { - const config = { - replicationType: this.replicationType, - replicas: this.replicas, - minSize: this.minSize, - k: this.k, - m: this.m, - storageUnit: this.storageUnit, - nodes: this.nodes, - result: this.result - }; - - const blob = new Blob([JSON.stringify(config, null, 2)], { type: 'application/json' }); - const url = URL.createObjectURL(blob); - const a = document.createElement('a'); - a.href = url; - a.download = 'ceph-config.json'; - a.click(); - URL.revokeObjectURL(url); - }, - - importConfig(event) { - const file = event.target.files[0]; - if (!file) return; - - const reader = new FileReader(); - reader.onload = (e) => { - try { - const config = JSON.parse(e.target.result); - Object.assign(this, config); - // Berechne automatisch nach dem Import - this.calculateCapacity(); - } catch (error) { - alert('Fehler beim Importieren der Konfiguration. 
Bitte überprüfen Sie das Dateiformat.'); - } - }; - reader.readAsText(file); - }, - - generatePDF() { - if (!this.result.raw_total) { - alert('Bitte führen Sie zuerst eine Berechnung durch.'); - return; - } - - const data = { - replication_type: this.replicationType, - replicas: this.replicas, - min_size: this.minSize, - k: this.k, - m: this.m, - storage_unit: this.storageUnit, - nodes: this.nodes, - result: this.result - }; - - fetch('/generate-pdf', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-CSRF-Token': document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') - }, - body: JSON.stringify(data) - }) - .then(response => response.blob()) - .then(blob => { - const url = window.URL.createObjectURL(blob); - const a = document.createElement('a'); - a.href = url; - a.download = 'ceph-report.pdf'; - document.body.appendChild(a); - a.click(); - window.URL.revokeObjectURL(url); - document.body.removeChild(a); - }) - .catch(error => { - console.error('Fehler bei der PDF-Generierung:', error); - alert('Es ist ein Fehler bei der PDF-Generierung aufgetreten. Bitte versuchen Sie es erneut.'); - }); - } - }; -} \ No newline at end of file diff --git a/app/templates/base.html b/app/templates/base.html index 02c9f37..081d505 100644 --- a/app/templates/base.html +++ b/app/templates/base.html @@ -1,5 +1,5 @@ - + @@ -9,6 +9,6 @@ - // ... existing code ... + \ No newline at end of file diff --git a/app/templates/index.html b/app/templates/index.html index 3e039bb..eb8f5ff 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -1,15 +1,15 @@ - - Ceph Max Storage Rechner - + Ceph Max Storage Calculator + - + - +
[Extraction note: the HTML markup of the following index.html template hunks was lost; only diff markers and visible UI strings survive. The hunks add the Thomas-Krenn.AG logo to the page header and replace the German interface with an English, restructured one. The recoverable string changes are:]

+   Thomas-Krenn.AG Logo (logo image added to the header)
-   Ceph Max Storage Kapazitätsrechner / Berechnen Sie die maximale Speichernutzung Ihres Ceph-Clusters
+   Ceph Max Storage Capacity Calculator / Calculate the maximum storage usage of your Ceph cluster
-   Replikationstyp / Replikationskonfiguration / Erasure Coding Konfiguration / Nodes und OSDs
+   Configuration
-   Ergebnis / Gesamt-Rohspeicher: / Maximale empfohlene Speichernutzung:
+   Results / Total Raw Storage: / Maximum Recommended Usage:
-   Ausfalltoleranz-Analyse / Node-Ausfalltoleranz: / OSD-Ausfalltoleranz: / Größter Node: / Rohspeicher nach Nodeausfall: / Nutzbarer Speicher nach Ausfall: / Nutzbarer Speicher nach max. Ausfällen: / Min-Size (tooltip explaining the minimum number of replicas for I/O operations)
+   Fault Tolerance / Node Fault Tolerance: / OSD Fault Tolerance:
-   Empfehlungen gemäß Ceph-Dokumentation: (five recommendation bullets, in German)
+   Recommendations:
+   • Ensure that after node failures, enough capacity remains to avoid reaching the 'full ratio'.
+   • For replication, the number of nodes should be greater than the replication factor.
+   • For erasure coding, the number of nodes should be greater than k+m (data chunks + coding chunks).
+   • Recovery performance depends on network and storage speed.
+   • Note that after a node failure, rebalancing the cluster takes longer the larger the failed node is.
-   Basierend auf Ceph-Dokumentation | Ergebnisse dienen nur als Richtwert (footer removed)
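[Editor's aside: the capacity arithmetic that the template above and the script hunk below expose reduces to the formulas stated elsewhere in this diff: usable = raw / replicas for replication, usable = raw * k / (k + m) for erasure coding, and the per-OSD headroom formula x = (s * p) / (s + 1) used in ceph_calculator.py. A minimal standalone sketch, illustrative only and not part of the change:]

```python
# Sketch of the capacity formulas this PR implements (illustrative only).

def usable_capacity_gb(raw_gb, replication_type, replicas=3, k=0, m=0):
    """Usable capacity before any failure."""
    if replication_type == 'replication':
        return raw_gb / replicas           # every object is stored `replicas` times
    return raw_gb * (k / (k + m))          # erasure coding: k data + m coding chunks

def osd_usage_percent(osds_per_server, p=80.0):
    """Per-OSD target utilization x = (s * p) / (s + 1)."""
    return (osds_per_server * p) / (osds_per_server + 1)

raw = 3 * 4 * 1000.0                                         # 3 nodes x 4 OSDs x 1000 GB
print(usable_capacity_gb(raw, 'replication', replicas=3))    # 4000.0
print(usable_capacity_gb(raw, 'erasure_coding', k=4, m=2))   # 8000.0
print(osd_usage_percent(4))                                  # 64.0
```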
@@ -325,38 +264,31 @@
             m: 2,
             storageUnit: 'TB',
             nodes: [
-                {
-                    osd_count: 4,
-                    osd_size_gb: 1
-                }
+                { osd_count: 4, osd_size_gb: 1 },
+                { osd_count: 4, osd_size_gb: 1 },
+                { osd_count: 4, osd_size_gb: 1 }
             ],
             result: {
+                raw_total: 0,
                 max_usage_percent: 0,
                 max_usage_gb: 0,
                 max_usage_tb: 0,
-                raw_total: 0
+                node_failure_tolerance: false,
+                node_failure_info: '',
+                osd_failure_tolerance: false,
+                osd_failure_info: '',
+                multi_failure_tolerance: false,
+                max_failure_nodes: 0
             },
             isCalculating: false,
-            init() {
-                const savedConfig = localStorage.getItem('lastConfig');
-                if (savedConfig) {
-                    const config = JSON.parse(savedConfig);
-                    Object.assign(this, config);
-                }
-            },
-
             addNode() {
-                this.nodes.push({
-                    osd_count: 4,
-                    osd_size_gb: 1
-                });
+                this.nodes.push({ osd_count: 3, osd_size_gb: 2000 });
             },
-            removeNode(nodeIndex) {
-                this.nodes.splice(nodeIndex, 1);
-                if (this.nodes.length === 0) {
-                    this.addNode();
+            removeNode(index) {
+                if (this.nodes.length > 1) {
+                    this.nodes.splice(index, 1);
                 }
             },
@@ -365,120 +297,186 @@
             },
             calculateCapacity() {
+                this.isCalculating = true;
+
+                // Validate Erasure Coding parameters
+                if (this.replicationType === 'erasure_coding') {
+                    if (parseInt(this.k) <= 0 || parseInt(this.m) <= 0) {
+                        alert('For erasure coding, k and m must be greater than 0.');
+                        this.isCalculating = false;
+                        return;
+                    }
+                    if (parseInt(this.k) + parseInt(this.m) <= 0) {
+                        alert('The sum of k and m must be greater than 0.');
+                        this.isCalculating = false;
+                        return;
+                    }
+                    if (this.nodes.length <= parseInt(this.k)) {
+                        alert('The number of nodes must be greater than k.');
+                        this.isCalculating = false;
+                        return;
+                    }
+                }
+
+                // Validate OSD sizes
+                for (let node of this.nodes) {
+                    if (parseFloat(node.osd_size_gb) <= 0) {
+                        alert('The OSD size must be greater than 0.');
+                        this.isCalculating = false;
+                        return;
+                    }
+                    if (parseInt(node.osd_count) <= 0) {
+                        alert('The number of OSDs must be greater than 0.');
+                        this.isCalculating = false;
+                        return;
+                    }
+                }
+
+                // Get CSRF token
+                const csrfToken = document.querySelector('meta[name="csrf-token"]').getAttribute('content');
+
+                // Prepare data for sending
                 const data = {
                     replication_type: this.replicationType,
                     replicas: parseInt(this.replicas),
+                    min_size: parseInt(this.minSize),
                     k: parseInt(this.k),
                     m: parseInt(this.m),
                     nodes: this.nodes.map(node => ({
                         osd_count: parseInt(node.osd_count),
                         osd_size_gb: this.getOsdSizeInGB(parseFloat(node.osd_size_gb))
                     })),
-                    min_size: parseInt(this.minSize),
                     storage_unit: this.storageUnit
                 };
-                this.isCalculating = true;
-
+                // Send request to server
                 fetch('/calculate', {
                     method: 'POST',
                     headers: {
                         'Content-Type': 'application/json',
-                        'X-CSRF-Token': document.querySelector('meta[name="csrf-token"]')?.getAttribute('content')
+                        'X-CSRFToken': csrfToken
                     },
                     body: JSON.stringify(data)
                 })
-                .then(response => response.json())
-                .then(data => {
-                    this.result = data;
+                .then(response => {
+                    if (!response.ok) {
+                        throw new Error('Calculation failed');
+                    }
+                    return response.json();
+                })
+                .then(result => {
+                    this.result = result;
+                    this.isCalculating = false;
                 })
                 .catch(error => {
-                    console.error('Fehler bei der Berechnung:', error);
-                    alert('Es ist ein Fehler bei der Berechnung aufgetreten. 
Bitte versuchen Sie es erneut.'); - }) - .finally(() => { + console.error('Error:', error); this.isCalculating = false; + alert('Error during calculation: ' + error.message); }); }, - + exportConfig() { - const config = { - replicationType: this.replicationType, - replicas: this.replicas, - minSize: this.minSize, - k: this.k, - m: this.m, - storageUnit: this.storageUnit, - nodes: this.nodes, - result: this.result - }; - - const blob = new Blob([JSON.stringify(config, null, 2)], { type: 'application/json' }); - const url = URL.createObjectURL(blob); - const a = document.createElement('a'); - a.href = url; - a.download = 'ceph-config.json'; - a.click(); - URL.revokeObjectURL(url); - }, - - importConfig(event) { - const file = event.target.files[0]; - if (!file) return; - - const reader = new FileReader(); - reader.onload = (e) => { - try { - const config = JSON.parse(e.target.result); - Object.assign(this, config); - // Berechne automatisch nach dem Import - this.calculateCapacity(); - } catch (error) { - alert('Fehler beim Importieren der Konfiguration. Bitte überprüfen Sie das Dateiformat.'); - } - }; - reader.readAsText(file); - }, - - generatePDF() { - if (!this.result.raw_total) { - alert('Bitte führen Sie zuerst eine Berechnung durch.'); - return; - } - const data = { replication_type: this.replicationType, replicas: this.replicas, min_size: this.minSize, k: this.k, m: this.m, - storage_unit: this.storageUnit, nodes: this.nodes, + storage_unit: this.storageUnit, result: this.result }; - + + const dataStr = JSON.stringify(data, null, 2); + const dataUri = 'data:application/json;charset=utf-8,'+ encodeURIComponent(dataStr); + + const exportFileDefaultName = 'ceph-config.json'; + + const linkElement = document.createElement('a'); + linkElement.setAttribute('href', dataUri); + linkElement.setAttribute('download', exportFileDefaultName); + linkElement.click(); + }, + + importConfig(event) { + const file = event.target.files[0]; + if (!file) return; + + const reader = new FileReader(); + reader.onload = (e) => { + try { + const config = JSON.parse(e.target.result); + + // Update local state with imported configuration + this.replicationType = config.replication_type || 'replication'; + this.replicas = config.replicas || 3; + this.minSize = config.min_size || 2; + this.k = config.k || 4; + this.m = config.m || 2; + this.nodes = config.nodes || [{ osd_count: 3, osd_size_gb: 2000 }]; + this.storageUnit = config.storage_unit || 'TB'; + + // If results are included, update those too + if (config.result && config.result.raw_total) { + this.result = config.result; + } + + alert('Configuration successfully imported!'); + } catch (error) { + console.error('Error importing configuration:', error); + alert('Error importing configuration. 
Please check the file format.'); + } + }; + reader.readAsText(file); + }, + + generatePDF() { + // Get CSRF token + const csrfToken = document.querySelector('meta[name="csrf-token"]').getAttribute('content'); + + // Prepare data for sending + const data = { + replication_type: this.replicationType, + replicas: parseInt(this.replicas), + min_size: parseInt(this.minSize), + k: parseInt(this.k), + m: parseInt(this.m), + nodes: this.nodes.map(node => ({ + osd_count: parseInt(node.osd_count), + osd_size_gb: this.getOsdSizeInGB(parseFloat(node.osd_size_gb)) + })), + storage_unit: this.storageUnit, + result: this.result + }; + + // Send request to server fetch('/generate-pdf', { method: 'POST', headers: { 'Content-Type': 'application/json', - 'X-CSRF-Token': document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') + 'X-CSRFToken': csrfToken }, body: JSON.stringify(data) }) - .then(response => response.blob()) + .then(response => { + if (!response.ok) { + throw new Error('PDF generation failed'); + } + return response.blob(); + }) .then(blob => { const url = window.URL.createObjectURL(blob); const a = document.createElement('a'); + a.style.display = 'none'; a.href = url; a.download = 'ceph-report.pdf'; document.body.appendChild(a); a.click(); window.URL.revokeObjectURL(url); - document.body.removeChild(a); }) .catch(error => { - console.error('Fehler bei der PDF-Generierung:', error); - alert('Es ist ein Fehler bei der PDF-Generierung aufgetreten. Bitte versuchen Sie es erneut.'); + console.error('Error:', error); + alert('Error generating PDF: ' + error.message); }); } }; diff --git a/app/utils/__pycache__/ceph_calculator.cpython-313.pyc b/app/utils/__pycache__/ceph_calculator.cpython-313.pyc index eb68309..f2ab2d8 100644 Binary files a/app/utils/__pycache__/ceph_calculator.cpython-313.pyc and b/app/utils/__pycache__/ceph_calculator.cpython-313.pyc differ diff --git a/app/utils/__pycache__/pdf_generator.cpython-313.pyc b/app/utils/__pycache__/pdf_generator.cpython-313.pyc index f82d199..5034ed4 100644 Binary files a/app/utils/__pycache__/pdf_generator.cpython-313.pyc and b/app/utils/__pycache__/pdf_generator.cpython-313.pyc differ diff --git a/app/utils/ceph_calculator.py b/app/utils/ceph_calculator.py index 07c53c6..7e117fc 100644 --- a/app/utils/ceph_calculator.py +++ b/app/utils/ceph_calculator.py @@ -1,19 +1,24 @@ +import logging + +logger = logging.getLogger(__name__) + def calculate_ceph_capacity(replication_type, replicas=3, k=0, m=0, nodes=None, min_size=2, storage_unit='GB'): """ - Berechnet die maximal zulässige Speichernutzung für ein Ceph-Cluster unter Berücksichtigung von Nodeausfällen. - - Parameter: - - replication_type: 'replication' oder 'erasure_coding' - - replicas: Anzahl der Replikate (Standard: 3) - - k: Anzahl der Datenchunks für EC - - m: Anzahl der Codierungschunks für EC - - nodes: Liste der Knoten mit Anzahl der OSDs und deren Größe - [{'osd_count': 4, 'osd_size_gb': 1000}, ...] - - min_size: Minimale Anzahl von Replikaten für I/O-Operationen (Standard: 2) - - storage_unit: 'GB' oder 'TB' (Standard: 'GB') + Calculate the maximum allowed storage usage for a Ceph cluster considering node failures. + Args: + replication_type (str): Either 'replication' or 'erasure_coding' + replicas (int): Number of replicas for replication pools (default: 3) + k (int): Number of data chunks for EC + m (int): Number of coding chunks for EC + nodes (list): List of dictionaries with 'osd_count' and 'osd_size_gb' keys + [{'osd_count': 4, 'osd_size_gb': 1000}, ...] 
+ min_size (int): Minimum number of replicas for I/O operations (default: 2) + storage_unit (str): Storage unit, either 'GB' or 'TB' (default: 'GB') + Returns: - - Dictionary mit max_usage_percent, max_usage_gb, max_usage_tb, raw_total und zusätzlichen Informationen zur Ausfallsicherheit + dict: Dictionary with max_usage_percent, max_usage_gb, max_usage_tb, raw_total and additional + information about fault tolerance """ if nodes is None or len(nodes) == 0: return { @@ -22,14 +27,11 @@ def calculate_ceph_capacity(replication_type, replicas=3, k=0, m=0, nodes=None, 'max_usage_tb': 0, 'raw_total': 0, 'node_failure_tolerance': False, - 'node_failure_info': 'Keine Nodes im Cluster', + 'node_failure_info': 'No nodes in the cluster', 'storage_unit': storage_unit } - # Die Umrechnung von TB zu GB ist bereits im JavaScript-Code erfolgt - # Hier werden die Werte direkt verwendet - - # Berechne den gesamten Rohspeicher und Informationen zu jedem Node + # Calculate total raw storage and information for each node raw_total_gb = 0 node_capacities = [] @@ -40,80 +42,89 @@ def calculate_ceph_capacity(replication_type, replicas=3, k=0, m=0, nodes=None, node_capacities.append(node_capacity) raw_total_gb += node_capacity - # Größter Node (worst-case Szenario bei Ausfall) + # Largest node (worst-case scenario during failure) largest_node_capacity = max(node_capacities) if node_capacities else 0 - # Berechne die nutzbare Kapazität ohne Ausfall + # Calculate usable capacity without failure if replication_type == 'replication': - # Bei Replikation ist der nutzbare Speicher = Rohspeicher / Anzahl der Replikate + # For replication, usable storage = raw storage / number of replicas usable_capacity_gb = raw_total_gb / replicas else: # Erasure Coding - # Bei EC ist der nutzbare Speicher = Rohspeicher * (k / (k + m)) + # For EC, usable storage = raw storage * (k / (k + m)) + if k <= 0 or m <= 0 or (k + m) <= 0: + raise ValueError("Invalid Erasure Coding parameters: k and m must be positive and their sum must be greater than 0") usable_capacity_gb = raw_total_gb * (k / (k + m)) - # Die empfohlene maximale Auslastung für den Normalfall (ohne Ausfall) + # Recommended maximum utilization for normal case (without failure) max_recommended_usage_percent = 80 - # Berechne die OSD-Auslastung nach der Formel x = (s × p) / (s + 1) - # wobei s = Anzahl der OSDs pro Server und p = Prozentsatz der Gesamtauslastung - osds_per_server = nodes[0].get('osd_count', 0) if nodes else 0 + # Calculate OSD utilization using the formula x = (s × p) / (s + 1) + # where s = number of OSDs per server and p = percentage of total utilization + osds_per_server = int(nodes[0].get('osd_count', 0)) if nodes else 0 osd_usage_percent = (osds_per_server * max_recommended_usage_percent) / (osds_per_server + 1) - # Finde die größte OSD-Größe für die Berechnung der Kapazität nach OSD-Ausfall - largest_osd_size = max((node.get('osd_size_gb', 0) for node in nodes), default=0) + # Find largest OSD size for calculating capacity after OSD failure + largest_osd_size = max((float(node.get('osd_size_gb', 0)) for node in nodes), default=0) - # Berechne die nutzbare Kapazität nach OSD-Ausfall + # Calculate usable capacity after OSD failure raw_after_osd_failure = raw_total_gb - largest_osd_size if replication_type == 'replication': usable_after_osd_failure = raw_after_osd_failure / replicas else: usable_after_osd_failure = raw_after_osd_failure * (k / (k + m)) - # Berechne die maximale sichere Nutzung unter Berücksichtigung von OSD-Ausfall + # Calculate maximum safe 
usage considering OSD failure
     max_usage_gb = min(
-        usable_capacity_gb * (osd_usage_percent / 100),  # OSD-Auslastung basierend auf der Formel
-        usable_after_osd_failure * 0.8  # 80% der Kapazität nach OSD-Ausfall
+        usable_capacity_gb * (osd_usage_percent / 100),  # OSD utilization based on formula
+        usable_after_osd_failure * 0.8  # 80% of capacity after OSD failure
     )
-    max_usage_tb = max_usage_gb / 1024
-    # Berechnung der Ausfalltoleranz unter Berücksichtigung von min_size
+    # The TB value is always derived from the GB value for display
+    max_usage_tb = max_usage_gb / 1024
+
+    # Calculate fault tolerance considering min_size
     if replication_type == 'replication':
         max_failure_nodes = min(
-            len(nodes) - min_size,  # Maximale Ausfälle basierend auf min_size
-            replicas - min_size  # Maximale Ausfälle basierend auf Replikationsfaktor
+            len(nodes) - min_size,  # Maximum failures based on min_size
+            replicas - min_size  # Maximum failures based on replication factor
         )
     else:  # Erasure Coding
         max_failure_nodes = min(
-            len(nodes) - (k + 1),  # Mindestens k+1 Nodes müssen verfügbar bleiben
-            m  # Maximale Anzahl von Codierungschunks die ausfallen können
+            len(nodes) - (k + 1),  # At least k+1 nodes must remain available
+            m  # Maximum number of coding chunks that can fail
         )
-    # Sortiere die Nodes nach Größe absteigend für Worst-Case-Analyse
+    # Sort nodes by size in descending order for worst-case analysis
     node_capacities_sorted = sorted(node_capacities, reverse=True)
-    # Kapazität nach Ausfall der größten N Nodes
+    # Capacity after failure of the largest N nodes
     raw_after_max_failures_gb = raw_total_gb
     for i in range(min(max_failure_nodes, len(node_capacities_sorted))):
        raw_after_max_failures_gb -= node_capacities_sorted[i]
-    # Nutzbare Kapazität nach maximalen tolerierbaren Ausfällen
+    # Usable capacity after maximum tolerable failures
     if replication_type == 'replication':
         usable_after_max_failures_gb = raw_after_max_failures_gb / min_size
     else:  # Erasure Coding
         remaining_m = m - max_failure_nodes
+        if remaining_m <= 0:
+            raise ValueError("Invalid Erasure Coding configuration: remaining coding chunks must be positive")
         usable_after_max_failures_gb = raw_after_max_failures_gb * (k / (k + remaining_m))
-    # Berechne die nutzbare Kapazität nach Ausfall des größten Nodes
+    # Calculate usable capacity after failure of largest node
     raw_after_failure_gb = raw_total_gb - largest_node_capacity
     if replication_type == 'replication':
         usable_after_failure_gb = raw_after_failure_gb / min_size
     else:  # Erasure Coding
         usable_after_failure_gb = raw_after_failure_gb * (k / (k + m))
-
-    # Prüfen, ob genügend Speicherplatz nach einem Nodeausfall vorhanden ist
+
+    # Check if there is enough storage space after a node failure
     node_failure_tolerance = True
-    # Prüfe die Mindestanforderungen für Nodes
+    # Check minimum requirements for nodes
     if replication_type == 'replication':
         if len(nodes) < min_size:
             node_failure_tolerance = False
@@ -125,7 +136,7 @@ def calculate_ceph_capacity(replication_type, replicas=3, k=0, m=0, nodes=None,
     elif usable_after_failure_gb < max_usage_gb:
         node_failure_tolerance = False
-    # Für multiple Ausfälle prüfen
+    # Check for multiple failures
     multi_failure_tolerance = False
     if max_failure_nodes > 0:
         multi_failure_tolerance = (
@@ -133,7 +144,7 @@ def calculate_ceph_capacity(replication_type, replicas=3, k=0, m=0, nodes=None,
             len(nodes) > max_failure_nodes
         )
-    # Maximale sichere Nutzung unter Berücksichtigung eines möglichen 
Nodeausfalls + # Maximum safe usage considering a possible node failure safe_usage_percent = 0 safe_usage_gb = 0 safe_usage_tb = 0 @@ -145,57 +156,57 @@ def calculate_ceph_capacity(replication_type, replicas=3, k=0, m=0, nodes=None, safe_usage_tb = max_usage_tb if multi_failure_tolerance and max_failure_nodes > 1: - node_failure_info = f"Der Cluster kann den Ausfall von bis zu {max_failure_nodes} Nodes tolerieren (min_size={min_size})." + node_failure_info = f"The cluster can tolerate failure of up to {max_failure_nodes} nodes (min_size={min_size})." else: - node_failure_info = f"Der Cluster kann einen Ausfall des größten Nodes tolerieren (min_size={min_size})." + node_failure_info = f"The cluster can tolerate failure of the largest node (min_size={min_size})." else: safe_usage_percent = round((usable_after_failure_gb / usable_capacity_gb) * 100 * 0.8) safe_usage_gb = usable_after_failure_gb * 0.8 safe_usage_tb = safe_usage_gb / 1024 if len(nodes) <= (min_size if replication_type == 'replication' else k + m - min(m, 1)): - node_failure_info = f"KRITISCH: Zu wenige Nodes ({len(nodes)}) für die konfigurierte min_size={min_size}. " - node_failure_info += f"Mindestens {min_size + 1 if replication_type == 'replication' else k + m + 1 - min(m, 1)} Nodes benötigt." + node_failure_info = f"CRITICAL: Too few nodes ({len(nodes)}) for the configured min_size={min_size}. " + node_failure_info += f"At least {min_size + 1 if replication_type == 'replication' else k + m + 1 - min(m, 1)} nodes needed." else: - # Unit für die Anzeige + # Unit for display unit_display = "TB" if storage_unit == "TB" else "GB" node_size_display = round(largest_node_capacity / 1024, 2) if storage_unit == "TB" else round(largest_node_capacity, 2) - node_failure_info = (f"WARNUNG: Der Cluster hat nicht genügend freie Kapazität, um einen Ausfall des größten Nodes " - f"({node_size_display} {unit_display}) zu tolerieren. " - f"Maximale sichere Nutzung: {safe_usage_percent}%") + node_failure_info = (f"WARNING: The cluster does not have enough free capacity to tolerate a failure of the largest node " + f"({node_size_display} {unit_display}). " + f"Maximum safe usage: {safe_usage_percent}%") - # Berechnung für den Ausfall eines einzelnen OSD + # Calculate for single OSD failure osd_failure_tolerance = False - osd_failure_info = "Keine OSDs im Cluster" + osd_failure_info = "No OSDs in the cluster" - if nodes and any(node.get('osd_count', 0) > 0 for node in nodes): + if nodes and any(int(node.get('osd_count', 0)) > 0 for node in nodes): osd_failure_tolerance = usable_after_osd_failure >= max_usage_gb - # Unit für die Anzeige + # Unit for display unit_display = "TB" if storage_unit == "TB" else "GB" osd_size_display = round(largest_osd_size / 1024, 2) if storage_unit == "TB" else round(largest_osd_size, 2) - osd_failure_info = f"Der Cluster kann den Ausfall des größten OSD tolerieren (min_size={min_size})." if osd_failure_tolerance else \ - f"WARNUNG: Der Cluster hat nicht genügend freie Kapazität, um den Ausfall des größten OSD ({osd_size_display} {unit_display}) zu tolerieren." + osd_failure_info = f"The cluster can tolerate failure of the largest OSD (min_size={min_size})." if osd_failure_tolerance else \ + f"WARNING: The cluster does not have enough free capacity to tolerate failure of the largest OSD ({osd_size_display} {unit_display})." 
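[Editor's aside on the tolerance rule above: the number of node failures the cluster may absorb reduces to two `min()` expressions. A standalone restatement of that logic, illustrative and not part of the diff:]

```python
# Restatement of the max_failure_nodes rule from ceph_calculator.py (illustrative).

def max_failure_nodes(replication_type, num_nodes, replicas=3, min_size=2, k=0, m=0):
    if replication_type == 'replication':
        # min_size copies must stay reachable, and only `replicas` copies exist
        return min(num_nodes - min_size, replicas - min_size)
    # erasure coding: at least k+1 nodes must survive, at most m chunks may be lost
    return min(num_nodes - (k + 1), m)

print(max_failure_nodes('replication', num_nodes=5, replicas=3, min_size=2))   # 1
print(max_failure_nodes('erasure_coding', num_nodes=8, k=4, m=2))              # 2
```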
-    # Rückgabewerte
+    # Return values with proper unit conversion
     result = {
         'max_usage_percent': round(osd_usage_percent, 2),
-        'max_usage_gb': round(max_usage_gb if node_failure_tolerance else safe_usage_gb, 2),
-        'max_usage_tb': round(max_usage_tb if node_failure_tolerance else safe_usage_tb, 2),
-        'raw_total': round(raw_total_gb, 2),
+        'max_usage_gb': round(max_usage_gb, 2),
+        'max_usage_tb': round(max_usage_tb, 2),
+        'raw_total': round(raw_total_gb / 1024, 2) if storage_unit == 'TB' else round(raw_total_gb, 2),
         'node_failure_tolerance': node_failure_tolerance,
         'node_failure_info': node_failure_info,
         'multi_failure_tolerance': multi_failure_tolerance,
         'max_failure_nodes': max_failure_nodes,
         'osd_failure_tolerance': osd_failure_tolerance,
         'osd_failure_info': osd_failure_info,
-        'largest_node_gb': round(largest_node_capacity, 2),
-        'raw_after_failure_gb': round(raw_after_failure_gb, 2),
-        'usable_after_failure_gb': round(usable_after_failure_gb, 2),
-        'raw_after_max_failures_gb': round(raw_after_max_failures_gb, 2),
-        'usable_after_max_failures_gb': round(usable_after_max_failures_gb, 2),
+        'largest_node_gb': round(largest_node_capacity / 1024, 2) if storage_unit == 'TB' else round(largest_node_capacity, 2),
+        'raw_after_failure_gb': round(raw_after_failure_gb / 1024, 2) if storage_unit == 'TB' else round(raw_after_failure_gb, 2),
+        'usable_after_failure_gb': round(usable_after_failure_gb / 1024, 2) if storage_unit == 'TB' else round(usable_after_failure_gb, 2),
+        'raw_after_max_failures_gb': round(raw_after_max_failures_gb / 1024, 2) if storage_unit == 'TB' else round(raw_after_max_failures_gb, 2),
+        'usable_after_max_failures_gb': round(usable_after_max_failures_gb / 1024, 2) if storage_unit == 'TB' else round(usable_after_max_failures_gb, 2),
         'min_size': min_size,
         'osds_per_server': osds_per_server,
         'storage_unit': storage_unit
diff --git a/app/utils/pdf_generator.py b/app/utils/pdf_generator.py
index 0bdcc8e..728d0d8 100644
--- a/app/utils/pdf_generator.py
+++ b/app/utils/pdf_generator.py
@@ -1,68 +1,78 @@
 from reportlab.lib import colors
 from reportlab.lib.pagesizes import A4
-from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
+from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, Image
 from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
 from reportlab.lib.units import inch
 from datetime import datetime
 import logging
+import os
+from reportlab.pdfgen import canvas

 logger = logging.getLogger(__name__)

+# Thomas-Krenn color scheme
+TK_ORANGE = colors.HexColor('#F7941D')
+TK_DARK_GRAY = colors.HexColor('#333333')
+TK_LIGHT_GRAY = colors.HexColor('#F5F5F5')
+TK_WHITE = colors.white
+TK_GREEN = colors.HexColor('#059669')
+TK_RED = colors.HexColor('#dc2626')
+
 def validate_data(data):
-    """Überprüft, ob alle erforderlichen Daten vorhanden sind."""
+    """Checks if all required data is present."""
     required_fields = ['replication_type', 'nodes', 'result']
     for field in required_fields:
         if field not in data:
-            raise ValueError(f"Fehlendes Feld: {field}")
+            raise ValueError(f"Missing field: {field}")

     if not data['nodes']:
-        raise ValueError("Keine Nodes definiert")
+        raise ValueError("No nodes defined")

     if not data['result'].get('raw_total'):
-        raise ValueError("Keine Berechnungsergebnisse vorhanden")
+        raise ValueError("No calculation results available")

 def safe_int(value, default=0):
-    """Konvertiert einen Wert sicher in einen Integer."""
+    """Safely converts a value to an integer."""
     if value is None:
         return 
default
     try:
         return int(str(value).strip())
     except (ValueError, TypeError):
-        logger.warning(f"Konnte Wert '{value}' nicht in Integer konvertieren. Verwende Standardwert {default}")
+        logger.warning(f"Could not convert value '{value}' to integer. Using default value {default}")
         return default

 def safe_float(value, default=0.0):
-    """Konvertiert einen Wert sicher in einen Float."""
+    """Safely converts a value to a float."""
     if value is None:
         return default
     try:
         return float(str(value).strip())
     except (ValueError, TypeError):
-        logger.warning(f"Konnte Wert '{value}' nicht in Float konvertieren. Verwende Standardwert {default}")
+        logger.warning(f"Could not convert value '{value}' to float. Using default value {default}")
         return default

 def generate_pdf_report(data):
     """
-    Generiert einen PDF-Report mit den Ceph-Konfigurationsdaten und Ergebnissen.
+    Generates a PDF report with Ceph configuration data and results.

     Args:
-        data (dict): Dictionary mit Konfigurations- und Ergebnisdaten
+        data (dict): Dictionary with configuration and result data

     Returns:
-        bytes: PDF-Datei als Bytes
+        bytes: PDF file as bytes

     Raises:
-        ValueError: Wenn erforderliche Daten fehlen
+        ValueError: If required data is missing
     """
     try:
-        # Validiere Eingabedaten
+        # Validate input data
         validate_data(data)

-        # Erstelle einen temporären Buffer für die PDF
+        # Create a temporary buffer for the PDF
         from io import BytesIO
         buffer = BytesIO()

-        # Erstelle das PDF-Dokument
+        # Create the PDF document
         doc = SimpleDocTemplate(
             buffer,
             pagesize=A4,
@@ -70,148 +80,247 @@ def generate_pdf_report(data):
             leftMargin=72,
             topMargin=72,
             bottomMargin=72,
-            title="Ceph Storage Kapazitätsreport"
+            title="Ceph Storage Capacity Report"
         )

-        # Styles definieren
+        # Define styles
         styles = getSampleStyleSheet()
-        title_style = styles['Heading1']
-        heading_style = styles['Heading2']
-        normal_style = styles['Normal']
-        # Custom Style für Warnungen
+        # Custom styles with white text color due to the gray page background
+        title_style = ParagraphStyle(
+            'CustomTitle',
+            parent=styles['Heading1'],
+            fontSize=24,
+            spaceAfter=30,
+            textColor=TK_WHITE
+        )
+
+        heading_style = ParagraphStyle(
+            'CustomHeading',
+            parent=styles['Heading2'],
+            fontSize=16,
+            spaceAfter=12,
+            textColor=TK_ORANGE
+        )
+
+        normal_style = ParagraphStyle(
+            'CustomNormal',
+            parent=styles['Normal'],
+            fontSize=10,
+            spaceAfter=6,
+            textColor=TK_WHITE
+        )
+
+        # Custom Style for warnings
         warning_style = ParagraphStyle(
             'Warning',
             parent=styles['Normal'],
-            textColor=colors.red,
-            fontSize=10
+            textColor=TK_RED,
+            fontSize=10,
+            spaceAfter=6
         )

-        # Story (Inhalt) erstellen
+        # Create story (content)
         story = []

-        # Titel
-        story.append(Paragraph("Ceph Storage Kapazitätsreport", title_style))
-        story.append(Spacer(1, 12))
-        story.append(Paragraph(f"Generiert am: {datetime.now().strftime('%d.%m.%Y %H:%M')}", normal_style))
+        # Background color and canvas adjustment
+        # Define the background color
+        TK_BACKGROUND_GRAY = colors.HexColor('#1f2937')  # Dark gray as on the website (dark-bg)
+
+        # Helper that draws the page background
+        def add_page_background(canvas, doc):
+            canvas.saveState()
+            canvas.setFillColor(TK_BACKGROUND_GRAY)
+            canvas.rect(0, 0, doc.pagesize[0], doc.pagesize[1], fill=True, stroke=False)
+            canvas.restoreState()
+
+        # Title with modern styling
+        story.append(Paragraph("Ceph Storage Capacity Report", title_style))
+        story.append(Paragraph(f"Generated on: {datetime.now().strftime('%d.%m.%Y %H:%M')}", normal_style))
         story.append(Spacer(1, 20))

-        # 
Konfigurationsübersicht - story.append(Paragraph("Konfigurationsübersicht", heading_style)) + # Configuration overview with modern styling + story.append(Paragraph("Configuration Overview", heading_style)) story.append(Spacer(1, 12)) - # Replikationstyp + # Replication type with modern table style config_data = [ - ["Replikationstyp", data['replication_type']], - ["Speichereinheit", data.get('storage_unit', 'GB')] + ["Replication Type", data['replication_type']], + ["Storage Unit", data.get('storage_unit', 'GB')] ] + # Add replication-specific parameters if data['replication_type'] == 'replication': config_data.extend([ - ["Anzahl Replikate", str(safe_int(data.get('replicas', 3)))], - ["Min. Replikate (min_size)", str(safe_int(data.get('min_size', 2)))] + ["Number of Replicas", str(data.get('replicas', 3))], + ["Minimum Size", str(data.get('min_size', 2))] ]) - else: + else: # Erasure Coding config_data.extend([ - ["Datenchunks (k)", str(safe_int(data.get('k', 4)))], - ["Codierungschunks (m)", str(safe_int(data.get('m', 2)))] + ["Data Chunks (k)", str(data.get('k', 4))], + ["Coding Chunks (m)", str(data.get('m', 2))] ]) - # Nodes und OSDs - story.append(Paragraph("Nodes und OSDs", heading_style)) - story.append(Spacer(1, 12)) - - for i, node in enumerate(data['nodes'], 1): - story.append(Paragraph(f"Node {i}", heading_style)) - story.append(Spacer(1, 6)) - - osd_count = safe_int(node.get('osd_count', 1)) - osd_size = safe_float(node.get('osd_size_gb', 0)) - - if osd_count <= 0: - logger.warning(f"Ungültige OSD-Anzahl für Node {i}: {osd_count}. Verwende Standardwert 1.") - osd_count = 1 - - osd_data = [] - for j in range(osd_count): - osd_data.append([f"OSD {j+1}", f"{osd_size} {data.get('storage_unit', 'GB')}"]) - - t = Table(osd_data, colWidths=[2*inch, 2*inch]) - t.setStyle(TableStyle([ - ('BACKGROUND', (0, 0), (-1, -1), colors.white), - ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), - ('ALIGN', (0, 0), (-1, -1), 'LEFT'), - ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'), - ('FONTSIZE', (0, 0), (-1, -1), 10), - ('BOTTOMPADDING', (0, 0), (-1, -1), 12), - ('GRID', (0, 0), (-1, -1), 1, colors.black) - ])) - story.append(t) - story.append(Spacer(1, 12)) - - # Ergebnisse - story.append(Paragraph("Ergebnisse", heading_style)) - story.append(Spacer(1, 12)) - - result_data = [ - ["Gesamt-Rohspeicher", f"{safe_float(data['result']['raw_total'])} GB ({(safe_float(data['result']['raw_total']) / 1024):.2f} TB)"], - ["Max. empfohlene Nutzung", f"{safe_float(data['result'].get('max_usage_percent', 0))}%"], - ["Max. 
nutzbarer Speicher", f"{safe_float(data['result'].get('max_usage_gb', 0))} GB ({safe_float(data['result'].get('max_usage_tb', 0))} TB)"] - ] - - t = Table(result_data, colWidths=[3*inch, 3*inch]) + # Modern table style for configurations + t = Table(config_data, colWidths=[3*inch, 3*inch]) t.setStyle(TableStyle([ - ('BACKGROUND', (0, 0), (-1, -1), colors.white), - ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), + ('BACKGROUND', (0, 0), (-1, 0), TK_ORANGE), + ('TEXTCOLOR', (0, 0), (-1, 0), TK_WHITE), ('ALIGN', (0, 0), (-1, -1), 'LEFT'), - ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'), - ('FONTSIZE', (0, 0), (-1, -1), 10), - ('BOTTOMPADDING', (0, 0), (-1, -1), 12), - ('GRID', (0, 0), (-1, -1), 1, colors.black) + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, 0), 12), + ('BOTTOMPADDING', (0, 0), (-1, 0), 12), + ('BACKGROUND', (0, 1), (-1, -1), TK_DARK_GRAY), + ('TEXTCOLOR', (0, 1), (-1, -1), TK_WHITE), + ('FONTNAME', (0, 1), (-1, -1), 'Helvetica'), + ('FONTSIZE', (0, 1), (-1, -1), 10), + ('ALIGN', (0, 1), (-1, -1), 'LEFT'), + ('GRID', (0, 0), (-1, -1), 1, TK_LIGHT_GRAY) ])) story.append(t) - # Ausfalltoleranz + # Add nodes information with modern styling story.append(Spacer(1, 20)) - story.append(Paragraph("Ausfalltoleranz-Analyse", heading_style)) + story.append(Paragraph("Nodes Configuration", heading_style)) story.append(Spacer(1, 12)) - # Node-Ausfalltoleranz - node_tolerance = "Vorhanden" if data['result'].get('node_failure_tolerance', False) else "Nicht vorhanden" - story.append(Paragraph(f"Node-Ausfalltoleranz: {node_tolerance}", normal_style)) - story.append(Paragraph(data['result'].get('node_failure_info', 'Keine Informationen verfügbar'), normal_style)) + nodes_data = [["Node", "OSD Count", f"OSD Size ({data.get('storage_unit', 'GB')})"]] + for i, node in enumerate(data.get('nodes', []), 1): + osd_size = safe_float(node.get('osd_size_gb', 0)) + if data.get('storage_unit') == 'TB': + osd_size = osd_size / 1024 # Convert GB to TB for display + nodes_data.append([ + f"Node {i}", + str(node.get('osd_count', 0)), + f"{osd_size:.2f}" + ]) + + # Nodes table + t = Table(nodes_data, colWidths=[2*inch, 2*inch, 2*inch]) + t.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), TK_ORANGE), + ('TEXTCOLOR', (0, 0), (-1, 0), TK_WHITE), + ('ALIGN', (0, 0), (-1, -1), 'CENTER'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, 0), 12), + ('BOTTOMPADDING', (0, 0), (-1, 0), 12), + ('BACKGROUND', (0, 1), (-1, -1), TK_DARK_GRAY), + ('TEXTCOLOR', (0, 1), (-1, -1), TK_WHITE), + ('FONTNAME', (0, 1), (-1, -1), 'Helvetica'), + ('FONTSIZE', (0, 1), (-1, -1), 10), + ('ALIGN', (0, 1), (-1, -1), 'CENTER'), + ('GRID', (0, 0), (-1, -1), 1, TK_LIGHT_GRAY) + ])) + story.append(t) + + # Results with modern styling + story.append(Spacer(1, 20)) + story.append(Paragraph("Results", heading_style)) + story.append(Spacer(1, 12)) + + result_data = [ + ["Total Raw Storage", f"{safe_float(data['result']['raw_total'])} {data.get('storage_unit', 'GB')}"], + ["Max. Recommended Usage", f"{safe_float(data['result'].get('max_usage_percent', 0))}%"], + ["Max. 
Usable Storage", f"{safe_float(data['result'].get('max_usage_tb' if data.get('storage_unit') == 'TB' else 'max_usage_gb', 0))} {data.get('storage_unit', 'GB')}"] + ] + + # Results table + t = Table(result_data, colWidths=[3*inch, 3*inch]) + t.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), TK_ORANGE), + ('TEXTCOLOR', (0, 0), (-1, 0), TK_WHITE), + ('ALIGN', (0, 0), (-1, -1), 'LEFT'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, 0), 12), + ('BOTTOMPADDING', (0, 0), (-1, 0), 12), + ('BACKGROUND', (0, 1), (-1, -1), TK_DARK_GRAY), + ('TEXTCOLOR', (0, 1), (-1, -1), TK_WHITE), + ('FONTNAME', (0, 1), (-1, -1), 'Helvetica'), + ('FONTSIZE', (0, 1), (-1, -1), 10), + ('ALIGN', (0, 1), (-1, -1), 'LEFT'), + ('GRID', (0, 0), (-1, -1), 1, TK_LIGHT_GRAY) + ])) + story.append(t) + + # Fault tolerance with modern styling + story.append(Spacer(1, 20)) + story.append(Paragraph("Fault Tolerance Analysis", heading_style)) + story.append(Spacer(1, 12)) + + # Node fault tolerance with status indicator + node_tolerance = "Available" if data['result'].get('node_failure_tolerance', False) else "Not Available" + status_color = TK_GREEN if node_tolerance == "Available" else TK_RED + status_style = ParagraphStyle( + 'Status', + parent=normal_style, + textColor=status_color, + fontSize=12, + spaceAfter=6 + ) + story.append(Paragraph(f"Node Fault Tolerance: {node_tolerance}", status_style)) + story.append(Paragraph(data['result'].get('node_failure_info', 'No information available'), normal_style)) if data['result'].get('multi_failure_tolerance', False): - story.append(Paragraph(f"Multi-Node Ausfalltoleranz: {safe_int(data['result'].get('max_failure_nodes', 0))} Nodes", normal_style)) + story.append(Paragraph(f"Multi-Node Fault Tolerance: {safe_int(data['result'].get('max_failure_nodes', 0))} Nodes", normal_style)) - # OSD-Ausfalltoleranz + # OSD fault tolerance with status indicator story.append(Spacer(1, 12)) - osd_tolerance = "Vorhanden" if data['result'].get('osd_failure_tolerance', False) else "Eingeschränkt" - story.append(Paragraph(f"OSD-Ausfalltoleranz: {osd_tolerance}", normal_style)) - story.append(Paragraph(data['result'].get('osd_failure_info', 'Keine Informationen verfügbar'), normal_style)) + osd_tolerance = "Available" if data['result'].get('osd_failure_tolerance', False) else "Limited" + status_color = TK_GREEN if osd_tolerance == "Available" else TK_RED + status_style = ParagraphStyle( + 'Status', + parent=normal_style, + textColor=status_color, + fontSize=12, + spaceAfter=6 + ) + story.append(Paragraph(f"OSD Fault Tolerance: {osd_tolerance}", status_style)) + story.append(Paragraph(data['result'].get('osd_failure_info', 'No information available'), normal_style)) - # Empfehlungen + # Recommendations mit besserem Kontrast für den dunklen Hintergrund story.append(Spacer(1, 20)) - story.append(Paragraph("Empfehlungen", heading_style)) + story.append(Paragraph("Recommendations", heading_style)) story.append(Spacer(1, 12)) recommendations = [ - "Stelle sicher, dass nach Nodeausfällen genügend Kapazität vorhanden ist, um die 'full ratio' nicht zu erreichen.", - "Bei Replikation sollte die Anzahl der Nodes größer sein als der Replikationsfaktor.", - "Bei Erasure Coding sollte die Anzahl der Nodes größer sein als k+m (Datenchunks + Codierungschunks).", - "Die Performance bei der Wiederherstellung hängt von der Netzwerk- und Speichergeschwindigkeit ab.", - "Beachte, dass nach einem Nodeausfall das Wiederausbalancieren des Clusters länger dauert, je größer der ausgefallene 
Node ist." + "Ensure that after node failures, enough capacity is available to avoid reaching the 'full ratio'.", + "For replication, the number of nodes should be greater than the replication factor.", + "For erasure coding, the number of nodes should be greater than k+m (data chunks + coding chunks).", + "Recovery performance depends on network and storage speed.", + "Note that after a node failure, rebalancing the cluster takes longer the larger the failed node is." ] + # Erstelle Box für Empfehlungen mit besserer Lesbarkeit + recommendations_text = [] for rec in recommendations: - story.append(Paragraph(f"• {rec}", normal_style)) + recommendations_text.append(Paragraph(f"• {rec}", normal_style)) - # PDF generieren - doc.build(story) + recommendation_table = Table([[item] for item in recommendations_text], colWidths=[6*inch]) + recommendation_table.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, -1), TK_DARK_GRAY), + ('TEXTCOLOR', (0, 0), (-1, -1), TK_WHITE), + ('ALIGN', (0, 0), (-1, -1), 'LEFT'), + ('VALIGN', (0, 0), (-1, -1), 'TOP'), + ('GRID', (0, 0), (-1, -1), 1, TK_LIGHT_GRAY) + ])) + story.append(recommendation_table) - # PDF-Bytes zurückgeben + # Footer with Thomas-Krenn.AG branding + story.append(Spacer(1, 30)) + footer_style = ParagraphStyle( + 'Footer', + parent=normal_style, + fontSize=8, + textColor=colors.HexColor('#9ca3af') # Helleres Grau für bessere Lesbarkeit auf dunklem Hintergrund + ) + story.append(Paragraph("Generated by Thomas-Krenn.AG Ceph Storage Calculator", footer_style)) + + # Generate PDF with background + doc.build(story, onFirstPage=add_page_background, onLaterPages=add_page_background) + + # Return PDF bytes return buffer.getvalue() except Exception as e: - logger.error(f"Fehler bei der PDF-Generierung: {str(e)}") + logger.error(f"Error during PDF generation: {str(e)}") raise \ No newline at end of file