Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import csv
- import os
- import time
- import zipfile
- import logging
- import math
- from datetime import datetime, timezone
- from flask import Flask, jsonify, request
- from flask_cors import CORS
- import pandas as pd
# Logging setup for the whole script.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

# Constants.
CACHE_DURATION = 5  # presumably seconds; not referenced in this file — TODO confirm
BASE_DIR = r"D:\skrypty\Grafanaskrypt_prealpha"

# Module-level state shared by the Flask handlers below.
cached_data = None       # rows loaded so far (list of dicts), or None before first load
historical_data = []     # rows served by the HTTP endpoints
last_cache_time = 0      # time.time() of the last successful cache update

app = Flask(__name__)
CORS(app)
def get_file_path():
    """Prompt interactively for a data file name and return its full path.

    Loops until the user names a file that exists under ``BASE_DIR``.
    A ``.zip`` answer is handed to :func:`extract_zip`, whose result is
    returned instead (which may be ``None`` if the archive holds no CSV/TXT).
    """
    while True:
        name = input("Podaj nazwę pliku (np. data.csv lub data.zip): ").strip()
        candidate = os.path.join(BASE_DIR, name)
        if not os.path.exists(candidate):
            logging.error(f"Plik nie istnieje: {candidate}")
            continue
        if candidate.lower().endswith('.zip'):
            return extract_zip(candidate)
        logging.info(f"Wybrano plik: {candidate}")
        return candidate
def extract_zip(zip_path):
    """Unpack *zip_path* into ``BASE_DIR/extracted`` and return the first CSV/TXT.

    Returns ``None`` when the archive contains no CSV/TXT file or when
    extraction fails; errors are logged rather than raised.
    """
    target_dir = os.path.join(BASE_DIR, "extracted")
    os.makedirs(target_dir, exist_ok=True)
    try:
        with zipfile.ZipFile(zip_path, 'r') as archive:
            archive.extractall(target_dir)
        for entry in os.listdir(target_dir):
            entry_path = os.path.join(target_dir, entry)
            # First CSV/TXT found wins (os.listdir order).
            if os.path.isfile(entry_path) and entry.lower().endswith(('.csv', '.txt')):
                logging.info(f"Wybrano plik z archiwum: {entry_path}")
                return entry_path
        logging.error("Brak plików CSV lub TXT w archiwum ZIP")
        return None
    except Exception as e:
        logging.error(f"Błąd rozpakowywania ZIP: {str(e)}")
        return None
def parse_timestamp(raw_timestamp):
    """Convert a timestamp in one of several formats to an ISO-8601 UTC string.

    Attempts, in order:
      1. a numeric Unix timestamp (a decimal comma is tolerated),
      2. a set of known ``strptime`` patterns, interpreted as UTC.
    Falls back to the current UTC time when nothing matches (and logs an error).
    """
    text = str(raw_timestamp).strip()

    # Numeric Unix timestamp first; normalize a decimal comma to a dot.
    try:
        return datetime.fromtimestamp(
            float(text.replace(',', '.')), tz=timezone.utc
        ).isoformat()
    except (ValueError, OverflowError):
        pass

    known_formats = (
        "%d.%m.%Y %H:%M:%S",  # e.g. 23.02.2025 12:34:56
        "%Y-%m-%d %H:%M:%S",  # e.g. 2025-02-23 12:34:56
        "%Y/%m/%d %H:%M:%S",  # e.g. 2025/02/23 12:34:56
        "%d-%m-%Y %H:%M:%S",  # e.g. 23-02-2025 12:34:56
        "%Y%m%d %H%M%S",      # e.g. 20250223 123456
    )
    for pattern in known_formats:
        try:
            parsed = datetime.strptime(text, pattern)
        except ValueError:
            continue
        return parsed.replace(tzinfo=timezone.utc).isoformat()

    logging.error(f"Nieznany format czasu: {text}")
    return datetime.now(timezone.utc).isoformat()
def detect_delimiter(file_path):
    """Guess the column separator used by *file_path*.

    Inspects only the first line, checking ';', ',' and tab in that order of
    priority; defaults to ';' when none of them occurs.
    """
    with open(file_path, 'r', encoding='utf-8-sig') as handle:
        header = handle.readline()
    return next((d for d in (';', ',', '\t') if d in header), ';')
def load_and_parse_data(file_path):
    """Read a delimited data file and return its rows as timestamp/values dicts.

    Expects a 'Aufnahme Zeit' (recording time) column; every other column is
    treated as a numeric metric.  Returns a list of
    ``{'timestamp': iso_string, 'values': {column: float_or_None}}`` entries,
    or ``None`` when the file cannot be processed (errors are logged).
    """
    try:
        delimiter = detect_delimiter(file_path)
        df = pd.read_csv(
            file_path,
            delimiter=delimiter,
            # Fix: mirror detect_delimiter()'s utf-8-sig handling.  Without an
            # explicit encoding a BOM would stay glued to the first column
            # name and the 'Aufnahme Zeit' check below could fail.
            encoding='utf-8-sig',
            skiprows=[1],        # second physical row is skipped — presumably a units row, TODO confirm
            dtype=str,           # keep raw text; numeric conversion is done per cell below
            on_bad_lines='warn'
        )
        df = df.rename(columns=lambda x: x.strip())
        if 'Aufnahme Zeit' not in df.columns:
            raise ValueError("Brak kolumny 'Aufnahme Zeit'")
        # Every column except the timestamp column is a value column.
        value_columns = [col for col in df.columns if col != 'Aufnahme Zeit']
        results = []
        for _, row in df.iterrows():
            try:
                timestamp = parse_timestamp(row['Aufnahme Zeit'])
                values = {}
                for col in value_columns:
                    raw_value = row[col] if col in row else ""
                    # Decimal commas are normalized before float conversion.
                    raw_value = str(raw_value).strip().replace(',', '.')
                    if raw_value == "":
                        values[col] = None
                    else:
                        try:
                            num_value = float(raw_value)
                            # Missing cells read back as NaN; store them as None.
                            if math.isnan(num_value):
                                num_value = None
                            values[col] = num_value
                        except ValueError:
                            values[col] = None
                            logging.warning(f"Niepoprawna wartość w kolumnie '{col}': {raw_value}")
                results.append({
                    'timestamp': timestamp,
                    'values': values
                })
            except Exception as e:
                # A bad row is logged and skipped; the rest of the file still loads.
                logging.error(f"Błąd przetwarzania wiersza: {str(e)}")
                continue
        return results
    except Exception as e:
        logging.error(f"Błąd przetwarzania pliku: {str(e)}")
        return None
def update_cache():
    """Load the user-selected file and merge its rows into the shared cache.

    Newly parsed rows are appended to ``cached_data`` as-is (no
    de-duplication), ``historical_data`` is re-pointed at the same list and
    ``last_cache_time`` is stamped.  Does nothing when no file was selected.
    """
    global cached_data, historical_data, last_cache_time

    path = get_file_path()
    if not path:
        return

    parsed = load_and_parse_data(path)
    if not parsed:
        logging.error("Nie udało się załadować danych")
        return

    if cached_data is None:
        cached_data = parsed
    else:
        # NOTE(review): rows are appended without de-duplication, so loading
        # the same file twice duplicates its entries.
        cached_data.extend(parsed)
    historical_data = cached_data
    last_cache_time = time.time()
    logging.info("Cache zaktualizowany i dane zostały dołączone do historycznych.")
@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: always reports the service as up."""
    return jsonify({'status': 'ok'})
# Raw-data endpoint.
@app.route('/', methods=['GET'])
def get_raw_data():
    """Return every cached row as JSON, or HTTP 500 when nothing is loaded."""
    # Read-only access; no `global` declaration needed.
    if historical_data:
        return jsonify(historical_data)
    return jsonify({'error': 'Brak danych'}), 500
# Query endpoint, reachable via GET and POST.
@app.route('/query', methods=['GET', 'POST'])
def query_all_variables():
    """Reshape the cached rows into per-metric time series.

    Returns a list of ``{'target': metric, 'datapoints': [[value, ts_ms], ...]}``
    objects with datapoints sorted by timestamp (milliseconds since the epoch),
    or HTTP 500 when no data has been loaded.
    """
    if not historical_data:
        return jsonify({'error': 'Brak danych'}), 500

    series = {}
    for entry in historical_data:
        try:
            # Timestamps are converted to millisecond precision.
            ts_ms = int(datetime.fromisoformat(entry['timestamp']).timestamp() * 1000)
            for metric, value in entry['values'].items():
                series.setdefault(metric, []).append([value, ts_ms])
        except Exception as e:
            # A malformed entry is logged and skipped.
            logging.error(f"Błąd przetwarzania wpisu: {str(e)}")
            continue

    return jsonify([
        {'target': metric, 'datapoints': sorted(points, key=lambda p: p[1])}
        for metric, points in series.items()
    ])
if __name__ == '__main__':
    # Load data once (interactive prompt), then serve on all interfaces.
    update_cache()
    app.run(host='0.0.0.0', port=5000, debug=False)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement