ソースを参照

Save-Files aufteilen, um Platz zu sparen

robert 1 年間 前
コミット
64deb29905
2 ファイル変更、119 行追加、0 行削除
  1. 0 0
      save/2021_V1_20201012215917.json
  2. 119 0
      webservice/planner_split_files.py

ファイルの差分が大きいため隠しています
+ 0 - 0
save/2021_V1_20201012215917.json


+ 119 - 0
webservice/planner_split_files.py

@@ -0,0 +1,119 @@
+import hashlib
+from plan_values2 import VALUES2_HEADER
+
+import json
+import csv
+from pathlib import Path
+
+
def split_file(filename, username):
    """Split one monolithic planner save file into several smaller files.

    Loads the JSON save at *filename* and fans its contents out into the
    value, user-config, options and scaffold exports, all written next to
    the source file.
    """
    source = Path(filename)
    with source.open("r") as frh:
        structure = json.load(frh)

    export_values2(source, structure)
    export_user_config(source, structure, username)
    export_options(source, structure)
    export_scaffold(source, structure)
+
+
def export_values2(input_file, structure):
    """Export the per-department "values2" numbers as a tab-separated CSV.

    Writes ``data_<save name>.csv`` next to *input_file*, one row per
    (node id, department) pair: ``[id, department, *values]``, preceded
    by the shared VALUES2_HEADER row.
    """
    rows = []
    for entry in structure:
        node_id = entry["id"]  # avoid shadowing the builtin `id`
        for department, v2 in entry["values2"].items():
            rows.append([node_id, department, *v2])

    # input_file.name[:-5] strips the trailing ".json"
    output_file = input_file.parent / f"data_{input_file.name[:-5]}.csv"

    # newline="" is required by the csv module to avoid extra blank
    # lines on Windows.
    with open(output_file, "w", newline="") as fwh:
        csv_writer = csv.writer(fwh, delimiter="\t")
        csv_writer.writerow(VALUES2_HEADER)
        csv_writer.writerows(rows)
+
+
def export_user_config(input_file, structure, username):
    """Export the per-user view state (drilldown flags) for *username*.

    Writes ``user_<username>_<timestamp>.json`` next to *input_file*,
    mapping each node id to its drilldown flag.
    """
    user_config = {"drilldown": {node["id"]: node["drilldown"] for node in structure}}

    # Save names follow <year>_<version>_<timestamp>.json; take the
    # timestamp component and drop the ".json" suffix.
    timestamp = input_file.name.split("_")[2][:-5]
    target = input_file.parent / f"user_{username}_{timestamp}.json"

    with open(target, "w") as fwh:
        json.dump(user_config, fwh, indent=2)
+
+
def export_options(input_file, structure):
    """Write each node's "options" dict to ``options_<save name>`` as JSON."""
    options_by_id = {node["id"]: node["options"] for node in structure}

    target = input_file.parent / f"options_{input_file.name}"
    with open(target, "w") as fwh:
        json.dump(options_by_id, fwh, indent=2)
+
+
def export_scaffold(input_file, structure):
    """Write the structural skeleton (ids, texts, hierarchy links) to
    ``struct_<save name>`` as JSON, dropping all value payloads.
    """
    copied_keys = (
        "id",
        "text",
        "children",
        "children2",
        "parents",
        "accounts",
        "costcenter",
        "level",
        "form",
    )

    skeleton = []
    for node in structure:
        slim = {key: node[key] for key in copied_keys}
        # "planlevel" is optional in the source data; default to False.
        slim["planlevel"] = node.get("planlevel", False)
        skeleton.append(slim)

    target = input_file.parent / f"struct_{input_file.name}"
    with open(target, "w") as fwh:
        json.dump(skeleton, fwh, indent=2)
+
+
def convert_folder(base_dir):
    """Split every year-save (files matching ``20*.json``) directly inside
    *base_dir*, using the shared "global" username for the user exports.
    """
    for save_path in Path(base_dir).glob("20*.json"):
        split_file(save_path, "global")
+
+
def hash_folder(base_dir):
    """Write ``_hash.csv`` listing every file in *base_dir* with its SHA-256.

    The third column ("Original") is True for the alphabetically last
    filename carrying a given digest — with the timestamped naming scheme
    used here, that is presumably the most recent copy (TODO confirm
    whether the earliest copy was meant instead).
    """
    rows = []
    for path in Path(base_dir).glob("*.*"):
        if not path.is_file():
            continue
        with path.open("rb") as frh:
            rows.append([path.name, sha256_file_digest(frh.read())])

    rows.sort()
    # Later (alphabetically greater) names overwrite earlier ones, so each
    # digest ends up mapped to its last-sorted filename.
    most_recent = {digest: name for name, digest in rows}
    for row in rows:
        row.append(row[0] == most_recent.get(row[1]))

    # newline="" per the csv module docs; avoids blank lines on Windows.
    with open(Path(base_dir) / "_hash.csv", "w", newline="") as fwh:
        csv_writer = csv.writer(fwh, delimiter="\t")
        csv_writer.writerow(["Datei", "Hashwert", "Original"])
        csv_writer.writerows(rows)
+
+
def sha256_file_digest(text):
    """Return the hexadecimal SHA-256 digest of *text* (a bytes object)."""
    return hashlib.sha256(text).hexdigest()
+
+
if __name__ == "__main__":
    # One-off maintenance entry point: the commented lines below are the
    # alternative tasks (split a single save, or convert a whole folder);
    # uncomment the one you need. Currently only the hash report runs.
    # split_file("save/2024_V1_20231114102855.json", "global")
    # convert_folder("save/")
    hash_folder("save/")

この差分においてかなりの量のファイルが変更されているため、一部のファイルを表示していません