# planner_split_files.py
import csv
import hashlib
import json
from pathlib import Path

from plan_values2 import VALUES2_HEADER
  6. def split_file(filename, username):
  7. with open(filename, "r") as frh:
  8. structure = json.load(frh)
  9. input_file = Path(filename)
  10. export_values2(input_file, structure)
  11. export_user_config(input_file, structure, username)
  12. export_options(input_file, structure)
  13. export_scaffold(input_file, structure)
  14. def export_values2(input_file, structure):
  15. result = []
  16. values2 = dict([(s["id"], s["values2"]) for s in structure])
  17. for id, s_entry in values2.items():
  18. for department, v2 in s_entry.items():
  19. result.append([id, department, *v2])
  20. output_filename = f"data_{input_file.name[:-5]}.csv"
  21. output_file = input_file.parent / output_filename
  22. with open(output_file, "w") as fwh:
  23. csv_writer = csv.writer(fwh, delimiter="\t")
  24. csv_writer.writerow(VALUES2_HEADER)
  25. csv_writer.writerows(result)
  26. def export_user_config(input_file, structure, username):
  27. res = {}
  28. res["drilldown"] = dict([(s["id"], s["drilldown"]) for s in structure])
  29. timestamp = input_file.name.split("_")[2][:-5]
  30. output_filename = f"user_{username}_{timestamp}.json"
  31. output_file = input_file.parent / output_filename
  32. with open(output_file, "w") as fwh:
  33. json.dump(res, fwh, indent=2)
  34. def export_options(input_file, structure):
  35. res = dict([(s["id"], s["options"]) for s in structure])
  36. output_filename = f"options_{input_file.name}"
  37. output_file = input_file.parent / output_filename
  38. with open(output_file, "w") as fwh:
  39. json.dump(res, fwh, indent=2)
  40. def export_scaffold(input_file, structure):
  41. res = []
  42. for s in structure:
  43. res.append(
  44. {
  45. "id": s["id"],
  46. "text": s["text"],
  47. "children": s["children"],
  48. "children2": s["children2"],
  49. "parents": s["parents"],
  50. "accounts": s["accounts"],
  51. "costcenter": s["costcenter"],
  52. "level": s["level"],
  53. "form": s["form"],
  54. "planlevel": s.get("planlevel", False),
  55. }
  56. )
  57. output_filename = f"struct_{input_file.name}"
  58. output_file = input_file.parent / output_filename
  59. with open(output_file, "w") as fwh:
  60. json.dump(res, fwh, indent=2)
  61. def convert_folder(base_dir):
  62. for filename in Path(base_dir).glob("20*.json"):
  63. split_file(filename, "global")
  64. def hash_folder(base_dir):
  65. res = []
  66. for filename in Path(base_dir).glob("*.*"):
  67. if not filename.is_file():
  68. continue
  69. with filename.open("rb") as frh:
  70. digest = sha256_file_digest(frh.read())
  71. res.append([filename.name, digest])
  72. res.sort()
  73. most_recent = dict([(e[1], e[0]) for e in res])
  74. for e in res:
  75. e.append(e[0] == most_recent.get(e[1], None))
  76. with open(base_dir + "/_hash.csv", "w") as fwh:
  77. csv_writer = csv.writer(fwh, delimiter="\t")
  78. csv_writer.writerow(["Datei", "Hashwert", "Original"])
  79. csv_writer.writerows(res)
  80. def sha256_file_digest(text):
  81. m = hashlib.new("sha256")
  82. m.update(text)
  83. return m.hexdigest()
  84. if __name__ == "__main__":
  85. # split_file("save/2024_V1_20231114102855.json", "global")
  86. # convert_folder("save/")
  87. hash_folder("save/")