gcstruct.py 28 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690
  1. import pandas as pd
  2. import numpy as np
  3. import xml.etree.ElementTree as ET
  4. import json
  5. import csv
  6. import re
  7. import chevron
  8. # from shutil import copyfile
  9. from bs4 import BeautifulSoup
  10. from functools import reduce
  11. from pathlib import Path
  12. def get_flat(node):
  13. result = [
  14. {
  15. "id": node["id"],
  16. "text": node["text"],
  17. "children": [x["id"] for x in node["children"]],
  18. "children2": [],
  19. "parents": node["parents"],
  20. "accounts": node["accounts"],
  21. "costcenter": "",
  22. "level": node["level"],
  23. "drilldown": node["level"] < 2, # (node['level'] != 2 and len(node['accounts']) == 0),
  24. "form": node["form"],
  25. "accountlevel": False,
  26. "absolute": True,
  27. "seasonal": True,
  28. "status": "0",
  29. "values": [],
  30. "values2": {},
  31. }
  32. ]
  33. for child in node["children"]:
  34. result += get_flat(child)
  35. return result
  36. def get_parents_list(p_list):
  37. id = ";".join(p_list) + ";" * (10 - len(p_list))
  38. if len(p_list) > 0:
  39. return [id] + get_parents_list(p_list[:-1])
  40. return [";" * 9]
  41. def structure_from_tree(node):
  42. result = []
  43. result.append(node["id"])
  44. for child in node["children"]:
  45. result.extend(structure_from_tree(child))
  46. return result
  47. def xml_from_tree(xml_node, tree_node):
  48. for child in tree_node["children"]:
  49. element = ET.SubElement(xml_node, "Ebene")
  50. element.set("Name", child["text"])
  51. xml_from_tree(element, child)
  52. def split_it(text, index):
  53. try:
  54. return re.findall(r"([^;]+) - ([^;]*);;", text)[0][index]
  55. except Exception:
  56. return ""
  57. def last_layer(text):
  58. try:
  59. return re.findall(r"([^;]+);;", text)[0]
  60. except Exception:
  61. return ""
  62. def get_default_cols(i):
  63. return ["Ebene" + str(i) for i in range(i * 10 + 1, (i + 1) * 10 + 1)]
  64. def get_structure_exports(s):
  65. result = {
  66. "files": {},
  67. "format": {
  68. "KontoFormat": "{0} - {1}",
  69. "HerstellerkontoFormat": "{{Herstellerkonto_Nr}}",
  70. "HerstellerBezeichnungFormat": "{{Herstellerkonto_Bez}}",
  71. "NeueHerstellerkontenAnlegen": False,
  72. },
  73. }
  74. export_files = [
  75. "ExportStk",
  76. "ExportStrukturenStk",
  77. "ExportAdjazenz",
  78. "ExportUebersetzung",
  79. "ExportUebersetzungStk",
  80. "ExportHerstellerKontenrahmen",
  81. ]
  82. export_format = [
  83. "KontoFormat",
  84. "HerstellerkontoFormat",
  85. "HerstellerBezeichnungFormat",
  86. "NeueHerstellerkontenAnlegen",
  87. ]
  88. for e in export_files:
  89. if s.find(e) is not None and s.find(e).text is not None and s.find(e).text[-4:] == ".csv":
  90. result["files"][e] = s.find(e).text
  91. for e in export_format:
  92. if s.find(e) is not None and s.find(e).text != "":
  93. result["format"][e] = s.find(e).text
  94. result["format"]["NeueHerstellerkontenAnlegen"] = result["format"]["NeueHerstellerkontenAnlegen"] == "true"
  95. return result
class GCStruct:
    """Reads a GCStruct configuration directory and builds account-structure
    trees, flat lists and export/translation files for the SKR51 chart of
    accounts.

    NOTE(review): the attributes below are *class-level* mutable defaults.
    Methods assign and mutate them via ``self`` (e.g. ``json_result["flat"]``
    is filled in place), so separate instances can interfere with each other
    unless per-instance copies are made — confirm this is intended.
    """

    # Default configuration; __init__ overwrites path/file/struct/export.
    config = {
        "path": "c:/projekte/python/gcstruct",
        "path2": "c:/projekte/python/gcstruct",
        "file": "c:/projekte/python/gcstruct/config/config.xml",
        "output": "gcstruct.json",
        "default": [],
        "special": {},
        "special2": {"Planner": ["Kostenstelle", "Ebene1", "Ebene2"], "Test": ["Ebene1", "Ebene2"]},
        "columns": [
            "Konto_Nr",
            "Konto_Bezeichnung",
            "Konto_Art",
            "Konto_KST",
            "Konto_STK",
            "Konto_1",
            "Konto_2",
            "Konto_3",
            "Konto_4",
            "Konto_5",
        ],
        "struct": {},
        "export": {},
    }
    # Aggregated results; filled by get_structure_and_tree().
    json_result = {"accounts": {}, "tree": {}, "flat": {}, "struct_export": {}, "skr51_vars": {}}
    # Grouped (Struktur, id) -> Konto_Nr lookup; set in get_structure_and_tree().
    structure_ids = []
    # Source column in the combined account file -> SKR51 dimension code.
    translate = {
        "Konto_Nr": "SKR51",
        "Kostenstelle": "KST",
        "Absatzkanal": "ABS",
        "Kostenträger": "KTR",
        "Marke": "MAR",
        "Standort": "STA",
        "Marke_HBV": "MAR",
        "Standort_HBV": "BMC",
    }
  132. def __init__(self, struct_dir, export_dir=None):
  133. self.config["path"] = struct_dir
  134. self.config["path2"] = struct_dir + "/export" if export_dir is None else export_dir
  135. self.config["file"] = f"{self.config['path']}/config/gcstruct.xml"
  136. if not Path(self.config["file"]).exists():
  137. self.config["file"] = f"{self.config['path']}/config/config.xml"
  138. cfg = ET.parse(self.config["file"])
  139. self.config["default"] = [
  140. s.find("Name").text for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
  141. ]
  142. self.config["export"] = dict(
  143. [
  144. (s.find("Name").text, get_structure_exports(s))
  145. for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
  146. ]
  147. )
  148. struct = dict([(x, get_default_cols(i)) for (i, x) in enumerate(self.config["default"])])
  149. struct.update(self.config["special"])
  150. self.config["struct"] = struct
  151. # print(self.config['struct'])
  152. def export_header(self, filetype):
  153. return {
  154. "ExportStk": [],
  155. "ExportStrukturenStk": [],
  156. "ExportAdjazenz": [],
  157. "ExportUebersetzung": ["Konto_Nr_Hersteller", "Konto_Nr_Split", "Konto_Nr_Haendler", "Info"],
  158. "ExportUebersetzungStk": ["Konto_Nr_Hersteller", "Konto_Nr_Split", "Konto_Nr_Haendler", "Info"],
  159. "ExportHerstellerKontenrahmen": ["Konto_Nr", "Konto_Bezeichnung", "Case", "Info"],
  160. }[filetype]
    def accounts_from_csv(self, struct):
        """Read Kontenrahmen.csv and derive one joined id column per structure.

        For each structure *s* in *struct* the 10 "EbeneN" columns are joined
        with ';' into column ``df[s]``, and the last filled layer plus its
        number/name parts are extracted into ``LetzteEbene<i>`` columns.
        Returns the resulting DataFrame (all values are strings).
        """
        # NOTE(review): despite the name, max_rows limits the number of
        # *columns* read per row — confirm naming intent.
        max_rows = (len(self.config["default"]) + 1) * 10
        with open(f"{self.config['path']}/Kontenrahmen/Kontenrahmen.csv", "r", encoding="latin-1") as f:
            csv_reader = csv.reader(f, delimiter=";")
            imported_csv = [row[:max_rows] for row in csv_reader]
        # First CSV row is the header; missing values become "".
        df = pd.DataFrame.from_records(np.array(imported_csv[1:], dtype="object"), columns=imported_csv[0]).fillna(
            value=""
        )
        df = df.rename(columns={"Kostenstelle": "Konto_KST", "STK": "Konto_STK"})
        for i, (s, cols) in enumerate(struct.items()):
            # Join the structure's Ebene columns into one ';'-separated id,
            # then strip the leading ';' produced by the fold's "" seed.
            df[s] = reduce(lambda x, y: x + ";" + df[y], cols, "")
            df[s] = df[s].apply(lambda x: x[1:])
            df["LetzteEbene" + str(i + 1)] = df[s].apply(lambda x: last_layer(x))
            df["LetzteEbene" + str(i + 1) + "_Nr"] = df[s].apply(lambda x: split_it(x, 0))
            df["LetzteEbene" + str(i + 1) + "_Bez"] = df[s].apply(lambda x: split_it(x, 1))
        # The first structure is treated as the manufacturer chart of accounts.
        df["Herstellerkonto_Nr"] = df["LetzteEbene1_Nr"]
        df["Herstellerkonto_Bez"] = df["LetzteEbene1_Bez"]
        return df
    def tree_from_xml(self, struct, df):
        """Build one structure tree per entry in *struct* from ``Xml/<s>.xml``.

        When the XML file is missing, the used entries from *df* are printed
        for diagnosis and an empty tree with just a root node is substituted.
        Returns a dict mapping structure name -> tree root dict.
        """
        result = {}
        for s, cols in struct.items():
            try:
                tree = ET.parse(f"{self.config['path']}/Xml/{s}.xml")
                result[s] = self.get_tree_root(tree.getroot(), s)
            except FileNotFoundError:
                print("XML-Datei fehlt")
                used_entries = [x.split(";")[1:] for x in set(df[s].to_numpy())]
                print(used_entries)
                # Fallback: empty root element named after the structure.
                root = ET.Element("Ebene")
                root.set("Name", s)
                result[s] = self.get_tree_root(root, s)
        # self.json_result["tree"][s] = get_tree_from_accounts(cols, [])
        return result
    def get_structure_and_tree(self):
        """Populate ``json_result`` (accounts, trees, flat lists, struct exports).

        Reads the Kontenrahmen CSV and the per-structure XML files, flattens
        each tree, dumps everything to ``<path2>/<output>`` as JSON and
        returns the ``json_result`` dict.
        """
        df = self.accounts_from_csv(self.config["struct"])
        self.json_result["accounts"] = df.to_dict("records")
        # Group account numbers by (Struktur, id) for lookup.
        self.structure_ids = df.melt(
            id_vars=["Konto_Nr"], value_vars=self.config["struct"].keys(), var_name="Struktur", value_name="id"
        ).groupby(by=["Struktur", "id"])
        self.json_result["tree"] = self.tree_from_xml(self.config["struct"], df)
        for s, cols in self.config["struct"].items():
            self.json_result["flat"][s] = get_flat(self.json_result["tree"][s])
        for s, entries in self.json_result["flat"].items():
            cols = self.config["struct"][s]
            # Split each flat id back into its Ebene columns for export.
            df_temp = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
            self.json_result["struct_export"][s] = df_temp.to_dict(orient="records")
        # {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
        json.dump(self.json_result, open(f"{self.config['path2']}/{self.config['output']}", "w"), indent=2)
        return self.json_result
  210. def get_accounts(self, structure, id):
  211. return [x["Konto_Nr"] for x in self.json_result["accounts"] if x[structure] == id]
  212. # return []
  213. # res = self.structure_ids.groups.get((structure, id))
  214. # if res is None:
  215. # return []
  216. # return res.values
    def export(self):
        """Write the configured export CSV files with the manufacturer-account translation.

        NOTE(review): every configured file receives the *same* rows regardless
        of its filetype, and the account format is always taken from the
        "SKR51" export config rather than from structure *s* — confirm intent.
        """
        for s in self.config["export"].keys():
            for filetype, filename in self.config["export"][s]["files"].items():
                with open(self.config["path2"] + "/" + filename, "w") as fwh:
                    fwh.write("Konto_Nr_Hersteller;Konto_Nr_Split;Konto_Nr_Haendler;Info\n")
                    # 'Hersteller'Konto_Nr;Konto_Bezeichnung;Case;Info'
                    for a in self.json_result["accounts"]:
                        # Only accounts mapped to a manufacturer account are exported.
                        if a["Herstellerkonto_Nr"] != "":
                            account = chevron.render(
                                self.config["export"]["SKR51"]["format"]["HerstellerkontoFormat"], a
                            )
                            fwh.write(
                                account + ";" + account + ";" + a["Konto_Nr"] + ";" + "\n"
                            )  # a['Herstellerkonto_Bez']
    def get_tree(self, node, parents, structure):
        """Recursively convert the XML children of *node* into tree-node dicts.

        *parents* is mutated in place (append before recursing, pop after),
        so the same list instance is threaded through the whole traversal —
        do not change the append/recurse/pop order.
        """
        result = []
        for child in node:
            p = get_parents_list(parents)
            parents.append(child.attrib["Name"])
            # id: 10 ';'-separated fields, padded with empty trailing fields.
            id = ";".join(parents) + ";" * (10 - len(parents))
            result.append(
                {
                    "id": id,
                    "text": child.attrib["Name"],
                    "children": self.get_tree(child, parents, structure),
                    "parents": p,
                    "accounts": self.get_accounts(structure, id),
                    "level": len(parents),
                    # Optional "Split" XML attribute controls the form type.
                    "form": child.attrib.get("Split", ""),
                }
            )
            parents.pop()
        return result
  250. def get_tree_root(self, node, structure):
  251. id = ";" * 9
  252. return {
  253. "id": id,
  254. "text": node.attrib["Name"],
  255. "children": self.get_tree(node, [], structure),
  256. "parents": [],
  257. "accounts": [],
  258. "level": 0,
  259. "form": "",
  260. }
    def post_structure_and_tree(self):
        """Write back an edited JSON structure as CSV and XML files.

        Reads ``<path>/<output>`` (as produced/edited by the frontend) and
        writes ``Kontenrahmen_out.csv`` plus, per structure, ``<s>_out.csv``
        and a prettified ``<s>_out.xml``.
        """
        json_post = json.load(open(f"{self.config['path']}/{self.config['output']}", "r"))
        # Kontenrahmen.csv
        ebenen = ["Ebene" + str(i) for i in range(1, len(self.config["default"]) * 10 + 1)]
        header = ";".join(self.config["columns"] + ebenen)
        cols = self.config["columns"] + self.config["default"]
        with open(self.config["path"] + "/Kontenrahmen/Kontenrahmen_out.csv", "w", encoding="latin-1") as f:
            f.write(header + "\n")
            for row in json_post["Kontenrahmen"]:
                f.write(";".join([row[e] for e in cols]) + "\n")
        # print(header)
        # XML and, if needed, Struktur.csv per structure
        for i, s in enumerate(self.config["default"]):
            with open(f"{self.config['path']}/Strukturen/Kontenrahmen.csv/{s}_out.csv", "w", encoding="latin-1") as f:
                f.write(";".join(["Ebene" + str(i * 10 + j) for j in range(1, 11)]) + "\n")
                rows = structure_from_tree({"id": ";" * 9, "children": json_post[s]})
                f.write("\n".join(rows))
            # with open(self.config['path'] + "/Strukturen/Kontenrahmen.csv/" +
            #           structure + "_2.csv", "w", encoding="latin-1") as f:
            root = ET.Element("Ebene")
            root.set("Name", s)
            xml_from_tree(root, {"id": ";" * 9, "children": json_post[s]})
            with open(f"{self.config['path']}/Xml/{s}_out.xml", "w", encoding="utf-8") as f:
                f.write(BeautifulSoup(ET.tostring(root), "xml").prettify())
    def skr51_translate(self, accounts_combined_files):
        """Translate combined dealer account rows into SKR51 dimension numbers.

        Builds one translation table per dimension from the Kontenrahmen
        structures, joins them onto the combined account files and writes
        ``Kontenrahmen_uebersetzt.csv``.

        *accounts_combined_files* is an iterable of CSV paths
        (``Kontenrahmen_kombiniert*.csv``).
        """
        df = self.accounts_from_csv(self.config["struct"])
        # One lookup frame per translate entry; assumes structure slot i in
        # config["struct"] corresponds to the i-th translate item.
        df_translate = {}
        for i, (t_from, t_to) in enumerate(self.translate.items()):
            last = "LetzteEbene" + str(i + 1)
            from_label = [
                "Konto_Nr",
                last,
                last + "_Nr",
                last + "_Bez",
                "Ebene" + str(i * 10 + 1),
                "Ebene" + str(i * 10 + 2),
            ]
            to_label = [t_to, t_to + "_Ebene", t_to + "_Nr", t_to + "_Bez", "Ebene1", "Ebene2"]
            df_translate[t_from] = df[df[last + "_Nr"] != ""][from_label].rename(
                columns=dict(zip(from_label, to_label))
            )
            # print(df_translate[t_to].head())
        # Read and concatenate all combined account files (all columns as str).
        df2 = []
        for ac_file in accounts_combined_files:
            df2.append(
                pd.read_csv(
                    ac_file, decimal=",", sep=";", encoding="latin-1", converters={i: str for i in range(0, 200)}
                )
            )
        df_source = pd.concat(df2)
        # Duplicate every account with a "_STK" suffix (quantity rows).
        df3 = df_source.copy()
        df3["Konto_Nr"] = df3["Konto_Nr"] + "_STK"
        df_source = pd.concat([df_source, df3])
        # Derive one key column per dimension, then merge its translation.
        for t_from, t_to in self.translate.items():
            if t_to == "SKR51":
                df_source["SKR51"] = df_source["Konto_Nr"]
            elif t_from in ["Marke_HBV"]:
                df_source["Marke_HBV"] = df_source["Marke"]
            elif t_from in ["Standort_HBV"]:
                df_source["Standort_HBV"] = df_source["Standort"] + "_" + df_source["Marke"]
                df_source["BMC"] = "BMC_" + df_source["Standort_HBV"]
            elif t_to == "KTR":
                # Cost unit key depends on its source: TZ, NW/SC, or default.
                df_source["KTR"] = np.where(
                    df_source["Kostenträger_Quelle"] == "TZ", "KTR_TZ_" + df_source["Kostenträger"], "KTR_00"
                )
                df_source["KTR"] = np.where(
                    df_source["Kostenträger_Quelle"].isin(["NW", "SC"]),
                    "KTR_"
                    + df_source["Kostenträger_Quelle"]
                    + "_"
                    + df_source["Marke"]
                    + "_"
                    + df_source["Kostenträger"],
                    df_source["KTR"],
                )
            else:
                df_source[t_to] = t_to + "_" + df_source[t_from]
            df_source = df_source.merge(df_translate[t_from], how="left", on=[t_to], suffixes=(None, "_" + t_to))
            # Fall back to the raw source value where no translation matched.
            df_source[t_to + "_Nr"] = np.where(
                df_source[t_to + "_Nr"].isna(), df_source[t_from], df_source[t_to + "_Nr"]
            )
        # Assemble the full translated and the original dealer account key.
        df_source["Konto_Nr_SKR51"] = (
            df_source["MAR_Nr"]
            + "-"
            + df_source["STA_Nr"]
            + "-"
            + df_source["SKR51_Nr"]
            + "-"
            + df_source["KST_Nr"]
            + "-"
            + df_source["ABS_Nr"]
            + "-"
            + df_source["KTR_Nr"]
        )
        df_source["Konto_Nr_Händler"] = (
            df_source["Marke"]
            + "-"
            + df_source["Standort"]
            + "-"
            + df_source["Konto_Nr"]
            + "-"
            + df_source["Kostenstelle"]
            + "-"
            + df_source["Absatzkanal"]
            + "-"
            + df_source["Kostenträger"]
        )
        # df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)
        df_source["MAR_Nr_MAR"] = np.where(df_source["MAR_Nr_MAR"].isna(), "0000", df_source["MAR_Nr_MAR"])
        from_label = [
            "MAR_Nr",
            "STA_Nr",
            "SKR51_Nr",
            "KST_Nr",
            "ABS_Nr",
            "KTR_Nr",
            "KTR_Ebene",
            "Konto_Nr_Händler",
            "Konto_Nr_SKR51",
            "MAR_Nr_MAR",
            "BMC_Nr",
        ]
        to_label = [
            "Marke",
            "Standort",
            "Konto_Nr",
            "Kostenstelle",
            "Absatzkanal",
            "Kostenträger",
            "Kostenträger_Ebene",
            "Konto_Nr_Händler",
            "Konto_Nr_SKR51",
            "Marke_HBV",
            "Standort_HBV",
        ]
        df_combined = df_source[from_label].rename(columns=dict(zip(from_label, to_label)))
        df_combined.to_csv(
            f"{self.config['path2']}/Kontenrahmen_uebersetzt.csv", sep=";", encoding="latin-1", index=False
        )
    def skr51_translate2(self, accounts_combined_file):
        """Resolve structure variables for translated accounts and write SKR51_Uebersetzung.csv.

        Reads the already-translated account file (*accounts_combined_file*),
        replaces "{Struktur/...}" placeholders in the Ebene columns with their
        configured values, derives GuV/Bilanz level columns and the final
        report account names, then writes ``SKR51_Uebersetzung.csv``.
        """
        df = self.accounts_from_csv(self.config["struct"])
        # Build the placeholder lookup: "{<struct>/<Ebene1>}" -> Ebene2 value.
        df_list = []
        for i, s in enumerate(self.config["struct"].keys()):
            from_label = ["Konto_Nr", "Ebene" + str(i * 10 + 1), "Ebene" + str(i * 10 + 2), "Ebene" + str(i * 10 + 3)]
            to_label = ["Konto_Nr", "key", "value", "value2"]
            df_temp = df[from_label].rename(columns=dict(zip(from_label, to_label)))
            df_temp["key"] = "{" + s + "/" + df_temp["key"] + "}"
            df_list.append(df_temp[df_temp["value"] != ""])
        df_translate = pd.concat(df_list)
        # df_translate.to_csv(f"{self.config['path2']}/SKR51_Variablen.csv", sep=';', encoding='latin-1', index=False)
        df_source = pd.read_csv(
            accounts_combined_file, decimal=",", sep=";", encoding="latin-1", converters={i: str for i in range(0, 200)}
        )
        # Drop the quantity rows; they are re-derived below from Umsatzerlöse.
        df_source = df_source[df_source["Konto_Nr"].str.contains("_STK") == False]
        df_source["Konto_Nr_Gesamt"] = df_source["Konto_Nr"]
        # Accounts starting with 4/5/7/8 get a cost-centre digit appended ...
        df_source["Konto_Nr"] = np.where(
            df_source["Konto_Nr"].str.contains(r"^[4578]"),
            df_source["Konto_Nr"] + "_" + df_source["Kostenstelle"].str.slice(stop=1),
            df_source["Konto_Nr"],
        )
        # ... and 5xxx_4 accounts a second cost-centre digit.
        df_source["Konto_Nr"] = np.where(
            df_source["Konto_Nr"].str.contains(r"^5\d+_4"),
            df_source["Konto_Nr"] + df_source["Kostenstelle"].str.slice(start=1, stop=2),
            df_source["Konto_Nr"],
        )
        df_source = df_source.merge(df, how="left", on=["Konto_Nr"])
        # rows = df_source.shape[0]
        df_source["value"] = ""
        cols = get_default_cols(0)
        # Substitute "{Struktur/...}" placeholders per dimension and Ebene column.
        for t_from, t_to in self.translate.items():
            if t_from in ["Marke_HBV", "Standort_HBV"]:
                continue
            if t_from == "Konto_Nr":
                df_source[t_to] = df_source[t_from]
            else:
                df_source[t_to] = t_to + "_" + df_source[t_from]
            for e in cols:
                df_source = df_source.merge(
                    df_translate,
                    how="left",
                    left_on=[t_to, e],
                    right_on=["Konto_Nr", "key"],
                    suffixes=(None, "_" + t_to + "_" + e),
                )
                df_source[e] = np.where(
                    df_source["value_" + t_to + "_" + e].notna(), df_source["value_" + t_to + "_" + e], df_source[e]
                )
                # if df_source.shape[0] > rows:
                #     print(t_to + '_' + e + ': ' + str(df_source.shape[0]))
        # df_source.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
        # df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
        # Mark unresolved placeholders as "falsch"; resolve the [KTR] marker.
        for e in cols:
            df_source[e] = np.where(
                df_source[e].str.startswith("{"),
                df_source[e].str.extract(r"\/(.*)}", expand=False) + " falsch",
                df_source[e],
            )  # df_source[e].str.extract(r'/(.*)}') +
            df_source[e] = np.where(df_source[e] == "[KTR]", df_source["Kostenträger_Ebene"], df_source[e])
        # df_all[df_all['Ebene1'] == ]
        # print(df_source.head())
        df_source["Konto_neu"] = (
            df_source["Marke"]
            + "-"
            + df_source["Standort"]
            + "-"
            + df_source["Konto_Nr"]
            + "-"
            + df_source["Kostenstelle"]
            + "-"
            + df_source["Absatzkanal"]
            + "-"
            + df_source["Kostenträger"]
            + " - "
            + df_source["Konto_Bezeichnung"]
        )
        # Accounts without an Ebene1 assignment are bucketed as unassigned.
        df_source["Ebene1_empty"] = df_source["Ebene1"].isna()  # , df_source['Ebene1'].map(lambda x: x == ''))
        df_source["Konto_neu"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_neu"])
        df_source["Ebene1"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Ebene1"])
        df_source["Konto_Gruppe"] = df_source["Konto_Nr"] + " - " + df_source["Konto_Bezeichnung"]
        df_source["Konto_Gruppe"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_Gruppe"])
        df_source["Konto_Gesamt"] = df_source["Konto_Nr_Gesamt"] + " - " + df_source["Konto_Bezeichnung"]
        # Duplicate revenue accounts as quantity ("STK") rows.
        df_amount = df_source[df_source["Ebene1"] == "Umsatzerlöse"].reset_index()
        df_amount["Ebene1"] = "verkaufte Stückzahlen"
        df_amount["Ebene72"] = "verkaufte Stückzahlen"
        df_amount["Konto_neu"] = "STK " + df_amount["Konto_neu"]
        df_amount["Konto_Nr_Händler"] = df_amount["Konto_Nr_Händler"] + "_STK"
        df_amount["Konto_Gruppe"] = "STK " + df_amount["Konto_Gruppe"]
        df_amount["Konto_Gesamt"] = "STK " + df_amount["Konto_Gesamt"]
        df_source = pd.concat([df_source, df_amount])
        # For GuV rows, copy levels 72..80 into 81..89; Bilanz rows get "Bilanz".
        df_source["GuV"] = df_source["Ebene71"] == "GuV"
        df_source["Ebene81"] = np.where(df_source["GuV"], df_source["Ebene72"], "Bilanz")
        df_source["Ebene82"] = np.where(df_source["GuV"], df_source["Ebene73"], "")
        df_source["Ebene83"] = np.where(df_source["GuV"], df_source["Ebene74"], "")
        df_source["Ebene84"] = np.where(df_source["GuV"], df_source["Ebene75"], "")
        df_source["Ebene85"] = np.where(df_source["GuV"], df_source["Ebene76"], "")
        df_source["Ebene86"] = np.where(df_source["GuV"], df_source["Ebene77"], "")
        df_source["Ebene87"] = np.where(df_source["GuV"], df_source["Ebene78"], "")
        df_source["Ebene88"] = np.where(df_source["GuV"], df_source["Ebene79"], "")
        df_source["Ebene89"] = np.where(df_source["GuV"], df_source["Ebene80"], "")
        df_source["Ebene90"] = ""
        # Shift the original 71..80 levels up by one for Bilanz rows.
        df_source["Ebene71"] = np.where(df_source["GuV"], "GuV", df_source["Ebene72"])
        df_source["Ebene72"] = np.where(df_source["GuV"], "", df_source["Ebene73"])
        df_source["Ebene73"] = np.where(df_source["GuV"], "", df_source["Ebene74"])
        df_source["Ebene74"] = np.where(df_source["GuV"], "", df_source["Ebene75"])
        df_source["Ebene75"] = np.where(df_source["GuV"], "", df_source["Ebene76"])
        df_source["Ebene76"] = np.where(df_source["GuV"], "", df_source["Ebene77"])
        df_source["Ebene77"] = np.where(df_source["GuV"], "", df_source["Ebene78"])
        df_source["Ebene78"] = np.where(df_source["GuV"], "", df_source["Ebene79"])
        df_source["Ebene79"] = np.where(df_source["GuV"], "", df_source["Ebene80"])
        df_source["Ebene80"] = ""
        df_source["Susa"] = df_source["Konto_Gruppe"].str.slice(stop=1)
        df_source["Konto_KST"] = ""
        df_source["GuV_Bilanz"] = df_source["Konto_Art"]
        from_label = ["Konto_neu", "Konto_Nr_Händler"]
        to_label = ["Konto", "Acct_Nr"]
        df_source = df_source.rename(columns=dict(zip(from_label, to_label)))
        df_source = df_source[
            [
                "Konto",
                "Acct_Nr",
                "Konto_Bezeichnung",
                "GuV_Bilanz",
                "Konto_KST",
                "Konto_STK",
                "Konto_1",
                "Konto_2",
                "Konto_3",
                "Konto_4",
                "Konto_5",
            ]
            + get_default_cols(0)
            + get_default_cols(7)
            + get_default_cols(8)
            + ["Konto_Gruppe", "Konto_Nr_Gesamt", "Konto_Gesamt", "Susa"]
        ]
        df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=";", encoding="latin-1", index=False)
    def skr51_vars(self):
        """Resolve structure variables in the SKR51 tree and write SKR51_Struktur.csv.

        Builds a key/value table from the cost-unit export and from every flat
        structure ("{<struct>/<Ebene1>}" -> Ebene2), substitutes matches into
        the SKR51 level columns, duplicates revenue rows as quantity rows and
        writes the result.
        """
        self.get_structure_and_tree()
        cols = get_default_cols(0)
        df_temp = pd.read_csv(
            f"{self.config['path']}/Export/Kostentraeger.csv",
            decimal=",",
            sep=";",
            encoding="latin-1",
            converters={i: str for i in range(0, 200)},
        )
        # Cost-unit rows provide the value for the special "[KTR]" key.
        df_temp["value"] = df_temp["Ebene33"]
        df_temp["key"] = "[KTR]"
        df_temp = df_temp[df_temp["value"].str.contains(" - ")]
        df_list = [df_temp[["key", "value"]]]
        for s, entries in self.json_result["flat"].items():
            df = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
            df["key"] = df[cols[0]].apply(lambda x: "{" + s + "/" + x + "}")
            df["value"] = df[cols[1]]
            df_list.append(df[["key", "value"]])
        df = pd.concat(df_list)
        df_vars = df[df["value"] != ""]
        # df_vars.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
        df_main = pd.DataFrame([x["id"].split(";") for x in self.json_result["flat"]["SKR51"]], columns=cols)
        df_main["value"] = ""
        # Substitute variables column by column; keep the original where no match.
        for c in cols:
            df_main = df_main.merge(df_vars, how="left", left_on=c, right_on="key", suffixes=(None, "_" + c))
            df_main[c] = np.where(df_main["value_" + c].isna(), df_main[c], df_main["value_" + c])
        # Duplicate revenue rows as quantity rows.
        df_amount = df_main[df_main["Ebene1"] == "Umsatzerlöse"].reset_index()
        df_amount["Ebene1"] = "verkaufte Stückzahlen"
        df_main = pd.concat([df_main, df_amount])
        # from_label = cols
        to_label = cols  # get_default_cols(9)
        # df_main = df_main.rename(columns=dict(zip(from_label, to_label)))
        df_main[to_label].to_csv(
            f"{self.config['path2']}/SKR51_Struktur.csv", sep=";", encoding="latin-1", index_label="Sortierung"
        )
def gcstruct_uebersetzung():
    """Run the full SKR51 translation pipeline for the Luchtenberg setup.

    Detects the base/import directories from the current working directory
    (scripts inside a "Portal" tree, a local "GCStruct_Aufbereitung" folder,
    or a network share fallback), then:
      1. translates the combined account files (Kontenrahmen_uebersetzt.csv),
      2. resolves structure variables (SKR51_Uebersetzung.csv),
      3. writes the structure export (SKR51_Struktur.csv).
    """
    # base_dir = 'P:/SKR51_GCStruct/'
    base_dir = Path(".").absolute()
    import_dir = base_dir
    if base_dir.name == "scripts":
        if base_dir.parent.parent.name == "Portal":
            base_dir = base_dir.parent.parent.parent
            import_dir = base_dir.joinpath("Portal/System/IQD/Belege/Kontenrahmen")
        else:
            base_dir = base_dir.parent.parent
            import_dir = base_dir.joinpath("System/OPTIMA/Export")
    elif not base_dir.joinpath("GCStruct_Aufbereitung").exists():
        # Fallback to the network share (Windows UNC first, then the
        # Linux mount of the same share).
        base_dir = Path("//192.168.2.21/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/")
        if not base_dir.exists():
            base_dir = Path("//media/fileserver1/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/")
        import_dir = base_dir
    struct = GCStruct(str(base_dir.joinpath("GCStruct_Aufbereitung")))
    struct.skr51_translate(import_dir.glob("Kontenrahmen_kombiniert*.csv"))
    print("Kontenrahmen_uebersetzt.csv erstellt.")
    # copyfile('c:/Projekte/Python/Gcstruct/Kontenrahmen_kombiniert.csv',
    #          base_dir + 'GCStruct_Modell/Export/Kontenrahmen_kombiniert.csv')
    struct2 = GCStruct(str(base_dir.joinpath("GCStruct_Modell")))
    struct2.skr51_translate2(str(base_dir.joinpath("GCStruct_Aufbereitung/Export/Kontenrahmen_uebersetzt.csv")))
    print("SKR51_Uebersetzung.csv erstellt.")
    struct2.skr51_vars()
    print("SKR51_Struktur.csv erstellt.")
  599. def dresen():
  600. struct = GCStruct("c:/projekte/GCHRStruct_Hyundai_Export")
  601. struct.get_structure_and_tree()
  602. struct.export()
  603. def reisacher():
  604. struct = GCStruct("D:/GAPS_BMW/GCStruct_neue_Struktur_Planung", "D:/Planung/Planner2022/export")
  605. struct.get_structure_and_tree()
  606. # json.dump(res['flat'], open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)
if __name__ == "__main__":
    # Entry point: pick the customer-specific driver below.
    # struct = GCStruct('c:/projekte/gcstruct_dresen')
    # struct = GCStruct('c:/projekte/python/gcstruct')
    # struct = GCStruct('c:/projekte/python/gcstruct_reisacher_planung')
    reisacher()
    # dresen()