# gcstruct.py
import copy
import csv
import json
import re
import xml.etree.ElementTree as ET
from functools import reduce
from pathlib import Path

# from shutil import copyfile
import chevron
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
  12. def get_flat(node):
  13. result = [
  14. {
  15. "id": node["id"],
  16. "text": node["text"],
  17. "children": [x["id"] for x in node["children"]],
  18. "children2": [],
  19. "parents": node["parents"],
  20. "accounts": node["accounts"],
  21. "costcenter": "",
  22. "level": node["level"],
  23. "drilldown": node["level"] < 2, # (node['level'] != 2 and len(node['accounts']) == 0),
  24. "form": node["form"],
  25. "accountlevel": False,
  26. "absolute": True,
  27. "seasonal": True,
  28. "status": "0",
  29. "values": [],
  30. "values2": {},
  31. }
  32. ]
  33. for child in node["children"]:
  34. result += get_flat(child)
  35. return result
  36. def get_parents_list(p_list):
  37. id = ";".join(p_list) + ";" * (10 - len(p_list))
  38. if len(p_list) > 0:
  39. return [id] + get_parents_list(p_list[:-1])
  40. return [";" * 9]
  41. def structure_from_tree(node):
  42. result = []
  43. result.append(node["id"])
  44. for child in node["children"]:
  45. result.extend(structure_from_tree(child))
  46. return result
  47. def xml_from_tree(xml_node, tree_node):
  48. for child in tree_node["children"]:
  49. element = ET.SubElement(xml_node, "Ebene")
  50. element.set("Name", child["text"])
  51. xml_from_tree(element, child)
  52. def split_it(text, index):
  53. try:
  54. return re.findall(r"([^;]+) - ([^;]*);;", text)[0][index]
  55. except Exception:
  56. return ""
  57. def last_layer(text):
  58. try:
  59. return re.findall(r"([^;]+);;", text)[0]
  60. except Exception:
  61. return ""
  62. def get_default_cols(i):
  63. return ["Ebene" + str(i) for i in range(i * 10 + 1, (i + 1) * 10 + 1)]
  64. def get_structure_exports(s):
  65. result = {
  66. "files": {},
  67. "format": {
  68. "KontoFormat": "{0} - {1}",
  69. "HerstellerkontoFormat": "{{Herstellerkonto_Nr}}",
  70. "HerstellerBezeichnungFormat": "{{Herstellerkonto_Bez}}",
  71. "NeueHerstellerkontenAnlegen": False,
  72. },
  73. }
  74. export_files = [
  75. "ExportStk",
  76. "ExportStrukturenStk",
  77. "ExportAdjazenz",
  78. "ExportUebersetzung",
  79. "ExportUebersetzungStk",
  80. "ExportHerstellerKontenrahmen",
  81. ]
  82. export_format = [
  83. "KontoFormat",
  84. "HerstellerkontoFormat",
  85. "HerstellerBezeichnungFormat",
  86. "NeueHerstellerkontenAnlegen",
  87. ]
  88. for e in export_files:
  89. if s.find(e) is not None and s.find(e).text is not None and s.find(e).text[-4:] == ".csv":
  90. result["files"][e] = s.find(e).text
  91. for e in export_format:
  92. if s.find(e) is not None and s.find(e).text != "":
  93. result["format"][e] = s.find(e).text
  94. result["format"]["NeueHerstellerkontenAnlegen"] = result["format"]["NeueHerstellerkontenAnlegen"] == "true"
  95. return result
  96. class GCStruct:
  97. config = {
  98. "path": "c:/projekte/python/gcstruct",
  99. "path2": "c:/projekte/python/gcstruct",
  100. "file": "c:/projekte/python/gcstruct/config/config.xml",
  101. "output": "gcstruct.json",
  102. "default": [],
  103. "special": {},
  104. "special2": {
  105. "Planner": ["Kostenstelle", "Ebene1", "Ebene2"],
  106. "Test": ["Ebene1", "Ebene2"],
  107. },
  108. "columns": [
  109. "Konto_Nr",
  110. "Konto_Bezeichnung",
  111. "Konto_Art",
  112. "Konto_KST",
  113. "Konto_STK",
  114. "Konto_1",
  115. "Konto_2",
  116. "Konto_3",
  117. "Konto_4",
  118. "Konto_5",
  119. ],
  120. "struct": {},
  121. "export": {},
  122. }
  123. json_result = {
  124. "accounts": {},
  125. "tree": {},
  126. "flat": {},
  127. "struct_export": {},
  128. "skr51_vars": {},
  129. }
  130. structure_ids = []
  131. translate = {
  132. "Konto_Nr": "SKR51",
  133. "Kostenstelle": "KST",
  134. "Absatzkanal": "ABS",
  135. "Kostenträger": "KTR",
  136. "Marke": "MAR",
  137. "Standort": "STA",
  138. "Marke_HBV": "MAR",
  139. "Standort_HBV": "BMC",
  140. }
  141. def __init__(self, struct_dir, export_dir=None):
  142. self.config["path"] = struct_dir
  143. self.config["path2"] = struct_dir + "/export" if export_dir is None else export_dir
  144. self.config["file"] = f"{self.config['path']}/config/gcstruct.xml"
  145. if not Path(self.config["file"]).exists():
  146. self.config["file"] = f"{self.config['path']}/config/config.xml"
  147. cfg = ET.parse(self.config["file"])
  148. self.config["default"] = [
  149. s.find("Name").text for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
  150. ]
  151. self.config["export"] = dict(
  152. [
  153. (s.find("Name").text, get_structure_exports(s))
  154. for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
  155. ]
  156. )
  157. struct = dict([(x, get_default_cols(i)) for (i, x) in enumerate(self.config["default"])])
  158. struct.update(self.config["special"])
  159. self.config["struct"] = struct
  160. # print(self.config['struct'])
  161. def export_header(self, filetype):
  162. return {
  163. "ExportStk": [],
  164. "ExportStrukturenStk": [],
  165. "ExportAdjazenz": [],
  166. "ExportUebersetzung": [
  167. "Konto_Nr_Hersteller",
  168. "Konto_Nr_Split",
  169. "Konto_Nr_Haendler",
  170. "Info",
  171. ],
  172. "ExportUebersetzungStk": [
  173. "Konto_Nr_Hersteller",
  174. "Konto_Nr_Split",
  175. "Konto_Nr_Haendler",
  176. "Info",
  177. ],
  178. "ExportHerstellerKontenrahmen": [
  179. "Konto_Nr",
  180. "Konto_Bezeichnung",
  181. "Case",
  182. "Info",
  183. ],
  184. }[filetype]
  185. def accounts_from_csv(self, struct):
  186. max_rows = (len(self.config["default"]) + 1) * 10
  187. with open(
  188. f"{self.config['path']}/Kontenrahmen/Kontenrahmen.csv",
  189. "r",
  190. encoding="latin-1",
  191. ) as f:
  192. csv_reader = csv.reader(f, delimiter=";")
  193. imported_csv = [row[:max_rows] for row in csv_reader]
  194. df = pd.DataFrame.from_records(np.array(imported_csv[1:], dtype="object"), columns=imported_csv[0]).fillna(
  195. value=""
  196. )
  197. df = df.rename(columns={"Kostenstelle": "Konto_KST", "STK": "Konto_STK"})
  198. for i, (s, cols) in enumerate(struct.items()):
  199. df[s] = reduce(lambda x, y: x + ";" + df[y], cols, "")
  200. df[s] = df[s].apply(lambda x: x[1:])
  201. df["LetzteEbene" + str(i + 1)] = df[s].apply(lambda x: last_layer(x))
  202. df["LetzteEbene" + str(i + 1) + "_Nr"] = df[s].apply(lambda x: split_it(x, 0))
  203. df["LetzteEbene" + str(i + 1) + "_Bez"] = df[s].apply(lambda x: split_it(x, 1))
  204. df["Herstellerkonto_Nr"] = df["LetzteEbene1_Nr"]
  205. df["Herstellerkonto_Bez"] = df["LetzteEbene1_Bez"]
  206. return df
  207. def tree_from_xml(self, struct, df):
  208. result = {}
  209. for s, cols in struct.items():
  210. try:
  211. tree = ET.parse(f"{self.config['path']}/Xml/{s}.xml")
  212. result[s] = self.get_tree_root(tree.getroot(), s)
  213. except FileNotFoundError:
  214. print("XML-Datei fehlt")
  215. used_entries = [x.split(";")[1:] for x in set(df[s].to_numpy())]
  216. print(used_entries)
  217. root = ET.Element("Ebene")
  218. root.set("Name", s)
  219. result[s] = self.get_tree_root(root, s)
  220. # self.json_result["tree"][s] = get_tree_from_accounts(cols, [])
  221. return result
  222. def get_structure_and_tree(self):
  223. df = self.accounts_from_csv(self.config["struct"])
  224. self.json_result["accounts"] = df.to_dict("records")
  225. self.structure_ids = df.melt(
  226. id_vars=["Konto_Nr"],
  227. value_vars=self.config["struct"].keys(),
  228. var_name="Struktur",
  229. value_name="id",
  230. ).groupby(by=["Struktur", "id"])
  231. self.json_result["tree"] = self.tree_from_xml(self.config["struct"], df)
  232. for s, cols in self.config["struct"].items():
  233. self.json_result["flat"][s] = get_flat(self.json_result["tree"][s])
  234. for s, entries in self.json_result["flat"].items():
  235. cols = self.config["struct"][s]
  236. df_temp = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
  237. self.json_result["struct_export"][s] = df_temp.to_dict(orient="records")
  238. # {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
  239. return self.json_result
  240. def export_structure_and_tree(self):
  241. json.dump(
  242. self.json_result,
  243. open(f"{self.config['path2']}/{self.config['output']}", "w"),
  244. indent=2,
  245. )
  246. def get_accounts(self, structure, id):
  247. return [x["Konto_Nr"] for x in self.json_result["accounts"] if x[structure] == id]
  248. # return []
  249. # res = self.structure_ids.groups.get((structure, id))
  250. # if res is None:
  251. # return []
  252. # return res.values
  253. def export(self):
  254. for s in self.config["export"].keys():
  255. for filetype, filename in self.config["export"][s]["files"].items():
  256. with open(self.config["path2"] + "/" + filename, "w") as fwh:
  257. fwh.write("Konto_Nr_Hersteller;Konto_Nr_Split;Konto_Nr_Haendler;Info\n")
  258. # 'Hersteller'Konto_Nr;Konto_Bezeichnung;Case;Info'
  259. for a in self.json_result["accounts"]:
  260. if a["Herstellerkonto_Nr"] != "":
  261. account = chevron.render(
  262. self.config["export"]["SKR51"]["format"]["HerstellerkontoFormat"],
  263. a,
  264. )
  265. fwh.write(
  266. account + ";" + account + ";" + a["Konto_Nr"] + ";" + "\n"
  267. ) # a['Herstellerkonto_Bez']
  268. def get_tree(self, node, parents, structure):
  269. result = []
  270. for child in node:
  271. p = get_parents_list(parents)
  272. parents.append(child.attrib["Name"])
  273. id = ";".join(parents) + ";" * (10 - len(parents))
  274. result.append(
  275. {
  276. "id": id,
  277. "text": child.attrib["Name"],
  278. "children": self.get_tree(child, parents, structure),
  279. "parents": p,
  280. "accounts": self.get_accounts(structure, id),
  281. "level": len(parents),
  282. "form": child.attrib.get("Split", ""),
  283. }
  284. )
  285. parents.pop()
  286. return result
  287. def get_tree_root(self, node, structure):
  288. id = ";" * 9
  289. return {
  290. "id": id,
  291. "text": node.attrib["Name"],
  292. "children": self.get_tree(node, [], structure),
  293. "parents": [],
  294. "accounts": [],
  295. "level": 0,
  296. "form": "",
  297. }
  298. def post_structure_and_tree(self):
  299. json_post = json.load(open(f"{self.config['path']}/{self.config['output']}", "r"))
  300. # Kontenrahmen.csv
  301. ebenen = ["Ebene" + str(i) for i in range(1, len(self.config["default"]) * 10 + 1)]
  302. header = ";".join(self.config["columns"] + ebenen)
  303. cols = self.config["columns"] + self.config["default"]
  304. with open(
  305. self.config["path"] + "/Kontenrahmen/Kontenrahmen_out.csv",
  306. "w",
  307. encoding="latin-1",
  308. ) as f:
  309. f.write(header + "\n")
  310. for row in json_post["Kontenrahmen"]:
  311. f.write(";".join([row[e] for e in cols]) + "\n")
  312. # print(header)
  313. # xml und evtl. Struktur.csv
  314. for i, s in enumerate(self.config["default"]):
  315. with open(
  316. f"{self.config['path']}/Strukturen/Kontenrahmen.csv/{s}_out.csv",
  317. "w",
  318. encoding="latin-1",
  319. ) as f:
  320. f.write(";".join(["Ebene" + str(i * 10 + j) for j in range(1, 11)]) + "\n")
  321. rows = structure_from_tree({"id": ";" * 9, "children": json_post[s]})
  322. f.write("\n".join(rows))
  323. # with open(self.config['path'] + "/Strukturen/Kontenrahmen.csv/" + structure + "_2.csv", "w", encoding="latin-1") as f:
  324. root = ET.Element("Ebene")
  325. root.set("Name", s)
  326. xml_from_tree(root, {"id": ";" * 9, "children": json_post[s]})
  327. with open(f"{self.config['path']}/Xml/{s}_out.xml", "w", encoding="utf-8") as f:
  328. f.write(BeautifulSoup(ET.tostring(root), "xml").prettify())
  329. def skr51_translate(self, accounts_combined_files):
  330. df = self.accounts_from_csv(self.config["struct"])
  331. df_translate = {}
  332. for i, (t_from, t_to) in enumerate(self.translate.items()):
  333. last = "LetzteEbene" + str(i + 1)
  334. from_label = [
  335. "Konto_Nr",
  336. last,
  337. last + "_Nr",
  338. last + "_Bez",
  339. "Ebene" + str(i * 10 + 1),
  340. "Ebene" + str(i * 10 + 2),
  341. ]
  342. to_label = [
  343. t_to,
  344. t_to + "_Ebene",
  345. t_to + "_Nr",
  346. t_to + "_Bez",
  347. "Ebene1",
  348. "Ebene2",
  349. ]
  350. df_translate[t_from] = df[df[last + "_Nr"] != ""][from_label].rename(
  351. columns=dict(zip(from_label, to_label))
  352. )
  353. # print(df_translate[t_to].head())
  354. df2 = []
  355. for ac_file in accounts_combined_files:
  356. df2.append(
  357. pd.read_csv(
  358. ac_file,
  359. decimal=",",
  360. sep=";",
  361. encoding="latin-1",
  362. converters={i: str for i in range(0, 200)},
  363. )
  364. )
  365. df_source = pd.concat(df2)
  366. df3 = df_source.copy()
  367. df3["Konto_Nr"] = df3["Konto_Nr"] + "_STK"
  368. df_source = pd.concat([df_source, df3])
  369. for t_from, t_to in self.translate.items():
  370. if t_to == "SKR51":
  371. df_source["SKR51"] = df_source["Konto_Nr"]
  372. elif t_from in ["Marke_HBV"]:
  373. df_source["Marke_HBV"] = df_source["Marke"]
  374. elif t_from in ["Standort_HBV"]:
  375. df_source["Standort_HBV"] = df_source["Standort"] + "_" + df_source["Marke"]
  376. df_source["BMC"] = "BMC_" + df_source["Standort_HBV"]
  377. elif t_to == "KTR":
  378. df_source["KTR"] = np.where(
  379. df_source["Kostenträger_Quelle"] == "TZ",
  380. "KTR_TZ_" + df_source["Kostenträger"],
  381. "KTR_00",
  382. )
  383. df_source["KTR"] = np.where(
  384. df_source["Kostenträger_Quelle"].isin(["NW", "SC"]),
  385. "KTR_"
  386. + df_source["Kostenträger_Quelle"]
  387. + "_"
  388. + df_source["Marke"]
  389. + "_"
  390. + df_source["Kostenträger"],
  391. df_source["KTR"],
  392. )
  393. else:
  394. df_source[t_to] = t_to + "_" + df_source[t_from]
  395. df_source = df_source.merge(df_translate[t_from], how="left", on=[t_to], suffixes=(None, "_" + t_to))
  396. df_source[t_to + "_Nr"] = np.where(
  397. df_source[t_to + "_Nr"].isna(),
  398. df_source[t_from],
  399. df_source[t_to + "_Nr"],
  400. )
  401. df_source["Konto_Nr_SKR51"] = (
  402. df_source["MAR_Nr"]
  403. + "-"
  404. + df_source["STA_Nr"]
  405. + "-"
  406. + df_source["SKR51_Nr"]
  407. + "-"
  408. + df_source["KST_Nr"]
  409. + "-"
  410. + df_source["ABS_Nr"]
  411. + "-"
  412. + df_source["KTR_Nr"]
  413. )
  414. df_source["Konto_Nr_Händler"] = (
  415. df_source["Marke"]
  416. + "-"
  417. + df_source["Standort"]
  418. + "-"
  419. + df_source["Konto_Nr"]
  420. + "-"
  421. + df_source["Kostenstelle"]
  422. + "-"
  423. + df_source["Absatzkanal"]
  424. + "-"
  425. + df_source["Kostenträger"]
  426. )
  427. # df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)
  428. df_source["MAR_Nr_MAR"] = np.where(df_source["MAR_Nr_MAR"].isna(), "0000", df_source["MAR_Nr_MAR"])
  429. from_label = [
  430. "MAR_Nr",
  431. "STA_Nr",
  432. "SKR51_Nr",
  433. "KST_Nr",
  434. "ABS_Nr",
  435. "KTR_Nr",
  436. "KTR_Ebene",
  437. "Konto_Nr_Händler",
  438. "Konto_Nr_SKR51",
  439. "MAR_Nr_MAR",
  440. "BMC_Nr",
  441. ]
  442. to_label = [
  443. "Marke",
  444. "Standort",
  445. "Konto_Nr",
  446. "Kostenstelle",
  447. "Absatzkanal",
  448. "Kostenträger",
  449. "Kostenträger_Ebene",
  450. "Konto_Nr_Händler",
  451. "Konto_Nr_SKR51",
  452. "Marke_HBV",
  453. "Standort_HBV",
  454. ]
  455. df_combined = df_source[from_label].rename(columns=dict(zip(from_label, to_label)))
  456. df_combined.to_csv(
  457. f"{self.config['path2']}/Kontenrahmen_uebersetzt.csv",
  458. sep=";",
  459. encoding="latin-1",
  460. index=False,
  461. )
  462. def skr51_translate2(self, accounts_combined_file):
  463. df = self.accounts_from_csv(self.config["struct"])
  464. df_list = []
  465. for i, s in enumerate(self.config["struct"].keys()):
  466. from_label = [
  467. "Konto_Nr",
  468. "Ebene" + str(i * 10 + 1),
  469. "Ebene" + str(i * 10 + 2),
  470. "Ebene" + str(i * 10 + 3),
  471. ]
  472. to_label = ["Konto_Nr", "key", "value", "value2"]
  473. df_temp = df[from_label].rename(columns=dict(zip(from_label, to_label)))
  474. df_temp["key"] = "{" + s + "/" + df_temp["key"] + "}"
  475. df_list.append(df_temp[df_temp["value"] != ""])
  476. df_translate = pd.concat(df_list)
  477. # df_translate.to_csv(f"{self.config['path2']}/SKR51_Variablen.csv", sep=';', encoding='latin-1', index=False)
  478. df_source = pd.read_csv(
  479. accounts_combined_file,
  480. decimal=",",
  481. sep=";",
  482. encoding="latin-1",
  483. converters={i: str for i in range(0, 200)},
  484. )
  485. df_source = df_source[df_source["Konto_Nr"].str.contains("_STK") == False]
  486. df_source["Konto_Nr_Gesamt"] = df_source["Konto_Nr"]
  487. df_source["Konto_Nr"] = np.where(
  488. df_source["Konto_Nr"].str.contains(r"^[4578]"),
  489. df_source["Konto_Nr"] + "_" + df_source["Kostenstelle"].str.slice(stop=1),
  490. df_source["Konto_Nr"],
  491. )
  492. df_source["Konto_Nr"] = np.where(
  493. df_source["Konto_Nr"].str.contains(r"^5\d+_4"),
  494. df_source["Konto_Nr"] + df_source["Kostenstelle"].str.slice(start=1, stop=2),
  495. df_source["Konto_Nr"],
  496. )
  497. df_source = df_source.merge(df, how="left", on=["Konto_Nr"])
  498. # rows = df_source.shape[0]
  499. df_source["value"] = ""
  500. cols = get_default_cols(0)
  501. for t_from, t_to in self.translate.items():
  502. if t_from in ["Marke_HBV", "Standort_HBV"]:
  503. continue
  504. if t_from == "Konto_Nr":
  505. df_source[t_to] = df_source[t_from]
  506. else:
  507. df_source[t_to] = t_to + "_" + df_source[t_from]
  508. for e in cols:
  509. df_source = df_source.merge(
  510. df_translate,
  511. how="left",
  512. left_on=[t_to, e],
  513. right_on=["Konto_Nr", "key"],
  514. suffixes=(None, "_" + t_to + "_" + e),
  515. )
  516. df_source[e] = np.where(
  517. df_source["value_" + t_to + "_" + e].notna(),
  518. df_source["value_" + t_to + "_" + e],
  519. df_source[e],
  520. )
  521. # if df_source.shape[0] > rows:
  522. # print(t_to + '_' + e + ': ' + str(df_source.shape[0]))
  523. # df_source.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
  524. # df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
  525. for e in cols:
  526. df_source[e] = np.where(
  527. df_source[e].str.startswith("{"),
  528. df_source[e].str.extract(r"\/(.*)}", expand=False) + " falsch",
  529. df_source[e],
  530. ) # df_source[e].str.extract(r'/(.*)}') +
  531. df_source[e] = np.where(df_source[e] == "[KTR]", df_source["Kostenträger_Ebene"], df_source[e])
  532. # df_all[df_all['Ebene1'] == ]
  533. # print(df_source.head())
  534. df_source["Konto_neu"] = (
  535. df_source["Marke"]
  536. + "-"
  537. + df_source["Standort"]
  538. + "-"
  539. + df_source["Konto_Nr"]
  540. + "-"
  541. + df_source["Kostenstelle"]
  542. + "-"
  543. + df_source["Absatzkanal"]
  544. + "-"
  545. + df_source["Kostenträger"]
  546. + " - "
  547. + df_source["Konto_Bezeichnung"]
  548. )
  549. df_source["Ebene1_empty"] = df_source["Ebene1"].isna() # , df_source['Ebene1'].map(lambda x: x == ''))
  550. df_source["Konto_neu"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_neu"])
  551. df_source["Ebene1"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Ebene1"])
  552. df_source["Konto_Gruppe"] = df_source["Konto_Nr"] + " - " + df_source["Konto_Bezeichnung"]
  553. df_source["Konto_Gruppe"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_Gruppe"])
  554. df_source["Konto_Gesamt"] = df_source["Konto_Nr_Gesamt"] + " - " + df_source["Konto_Bezeichnung"]
  555. df_amount = df_source[df_source["Ebene1"] == "Umsatzerlöse"].reset_index()
  556. df_amount["Ebene1"] = "verkaufte Stückzahlen"
  557. df_amount["Ebene72"] = "verkaufte Stückzahlen"
  558. df_amount["Konto_neu"] = "STK " + df_amount["Konto_neu"]
  559. df_amount["Konto_Nr_Händler"] = df_amount["Konto_Nr_Händler"] + "_STK"
  560. df_amount["Konto_Gruppe"] = "STK " + df_amount["Konto_Gruppe"]
  561. df_amount["Konto_Gesamt"] = "STK " + df_amount["Konto_Gesamt"]
  562. df_source = pd.concat([df_source, df_amount])
  563. df_source["BWA"] = "Ebene81" in df_source.columns
  564. df_source["Ebene91"] = np.where(df_source["BWA"], df_source["Ebene81"], "")
  565. df_source["Ebene92"] = np.where(df_source["BWA"], df_source["Ebene82"], "")
  566. df_source["Ebene93"] = np.where(df_source["BWA"], df_source["Ebene83"], "")
  567. df_source["Ebene94"] = np.where(df_source["BWA"], df_source["Ebene84"], "")
  568. df_source["Ebene95"] = np.where(df_source["BWA"], df_source["Ebene85"], "")
  569. df_source["Ebene96"] = np.where(df_source["BWA"], df_source["Ebene86"], "")
  570. df_source["Ebene97"] = np.where(df_source["BWA"], df_source["Ebene87"], "")
  571. df_source["Ebene98"] = np.where(df_source["BWA"], df_source["Ebene88"], "")
  572. df_source["Ebene99"] = np.where(df_source["BWA"], df_source["Ebene89"], "")
  573. df_source["Ebene100"] = np.where(df_source["BWA"], df_source["Ebene90"], "")
  574. df_source["GuV"] = df_source["Ebene71"] == "GuV"
  575. df_source["Ebene81"] = np.where(df_source["GuV"], df_source["Ebene72"], "Bilanz")
  576. df_source["Ebene82"] = np.where(df_source["GuV"], df_source["Ebene73"], "")
  577. df_source["Ebene83"] = np.where(df_source["GuV"], df_source["Ebene74"], "")
  578. df_source["Ebene84"] = np.where(df_source["GuV"], df_source["Ebene75"], "")
  579. df_source["Ebene85"] = np.where(df_source["GuV"], df_source["Ebene76"], "")
  580. df_source["Ebene86"] = np.where(df_source["GuV"], df_source["Ebene77"], "")
  581. df_source["Ebene87"] = np.where(df_source["GuV"], df_source["Ebene78"], "")
  582. df_source["Ebene88"] = np.where(df_source["GuV"], df_source["Ebene79"], "")
  583. df_source["Ebene89"] = np.where(df_source["GuV"], df_source["Ebene80"], "")
  584. df_source["Ebene90"] = ""
  585. df_source["Ebene71"] = np.where(df_source["GuV"], "GuV", df_source["Ebene72"])
  586. df_source["Ebene72"] = np.where(df_source["GuV"], "", df_source["Ebene73"])
  587. df_source["Ebene73"] = np.where(df_source["GuV"], "", df_source["Ebene74"])
  588. df_source["Ebene74"] = np.where(df_source["GuV"], "", df_source["Ebene75"])
  589. df_source["Ebene75"] = np.where(df_source["GuV"], "", df_source["Ebene76"])
  590. df_source["Ebene76"] = np.where(df_source["GuV"], "", df_source["Ebene77"])
  591. df_source["Ebene77"] = np.where(df_source["GuV"], "", df_source["Ebene78"])
  592. df_source["Ebene78"] = np.where(df_source["GuV"], "", df_source["Ebene79"])
  593. df_source["Ebene79"] = np.where(df_source["GuV"], "", df_source["Ebene80"])
  594. df_source["Ebene80"] = ""
  595. df_source["Susa"] = df_source["Konto_Gruppe"].str.slice(stop=1)
  596. df_source["Konto_KST"] = ""
  597. df_source["GuV_Bilanz"] = df_source["Konto_Art"]
  598. from_label = ["Konto_neu", "Konto_Nr_Händler"]
  599. to_label = ["Konto", "Acct_Nr"]
  600. df_source = df_source.rename(columns=dict(zip(from_label, to_label)))
  601. df_source = df_source[
  602. [
  603. "Konto",
  604. "Acct_Nr",
  605. "Konto_Bezeichnung",
  606. "GuV_Bilanz",
  607. "Konto_KST",
  608. "Konto_STK",
  609. "Konto_1",
  610. "Konto_2",
  611. "Konto_3",
  612. "Konto_4",
  613. "Konto_5",
  614. ]
  615. + get_default_cols(0)
  616. + get_default_cols(7)
  617. + get_default_cols(8)
  618. + get_default_cols(9)
  619. + ["Konto_Gruppe", "Konto_Nr_Gesamt", "Konto_Gesamt", "Susa"]
  620. ]
  621. df_source.to_csv(
  622. f"{self.config['path2']}/SKR51_Uebersetzung.csv",
  623. sep=";",
  624. encoding="latin-1",
  625. index=False,
  626. )
  627. def skr51_vars(self):
  628. self.get_structure_and_tree()
  629. cols = get_default_cols(0)
  630. df_temp = pd.read_csv(
  631. f"{self.config['path']}/Export/Kostentraeger.csv",
  632. decimal=",",
  633. sep=";",
  634. encoding="latin-1",
  635. converters={i: str for i in range(0, 200)},
  636. )
  637. df_temp["value"] = df_temp["Ebene33"]
  638. df_temp["key"] = "[KTR]"
  639. df_temp = df_temp[df_temp["value"].str.contains(" - ")]
  640. df_list = [df_temp[["key", "value"]]]
  641. for s, entries in self.json_result["flat"].items():
  642. df = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
  643. df["key"] = df[cols[0]].apply(lambda x: "{" + s + "/" + x + "}")
  644. df["value"] = df[cols[1]]
  645. df_list.append(df[["key", "value"]])
  646. df = pd.concat(df_list)
  647. df_vars = df[df["value"] != ""]
  648. # df_vars.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
  649. df_main = pd.DataFrame(
  650. [x["id"].split(";") for x in self.json_result["flat"]["SKR51"]],
  651. columns=cols,
  652. )
  653. df_main["value"] = ""
  654. for c in cols:
  655. df_main = df_main.merge(df_vars, how="left", left_on=c, right_on="key", suffixes=(None, "_" + c))
  656. df_main[c] = np.where(df_main["value_" + c].isna(), df_main[c], df_main["value_" + c])
  657. df_amount = df_main[df_main["Ebene1"] == "Umsatzerlöse"].reset_index()
  658. df_amount["Ebene1"] = "verkaufte Stückzahlen"
  659. df_main = pd.concat([df_main, df_amount])
  660. # from_label = cols
  661. to_label = cols # get_default_cols(9)
  662. # df_main = df_main.rename(columns=dict(zip(from_label, to_label)))
  663. df_main[to_label].to_csv(
  664. f"{self.config['path2']}/SKR51_Struktur.csv",
  665. sep=";",
  666. encoding="latin-1",
  667. index_label="Sortierung",
  668. )
  669. def gcstruct_uebersetzung(base_dir=None):
  670. # base_dir = 'P:/SKR51_GCStruct/'
  671. if base_dir is None:
  672. base_dir = Path(".").absolute()
  673. else:
  674. base_dir = Path(base_dir)
  675. import_dir = base_dir
  676. if base_dir.name == "scripts":
  677. if base_dir.parent.parent.name == "Portal":
  678. base_dir = base_dir.parent.parent.parent
  679. import_dir = base_dir.joinpath("Portal/System/IQD/Belege/Kontenrahmen")
  680. else:
  681. base_dir = base_dir.parent.parent
  682. import_dir = base_dir.joinpath("System/OPTIMA/Export")
  683. elif not base_dir.joinpath("GCStruct_Aufbereitung").exists():
  684. base_dir = Path("//192.168.2.21/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/")
  685. if not base_dir.exists():
  686. base_dir = Path("//media/fileserver1/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/")
  687. import_dir = base_dir
  688. struct = GCStruct(str(base_dir.joinpath("GCStruct_Aufbereitung")))
  689. struct.skr51_translate(import_dir.glob("Kontenrahmen_kombiniert*.csv"))
  690. print("Kontenrahmen_uebersetzt.csv erstellt.")
  691. # copyfile('c:/Projekte/Python/Gcstruct/Kontenrahmen_kombiniert.csv', base_dir + 'GCStruct_Modell/Export/Kontenrahmen_kombiniert.csv')
  692. struct2 = GCStruct(str(base_dir.joinpath("GCStruct_Modell")))
  693. struct2.skr51_translate2(str(base_dir.joinpath("GCStruct_Aufbereitung/Export/Kontenrahmen_uebersetzt.csv")))
  694. print("SKR51_Uebersetzung.csv erstellt.")
  695. struct2.skr51_vars()
  696. print("SKR51_Struktur.csv erstellt.")
  697. def dresen():
  698. struct = GCStruct("c:/projekte/GCHRStruct_Hyundai_Export")
  699. struct.get_structure_and_tree()
  700. struct.export_structure_and_tree()
  701. struct.export()
  702. def reisacher():
  703. base_dir = "X:/Robert/Planung Reisacher/GCStruct_neue_Struktur_Planung"
  704. if not Path(base_dir).exists():
  705. base_dir = "/media/fileserver1/austausch/Robert/Planung Reisacher/GCStruct_neue_Struktur_Planung"
  706. struct = GCStruct(base_dir)
  707. struct.get_structure_and_tree()
  708. struct.export_structure_and_tree()
  709. # json.dump(res['flat'], open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)
# Script entry point: run the Reisacher export by default.
if __name__ == "__main__":
    # struct = GCStruct('c:/projekte/gcstruct_dresen')
    # struct = GCStruct('c:/projekte/python/gcstruct')
    # struct = GCStruct('c:/projekte/python/gcstruct_reisacher_planung')
    reisacher()
    # dresen()