import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
import json
import csv
import re
import chevron

# from shutil import copyfile
from bs4 import BeautifulSoup
from functools import reduce
from pathlib import Path
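
# Overview (inferred from the code below): this module reads a GCStruct working
# directory (Kontenrahmen/Kontenrahmen.csv, Xml/<Struktur>.xml and
# config/gcstruct.xml or config/config.xml), builds the account structure trees
# plus a flat representation, and writes JSON/CSV exports; the skr51_* methods
# translate dealer charts of accounts into the SKR51 scheme.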


def get_flat(node):
    result = [
        {
            "id": node["id"],
            "text": node["text"],
            "children": [x["id"] for x in node["children"]],
            "children2": [],
            "parents": node["parents"],
            "accounts": node["accounts"],
            "costcenter": "",
            "level": node["level"],
            "drilldown": node["level"]
            < 2,  # (node['level'] != 2 and len(node['accounts']) == 0),
            "form": node["form"],
            "accountlevel": False,
            "absolute": True,
            "seasonal": True,
            "status": "0",
            "values": [],
            "values2": {},
        }
    ]
    for child in node["children"]:
        result += get_flat(child)
    return result


def get_parents_list(p_list):
    id = ";".join(p_list) + ";" * (10 - len(p_list))
    if len(p_list) > 0:
        return [id] + get_parents_list(p_list[:-1])
    return [";" * 9]


def structure_from_tree(node):
    result = []
    result.append(node["id"])
    for child in node["children"]:
        result.extend(structure_from_tree(child))
    return result


def xml_from_tree(xml_node, tree_node):
    for child in tree_node["children"]:
        element = ET.SubElement(xml_node, "Ebene")
        element.set("Name", child["text"])
        xml_from_tree(element, child)


def split_it(text, index):
    try:
        return re.findall(r"([^;]+) - ([^;]*);;", text)[0][index]
    except Exception:
        return ""


def last_layer(text):
    try:
        return re.findall(r"([^;]+);;", text)[0]
    except Exception:
        return ""


def get_default_cols(i):
    return ["Ebene" + str(j) for j in range(i * 10 + 1, (i + 1) * 10 + 1)]


def get_structure_exports(s):
    result = {
        "files": {},
        "format": {
            "KontoFormat": "{0} - {1}",
            "HerstellerkontoFormat": "{{Herstellerkonto_Nr}}",
            "HerstellerBezeichnungFormat": "{{Herstellerkonto_Bez}}",
            "NeueHerstellerkontenAnlegen": False,
        },
    }
    export_files = [
        "ExportStk",
        "ExportStrukturenStk",
        "ExportAdjazenz",
        "ExportUebersetzung",
        "ExportUebersetzungStk",
        "ExportHerstellerKontenrahmen",
    ]
    export_format = [
        "KontoFormat",
        "HerstellerkontoFormat",
        "HerstellerBezeichnungFormat",
        "NeueHerstellerkontenAnlegen",
    ]
    for e in export_files:
        if (
            s.find(e) is not None
            and s.find(e).text is not None
            and s.find(e).text[-4:] == ".csv"
        ):
            result["files"][e] = s.find(e).text
    for e in export_format:
        if s.find(e) is not None and s.find(e).text != "":
            result["format"][e] = s.find(e).text
    result["format"]["NeueHerstellerkontenAnlegen"] = (
        result["format"]["NeueHerstellerkontenAnlegen"] == "true"
    )
    return result
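
# Rough shape of the config XML consumed by GCStruct.__init__() and
# get_structure_exports(); element names are taken from the code, the sample
# values are made up:
#   <Strukturdefinitionen>
#     <Struktur>
#       <Name>SKR51</Name>
#       <ExportUebersetzung>Uebersetzung.csv</ExportUebersetzung>
#       <HerstellerkontoFormat>{{Herstellerkonto_Nr}}</HerstellerkontoFormat>
#       ...
#     </Struktur>
#   </Strukturdefinitionen>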


class GCStruct:
    config = {
        "path": "c:/projekte/python/gcstruct",
        "path2": "c:/projekte/python/gcstruct",
        "file": "c:/projekte/python/gcstruct/config/config.xml",
        "output": "gcstruct.json",
        "default": [],
        "special": {},
        "special2": {
            "Planner": ["Kostenstelle", "Ebene1", "Ebene2"],
            "Test": ["Ebene1", "Ebene2"],
        },
        "columns": [
            "Konto_Nr",
            "Konto_Bezeichnung",
            "Konto_Art",
            "Konto_KST",
            "Konto_STK",
            "Konto_1",
            "Konto_2",
            "Konto_3",
            "Konto_4",
            "Konto_5",
        ],
        "struct": {},
        "export": {},
    }
    json_result = {
        "accounts": {},
        "tree": {},
        "flat": {},
        "struct_export": {},
        "skr51_vars": {},
    }
    structure_ids = []
    translate = {
        "Konto_Nr": "SKR51",
        "Kostenstelle": "KST",
        "Absatzkanal": "ABS",
        "Kostenträger": "KTR",
        "Marke": "MAR",
        "Standort": "STA",
        "Marke_HBV": "MAR",
        "Standort_HBV": "BMC",
    }

    def __init__(self, struct_dir, export_dir=None):
        self.config["path"] = struct_dir
        self.config["path2"] = (
            struct_dir + "/export" if export_dir is None else export_dir
        )
        self.config["file"] = f"{self.config['path']}/config/gcstruct.xml"
        if not Path(self.config["file"]).exists():
            self.config["file"] = f"{self.config['path']}/config/config.xml"
        cfg = ET.parse(self.config["file"])
        self.config["default"] = [
            s.find("Name").text
            for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
        ]
        self.config["export"] = dict(
            [
                (s.find("Name").text, get_structure_exports(s))
                for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
            ]
        )
        struct = dict(
            [(x, get_default_cols(i)) for (i, x) in enumerate(self.config["default"])]
        )
        struct.update(self.config["special"])
        self.config["struct"] = struct
        # print(self.config['struct'])

    def export_header(self, filetype):
        return {
            "ExportStk": [],
            "ExportStrukturenStk": [],
            "ExportAdjazenz": [],
            "ExportUebersetzung": [
                "Konto_Nr_Hersteller",
                "Konto_Nr_Split",
                "Konto_Nr_Haendler",
                "Info",
            ],
            "ExportUebersetzungStk": [
                "Konto_Nr_Hersteller",
                "Konto_Nr_Split",
                "Konto_Nr_Haendler",
                "Info",
            ],
            "ExportHerstellerKontenrahmen": [
                "Konto_Nr",
                "Konto_Bezeichnung",
                "Case",
                "Info",
            ],
        }[filetype]
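
    # accounts_from_csv() below reads <path>/Kontenrahmen/Kontenrahmen.csv
    # (";"-separated, latin-1 encoded). Inferred from the parsing and export
    # code: the header row starts with the account columns from
    # config["columns"] and is followed by one block of ten "Ebene" columns
    # per structure defined in the config XML.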

    def accounts_from_csv(self, struct):
        max_rows = (len(self.config["default"]) + 1) * 10
        with open(
            f"{self.config['path']}/Kontenrahmen/Kontenrahmen.csv",
            "r",
            encoding="latin-1",
        ) as f:
            csv_reader = csv.reader(f, delimiter=";")
            imported_csv = [row[:max_rows] for row in csv_reader]
        df = pd.DataFrame.from_records(
            np.array(imported_csv[1:], dtype="object"), columns=imported_csv[0]
        ).fillna(value="")
        df = df.rename(columns={"Kostenstelle": "Konto_KST", "STK": "Konto_STK"})
        for i, (s, cols) in enumerate(struct.items()):
            df[s] = reduce(lambda x, y: x + ";" + df[y], cols, "")
            df[s] = df[s].apply(lambda x: x[1:])
            df["LetzteEbene" + str(i + 1)] = df[s].apply(lambda x: last_layer(x))
            df["LetzteEbene" + str(i + 1) + "_Nr"] = df[s].apply(
                lambda x: split_it(x, 0)
            )
            df["LetzteEbene" + str(i + 1) + "_Bez"] = df[s].apply(
                lambda x: split_it(x, 1)
            )
        df["Herstellerkonto_Nr"] = df["LetzteEbene1_Nr"]
        df["Herstellerkonto_Bez"] = df["LetzteEbene1_Bez"]
        return df

    def tree_from_xml(self, struct, df):
        result = {}
        for s, cols in struct.items():
            try:
                tree = ET.parse(f"{self.config['path']}/Xml/{s}.xml")
                result[s] = self.get_tree_root(tree.getroot(), s)
            except FileNotFoundError:
                print("XML-Datei fehlt")
                used_entries = [x.split(";")[1:] for x in set(df[s].to_numpy())]
                print(used_entries)
                root = ET.Element("Ebene")
                root.set("Name", s)
                result[s] = self.get_tree_root(root, s)
                # self.json_result["tree"][s] = get_tree_from_accounts(cols, [])
        return result

    def get_structure_and_tree(self):
        df = self.accounts_from_csv(self.config["struct"])
        self.json_result["accounts"] = df.to_dict("records")
        self.structure_ids = df.melt(
            id_vars=["Konto_Nr"],
            value_vars=self.config["struct"].keys(),
            var_name="Struktur",
            value_name="id",
        ).groupby(by=["Struktur", "id"])
        self.json_result["tree"] = self.tree_from_xml(self.config["struct"], df)
        for s, cols in self.config["struct"].items():
            self.json_result["flat"][s] = get_flat(self.json_result["tree"][s])
        for s, entries in self.json_result["flat"].items():
            cols = self.config["struct"][s]
            df_temp = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
            self.json_result["struct_export"][s] = df_temp.to_dict(orient="records")
        # {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
        json.dump(
            self.json_result,
            open(f"{self.config['path2']}/{self.config['output']}", "w"),
            indent=2,
        )
        return self.json_result

    def get_accounts(self, structure, id):
        return [
            x["Konto_Nr"] for x in self.json_result["accounts"] if x[structure] == id
        ]
        # return []
        # res = self.structure_ids.groups.get((structure, id))
        # if res is None:
        #     return []
        # return res.values

    def export(self):
        for s in self.config["export"].keys():
            for filetype, filename in self.config["export"][s]["files"].items():
                with open(self.config["path2"] + "/" + filename, "w") as fwh:
                    fwh.write(
                        "Konto_Nr_Hersteller;Konto_Nr_Split;Konto_Nr_Haendler;Info\n"
                    )
                    # 'Hersteller'Konto_Nr;Konto_Bezeichnung;Case;Info'
                    for a in self.json_result["accounts"]:
                        if a["Herstellerkonto_Nr"] != "":
                            account = chevron.render(
                                self.config["export"]["SKR51"]["format"][
                                    "HerstellerkontoFormat"
                                ],
                                a,
                            )
                            fwh.write(
                                account
                                + ";"
                                + account
                                + ";"
                                + a["Konto_Nr"]
                                + ";"
                                + "\n"
                            )  # a['Herstellerkonto_Bez']

    def get_tree(self, node, parents, structure):
        result = []
        for child in node:
            p = get_parents_list(parents)
            parents.append(child.attrib["Name"])
            id = ";".join(parents) + ";" * (10 - len(parents))
            result.append(
                {
                    "id": id,
                    "text": child.attrib["Name"],
                    "children": self.get_tree(child, parents, structure),
                    "parents": p,
                    "accounts": self.get_accounts(structure, id),
                    "level": len(parents),
                    "form": child.attrib.get("Split", ""),
                }
            )
            parents.pop()
        return result

    def get_tree_root(self, node, structure):
        id = ";" * 9
        return {
            "id": id,
            "text": node.attrib["Name"],
            "children": self.get_tree(node, [], structure),
            "parents": [],
            "accounts": [],
            "level": 0,
            "form": "",
        }
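
    # Illustrative shape of a node produced by get_tree_root()/get_tree()
    # (the account numbers here are made up):
    #   {"id": "GuV;Umsatzerloese;;;;;;;;", "text": "Umsatzerloese",
    #    "children": [...], "parents": ["GuV;;;;;;;;;", ";;;;;;;;;"],
    #    "accounts": ["4010", ...], "level": 2, "form": ""}
    # get_flat() turns this tree into the flat row list stored in
    # json_result["flat"].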

    def post_structure_and_tree(self):
        json_post = json.load(
            open(f"{self.config['path']}/{self.config['output']}", "r")
        )
        # Kontenrahmen.csv
        ebenen = [
            "Ebene" + str(i) for i in range(1, len(self.config["default"]) * 10 + 1)
        ]
        header = ";".join(self.config["columns"] + ebenen)
        cols = self.config["columns"] + self.config["default"]
        with open(
            self.config["path"] + "/Kontenrahmen/Kontenrahmen_out.csv",
            "w",
            encoding="latin-1",
        ) as f:
            f.write(header + "\n")
            for row in json_post["Kontenrahmen"]:
                f.write(";".join([row[e] for e in cols]) + "\n")
        # print(header)
        # XML and, if needed, Struktur.csv
        for i, s in enumerate(self.config["default"]):
            with open(
                f"{self.config['path']}/Strukturen/Kontenrahmen.csv/{s}_out.csv",
                "w",
                encoding="latin-1",
            ) as f:
                f.write(
                    ";".join(["Ebene" + str(i * 10 + j) for j in range(1, 11)]) + "\n"
                )
                rows = structure_from_tree({"id": ";" * 9, "children": json_post[s]})
                f.write("\n".join(rows))
            # with open(self.config['path'] + "/Strukturen/Kontenrahmen.csv/" + structure + "_2.csv", "w", encoding="latin-1") as f:
            root = ET.Element("Ebene")
            root.set("Name", s)
            xml_from_tree(root, {"id": ";" * 9, "children": json_post[s]})
            with open(
                f"{self.config['path']}/Xml/{s}_out.xml", "w", encoding="utf-8"
            ) as f:
                f.write(BeautifulSoup(ET.tostring(root), "xml").prettify())

    def skr51_translate(self, accounts_combined_files):
        df = self.accounts_from_csv(self.config["struct"])
        df_translate = {}
        for i, (t_from, t_to) in enumerate(self.translate.items()):
            last = "LetzteEbene" + str(i + 1)
            from_label = [
                "Konto_Nr",
                last,
                last + "_Nr",
                last + "_Bez",
                "Ebene" + str(i * 10 + 1),
                "Ebene" + str(i * 10 + 2),
            ]
            to_label = [
                t_to,
                t_to + "_Ebene",
                t_to + "_Nr",
                t_to + "_Bez",
                "Ebene1",
                "Ebene2",
            ]
            df_translate[t_from] = df[df[last + "_Nr"] != ""][from_label].rename(
                columns=dict(zip(from_label, to_label))
            )
            # print(df_translate[t_to].head())
        df2 = []
        for ac_file in accounts_combined_files:
            df2.append(
                pd.read_csv(
                    ac_file,
                    decimal=",",
                    sep=";",
                    encoding="latin-1",
                    converters={i: str for i in range(0, 200)},
                )
            )
        df_source = pd.concat(df2)
        df3 = df_source.copy()
        df3["Konto_Nr"] = df3["Konto_Nr"] + "_STK"
        df_source = pd.concat([df_source, df3])
        for t_from, t_to in self.translate.items():
            if t_to == "SKR51":
                df_source["SKR51"] = df_source["Konto_Nr"]
            elif t_from in ["Marke_HBV"]:
                df_source["Marke_HBV"] = df_source["Marke"]
            elif t_from in ["Standort_HBV"]:
                df_source["Standort_HBV"] = (
                    df_source["Standort"] + "_" + df_source["Marke"]
                )
                df_source["BMC"] = "BMC_" + df_source["Standort_HBV"]
            elif t_to == "KTR":
                df_source["KTR"] = np.where(
                    df_source["Kostenträger_Quelle"] == "TZ",
                    "KTR_TZ_" + df_source["Kostenträger"],
                    "KTR_00",
                )
                df_source["KTR"] = np.where(
                    df_source["Kostenträger_Quelle"].isin(["NW", "SC"]),
                    "KTR_"
                    + df_source["Kostenträger_Quelle"]
                    + "_"
                    + df_source["Marke"]
                    + "_"
                    + df_source["Kostenträger"],
                    df_source["KTR"],
                )
            else:
                df_source[t_to] = t_to + "_" + df_source[t_from]
            df_source = df_source.merge(
                df_translate[t_from], how="left", on=[t_to], suffixes=(None, "_" + t_to)
            )
            df_source[t_to + "_Nr"] = np.where(
                df_source[t_to + "_Nr"].isna(),
                df_source[t_from],
                df_source[t_to + "_Nr"],
            )
        df_source["Konto_Nr_SKR51"] = (
            df_source["MAR_Nr"]
            + "-"
            + df_source["STA_Nr"]
            + "-"
            + df_source["SKR51_Nr"]
            + "-"
            + df_source["KST_Nr"]
            + "-"
            + df_source["ABS_Nr"]
            + "-"
            + df_source["KTR_Nr"]
        )
        df_source["Konto_Nr_Händler"] = (
            df_source["Marke"]
            + "-"
            + df_source["Standort"]
            + "-"
            + df_source["Konto_Nr"]
            + "-"
            + df_source["Kostenstelle"]
            + "-"
            + df_source["Absatzkanal"]
            + "-"
            + df_source["Kostenträger"]
        )
        # df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)
        df_source["MAR_Nr_MAR"] = np.where(
            df_source["MAR_Nr_MAR"].isna(), "0000", df_source["MAR_Nr_MAR"]
        )
        from_label = [
            "MAR_Nr",
            "STA_Nr",
            "SKR51_Nr",
            "KST_Nr",
            "ABS_Nr",
            "KTR_Nr",
            "KTR_Ebene",
            "Konto_Nr_Händler",
            "Konto_Nr_SKR51",
            "MAR_Nr_MAR",
            "BMC_Nr",
        ]
        to_label = [
            "Marke",
            "Standort",
            "Konto_Nr",
            "Kostenstelle",
            "Absatzkanal",
            "Kostenträger",
            "Kostenträger_Ebene",
            "Konto_Nr_Händler",
            "Konto_Nr_SKR51",
            "Marke_HBV",
            "Standort_HBV",
        ]
        df_combined = df_source[from_label].rename(
            columns=dict(zip(from_label, to_label))
        )
        df_combined.to_csv(
            f"{self.config['path2']}/Kontenrahmen_uebersetzt.csv",
            sep=";",
            encoding="latin-1",
            index=False,
        )

    def skr51_translate2(self, accounts_combined_file):
        df = self.accounts_from_csv(self.config["struct"])
        df_list = []
        for i, s in enumerate(self.config["struct"].keys()):
            from_label = [
                "Konto_Nr",
                "Ebene" + str(i * 10 + 1),
                "Ebene" + str(i * 10 + 2),
                "Ebene" + str(i * 10 + 3),
            ]
            to_label = ["Konto_Nr", "key", "value", "value2"]
            df_temp = df[from_label].rename(columns=dict(zip(from_label, to_label)))
            df_temp["key"] = "{" + s + "/" + df_temp["key"] + "}"
            df_list.append(df_temp[df_temp["value"] != ""])
        df_translate = pd.concat(df_list)
        # df_translate.to_csv(f"{self.config['path2']}/SKR51_Variablen.csv", sep=';', encoding='latin-1', index=False)
        df_source = pd.read_csv(
            accounts_combined_file,
            decimal=",",
            sep=";",
            encoding="latin-1",
            converters={i: str for i in range(0, 200)},
        )
        df_source = df_source[df_source["Konto_Nr"].str.contains("_STK") == False]
        df_source["Konto_Nr_Gesamt"] = df_source["Konto_Nr"]
        df_source["Konto_Nr"] = np.where(
            df_source["Konto_Nr"].str.contains(r"^[4578]"),
            df_source["Konto_Nr"] + "_" + df_source["Kostenstelle"].str.slice(stop=1),
            df_source["Konto_Nr"],
        )
        df_source["Konto_Nr"] = np.where(
            df_source["Konto_Nr"].str.contains(r"^5\d+_4"),
            df_source["Konto_Nr"]
            + df_source["Kostenstelle"].str.slice(start=1, stop=2),
            df_source["Konto_Nr"],
        )
        df_source = df_source.merge(df, how="left", on=["Konto_Nr"])
        # rows = df_source.shape[0]
        df_source["value"] = ""
        cols = get_default_cols(0)
        for t_from, t_to in self.translate.items():
            if t_from in ["Marke_HBV", "Standort_HBV"]:
                continue
            if t_from == "Konto_Nr":
                df_source[t_to] = df_source[t_from]
            else:
                df_source[t_to] = t_to + "_" + df_source[t_from]
            for e in cols:
                df_source = df_source.merge(
                    df_translate,
                    how="left",
                    left_on=[t_to, e],
                    right_on=["Konto_Nr", "key"],
                    suffixes=(None, "_" + t_to + "_" + e),
                )
                df_source[e] = np.where(
                    df_source["value_" + t_to + "_" + e].notna(),
                    df_source["value_" + t_to + "_" + e],
                    df_source[e],
                )
                # if df_source.shape[0] > rows:
                #     print(t_to + '_' + e + ': ' + str(df_source.shape[0]))
            # df_source.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
            # df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
        for e in cols:
            df_source[e] = np.where(
                df_source[e].str.startswith("{"),
                df_source[e].str.extract(r"\/(.*)}", expand=False) + " falsch",
                df_source[e],
            )  # df_source[e].str.extract(r'/(.*)}') +
            df_source[e] = np.where(
                df_source[e] == "[KTR]", df_source["Kostenträger_Ebene"], df_source[e]
            )
        # df_all[df_all['Ebene1'] == ]
        # print(df_source.head())
        df_source["Konto_neu"] = (
            df_source["Marke"]
            + "-"
            + df_source["Standort"]
            + "-"
            + df_source["Konto_Nr"]
            + "-"
            + df_source["Kostenstelle"]
            + "-"
            + df_source["Absatzkanal"]
            + "-"
            + df_source["Kostenträger"]
            + " - "
            + df_source["Konto_Bezeichnung"]
        )
        df_source["Ebene1_empty"] = df_source[
            "Ebene1"
        ].isna()  # , df_source['Ebene1'].map(lambda x: x == ''))
        df_source["Konto_neu"] = np.where(
            df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_neu"]
        )
        df_source["Ebene1"] = np.where(
            df_source["Ebene1_empty"], "keine Zuordnung", df_source["Ebene1"]
        )
        df_source["Konto_Gruppe"] = (
            df_source["Konto_Nr"] + " - " + df_source["Konto_Bezeichnung"]
        )
        df_source["Konto_Gruppe"] = np.where(
            df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_Gruppe"]
        )
        df_source["Konto_Gesamt"] = (
            df_source["Konto_Nr_Gesamt"] + " - " + df_source["Konto_Bezeichnung"]
        )
        df_amount = df_source[df_source["Ebene1"] == "Umsatzerlöse"].reset_index()
        df_amount["Ebene1"] = "verkaufte Stückzahlen"
        df_amount["Ebene72"] = "verkaufte Stückzahlen"
        df_amount["Konto_neu"] = "STK " + df_amount["Konto_neu"]
        df_amount["Konto_Nr_Händler"] = df_amount["Konto_Nr_Händler"] + "_STK"
        df_amount["Konto_Gruppe"] = "STK " + df_amount["Konto_Gruppe"]
        df_amount["Konto_Gesamt"] = "STK " + df_amount["Konto_Gesamt"]
        df_source = pd.concat([df_source, df_amount])
        df_source["GuV"] = df_source["Ebene71"] == "GuV"
        df_source["Ebene81"] = np.where(
            df_source["GuV"], df_source["Ebene72"], "Bilanz"
        )
        df_source["Ebene82"] = np.where(df_source["GuV"], df_source["Ebene73"], "")
        df_source["Ebene83"] = np.where(df_source["GuV"], df_source["Ebene74"], "")
        df_source["Ebene84"] = np.where(df_source["GuV"], df_source["Ebene75"], "")
        df_source["Ebene85"] = np.where(df_source["GuV"], df_source["Ebene76"], "")
        df_source["Ebene86"] = np.where(df_source["GuV"], df_source["Ebene77"], "")
        df_source["Ebene87"] = np.where(df_source["GuV"], df_source["Ebene78"], "")
        df_source["Ebene88"] = np.where(df_source["GuV"], df_source["Ebene79"], "")
        df_source["Ebene89"] = np.where(df_source["GuV"], df_source["Ebene80"], "")
        df_source["Ebene90"] = ""
        df_source["Ebene71"] = np.where(df_source["GuV"], "GuV", df_source["Ebene72"])
        df_source["Ebene72"] = np.where(df_source["GuV"], "", df_source["Ebene73"])
        df_source["Ebene73"] = np.where(df_source["GuV"], "", df_source["Ebene74"])
        df_source["Ebene74"] = np.where(df_source["GuV"], "", df_source["Ebene75"])
        df_source["Ebene75"] = np.where(df_source["GuV"], "", df_source["Ebene76"])
        df_source["Ebene76"] = np.where(df_source["GuV"], "", df_source["Ebene77"])
        df_source["Ebene77"] = np.where(df_source["GuV"], "", df_source["Ebene78"])
        df_source["Ebene78"] = np.where(df_source["GuV"], "", df_source["Ebene79"])
        df_source["Ebene79"] = np.where(df_source["GuV"], "", df_source["Ebene80"])
        df_source["Ebene80"] = ""
        df_source["Susa"] = df_source["Konto_Gruppe"].str.slice(stop=1)
        df_source["Konto_KST"] = ""
        df_source["GuV_Bilanz"] = df_source["Konto_Art"]
        from_label = ["Konto_neu", "Konto_Nr_Händler"]
        to_label = ["Konto", "Acct_Nr"]
        df_source = df_source.rename(columns=dict(zip(from_label, to_label)))
        df_source = df_source[
            [
                "Konto",
                "Acct_Nr",
                "Konto_Bezeichnung",
                "GuV_Bilanz",
                "Konto_KST",
                "Konto_STK",
                "Konto_1",
                "Konto_2",
                "Konto_3",
                "Konto_4",
                "Konto_5",
            ]
            + get_default_cols(0)
            + get_default_cols(7)
            + get_default_cols(8)
            + ["Konto_Gruppe", "Konto_Nr_Gesamt", "Konto_Gesamt", "Susa"]
        ]
        df_source.to_csv(
            f"{self.config['path2']}/SKR51_Uebersetzung.csv",
            sep=";",
            encoding="latin-1",
            index=False,
        )

    def skr51_vars(self):
        self.get_structure_and_tree()
        cols = get_default_cols(0)
        df_temp = pd.read_csv(
            f"{self.config['path']}/Export/Kostentraeger.csv",
            decimal=",",
            sep=";",
            encoding="latin-1",
            converters={i: str for i in range(0, 200)},
        )
        df_temp["value"] = df_temp["Ebene33"]
        df_temp["key"] = "[KTR]"
        df_temp = df_temp[df_temp["value"].str.contains(" - ")]
        df_list = [df_temp[["key", "value"]]]
        for s, entries in self.json_result["flat"].items():
            df = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
            df["key"] = df[cols[0]].apply(lambda x: "{" + s + "/" + x + "}")
            df["value"] = df[cols[1]]
            df_list.append(df[["key", "value"]])
        df = pd.concat(df_list)
        df_vars = df[df["value"] != ""]
        # df_vars.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
        df_main = pd.DataFrame(
            [x["id"].split(";") for x in self.json_result["flat"]["SKR51"]],
            columns=cols,
        )
        df_main["value"] = ""
        for c in cols:
            df_main = df_main.merge(
                df_vars, how="left", left_on=c, right_on="key", suffixes=(None, "_" + c)
            )
            df_main[c] = np.where(
                df_main["value_" + c].isna(), df_main[c], df_main["value_" + c]
            )
        df_amount = df_main[df_main["Ebene1"] == "Umsatzerlöse"].reset_index()
        df_amount["Ebene1"] = "verkaufte Stückzahlen"
        df_main = pd.concat([df_main, df_amount])
        # from_label = cols
        to_label = cols  # get_default_cols(9)
        # df_main = df_main.rename(columns=dict(zip(from_label, to_label)))
        df_main[to_label].to_csv(
            f"{self.config['path2']}/SKR51_Struktur.csv",
            sep=";",
            encoding="latin-1",
            index_label="Sortierung",
        )
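
# gcstruct_uebersetzung() below chains the steps above: skr51_translate() maps
# the combined dealer Kontenrahmen to SKR51 in the GCStruct_Aufbereitung
# directory, then skr51_translate2() and skr51_vars() rebuild the translated
# chart of accounts and the structure variables against GCStruct_Modell.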


def gcstruct_uebersetzung(base_dir=None):
    # base_dir = 'P:/SKR51_GCStruct/'
    if base_dir is None:
        base_dir = Path(".").absolute()
    import_dir = base_dir
    if base_dir.name == "scripts":
        if base_dir.parent.parent.name == "Portal":
            base_dir = base_dir.parent.parent.parent
            import_dir = base_dir.joinpath("Portal/System/IQD/Belege/Kontenrahmen")
        else:
            base_dir = base_dir.parent.parent
            import_dir = base_dir.joinpath("System/OPTIMA/Export")
    elif not base_dir.joinpath("GCStruct_Aufbereitung").exists():
        base_dir = Path(
            "//192.168.2.21/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/"
        )
        if not base_dir.exists():
            base_dir = Path(
                "//media/fileserver1/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/"
            )
        import_dir = base_dir
    struct = GCStruct(str(base_dir.joinpath("GCStruct_Aufbereitung")))
    struct.skr51_translate(import_dir.glob("Kontenrahmen_kombiniert*.csv"))
    print("Kontenrahmen_uebersetzt.csv erstellt.")
    # copyfile('c:/Projekte/Python/Gcstruct/Kontenrahmen_kombiniert.csv', base_dir + 'GCStruct_Modell/Export/Kontenrahmen_kombiniert.csv')
    struct2 = GCStruct(str(base_dir.joinpath("GCStruct_Modell")))
    struct2.skr51_translate2(
        str(
            base_dir.joinpath(
                "GCStruct_Aufbereitung/Export/Kontenrahmen_uebersetzt.csv"
            )
        )
    )
    print("SKR51_Uebersetzung.csv erstellt.")
    struct2.skr51_vars()
    print("SKR51_Struktur.csv erstellt.")


def dresen():
    struct = GCStruct("c:/projekte/GCHRStruct_Hyundai_Export")
    struct.get_structure_and_tree()
    struct.export()


def reisacher():
    base_dir = "X:/Robert/Planung Reisacher/GCStruct_neue_Struktur_Planung"
    if not Path(base_dir).exists():
        base_dir = "/media/fileserver1/austausch/Robert/Planung Reisacher/GCStruct_neue_Struktur_Planung"
    struct = GCStruct(base_dir)
    struct.get_structure_and_tree()
    # json.dump(res['flat'], open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)


if __name__ == "__main__":
    # struct = GCStruct('c:/projekte/gcstruct_dresen')
    # struct = GCStruct('c:/projekte/python/gcstruct')
    # struct = GCStruct('c:/projekte/python/gcstruct_reisacher_planung')
    reisacher()
    # dresen()