# gchr.py
  1. import pandas as pd
  2. import numpy as np
  3. import xml.etree.ElementTree as ET
  4. import csv
  5. from xml.dom import minidom
  6. from datetime import datetime
  7. import logging
  8. from pathlib import Path
  9. import os
  10. ACCOUNT_INFO = [
  11. "Account",
  12. "Make",
  13. "Site",
  14. "Origin",
  15. "SalesChannel",
  16. "CostCarrier",
  17. "CostAccountingString",
  18. ]
  19. class GCHR:
  20. def __init__(self, base_dir) -> None:
  21. self.base_dir = base_dir
  22. self.account_translation = f"{self.base_dir}/data/Kontenrahmen_uebersetzt.csv"
  23. self.account_bookings = list(Path(self.base_dir).joinpath("data").glob("GuV_Bilanz_Salden*.csv"))
  24. self.first_month_of_financial_year = "01"
  25. pd.set_option("display.max_rows", 500)
  26. pd.set_option("display.float_format", lambda x: "%.2f" % x)
  27. def set_bookkeep_period(self, year, month):
  28. self.current_year = year
  29. self.current_month = month
  30. period = f"{year}-{month}"
  31. prot_file = f"{self.export_info_dir}/protokoll_{period}.log"
  32. logging.basicConfig(
  33. filename=prot_file,
  34. filemode="w",
  35. encoding="utf-8",
  36. level=logging.DEBUG,
  37. force=True,
  38. )
  39. self.debug_file = f"{self.export_info_dir}/debug_{period}.csv"
  40. self.account_ignored = f"{self.export_info_dir}/ignoriert_{period}.csv"
  41. self.account_invalid = f"{self.export_info_dir}/ungueltig_{period}.csv"
  42. self.last_year = str(int(self.current_year) - 1)
  43. self.last_year2 = str(int(self.current_year) - 2)
  44. self.next_year = str(int(self.current_year) + 1)
  45. def header(self, makes, sites):
  46. return {
  47. "Country": "DE",
  48. "MainBmCode": sites[0]["Standort_HBV"],
  49. "Month": self.current_month,
  50. "Year": self.current_year,
  51. "Currency": "EUR",
  52. "NumberOfMakes": len(makes),
  53. "NumberOfSites": len(sites),
  54. "ExtractionDate": datetime.now().strftime("%d.%m.%Y"),
  55. "ExtractionTime": datetime.now().strftime("%H:%M:%S"),
  56. "BeginFiscalYear": self.first_month_of_financial_year,
  57. }
  58. @property
  59. def bookkeep_filter(self):
  60. period = [self.current_year + str(i).zfill(2) for i in range(1, 13)]
  61. if self.first_month_of_financial_year != "01":
  62. if self.first_month_of_financial_year > self.current_month:
  63. period = [self.last_year + str(i).zfill(2) for i in range(1, 13)] + period
  64. else:
  65. period = period + [self.next_year + str(i).zfill(2) for i in range(1, 13)]
  66. fm = int(self.first_month_of_financial_year)
  67. period = period[fm - 1 : fm + 12]
  68. period = [self.current_year + "00"] + period
  69. rename_to = ["OpeningBalance"] + ["Period" + str(i).zfill(2) for i in range(1, 13)]
  70. return dict(zip(period, rename_to))
  71. def extract_acct_info(self, df: pd.DataFrame):
  72. acct_info = [
  73. "Marke",
  74. "Standort",
  75. "Konto_Nr",
  76. "Kostenstelle",
  77. "Absatzkanal",
  78. "Kostenträger",
  79. ]
  80. df["Konto_Nr_SKR51"] = df.index
  81. df[acct_info] = df["Konto_Nr_SKR51"].str.split(pat="-", n=6, expand=True)
  82. return df
  83. def export_all_periods(self, overwrite=False):
  84. dt = datetime.now()
  85. prev = str(dt.year - 1)
  86. periods = [(prev, str(x).zfill(2)) for x in range(dt.month, 13)] + [
  87. (str(dt.year), str(x).zfill(2)) for x in range(1, dt.month)
  88. ]
  89. for year, month in periods:
  90. filename = self.export_filename_for_period(year, month)
  91. if overwrite or not Path(filename).exists():
  92. os.makedirs(Path(filename).parent.joinpath("info"), exist_ok=True)
  93. self.export_period(year, month)
    def export_period(self, year, month):
        """Build and write the complete export for one bookkeeping period.

        Loads the account translation table and the booking balances, maps
        dealer accounts to SKR51 accounts, pivots amounts by period, applies
        the special business rules, and writes the result both as CSV and as
        the HBV XML file. Unmapped accounts are written to a separate
        "ignored" CSV for review.

        Returns the export file name on success, or False when no usable
        booking data is available.
        """
        self.set_bookkeep_period(year, month)
        # Load translation table (dealer account -> SKR51 account)
        df_translate_import = pd.read_csv(
            self.account_translation,
            decimal=",",
            sep=";",
            encoding="latin-1",
            converters={i: str for i in range(0, 200)},  # read every column as str to keep leading zeros
        )
        df_translate = self.prepare_translation(df_translate_import)
        # Load account balances
        df_bookings = self.load_bookings_from_file()
        # Need at least the opening-balance period plus one real month.
        if df_bookings.shape[0] == 0 or len(set(df_bookings["Bookkeep Period"].to_list())) <= 1:
            logging.error("ABBRUCH!!! Keine Daten vorhanden!")
            return False
        filter_to = self.current_year + self.current_month
        # Number of period columns (incl. OpeningBalance) to emit in the XML.
        period_no = list(self.bookkeep_filter.keys()).index(filter_to) + 1
        logging.info("df_bookings: " + str(df_bookings.shape))
        # Join against the translation table (inner: unmapped rows drop out here)
        df_combined = df_bookings.merge(df_translate, how="inner", on="Konto_Nr_Händler")
        logging.info(f"df_combined: {df_combined.shape}")
        # Hack for missing make mapping: HBV make "0000" counts as foreign make 99
        df_combined["Fremdmarke"] = df_combined["Marke_HBV"].str.match(r"^0000")
        df_combined["Marke"] = np.where(df_combined["Fremdmarke"], "99", df_combined["Marke"])
        # Sites of the form "NN_" carry no real HBV site -> neutralize to "0000"
        df_combined["Standort_egal"] = df_combined["Standort_HBV"].str.match(r"^\d\d_")
        df_combined["Standort_HBV"] = np.where(
            df_combined["Fremdmarke"] | df_combined["Standort_egal"],
            "0000",
            df_combined["Standort_HBV"],
        )
        makes = df_combined[["Marke", "Marke_HBV"]].drop_duplicates().sort_values(by=["Marke"])
        sites = (
            df_combined[["Marke", "Standort", "Standort_HBV"]].drop_duplicates().sort_values(by=["Marke", "Standort"])
        )
        # df_combined.to_csv(account_invalid, decimal=',', sep=';', encoding='latin-1', index=False)
        # Pivot: one row per SKR51 account, one column per period
        # df_grouped = df_combined.groupby(['Konto_Nr_SKR51', 'period']).sum()
        df_pivot = df_combined.pivot_table(
            index=["Konto_Nr_SKR51"],
            columns=["period"],
            values="amount",
            aggfunc="sum",
            margins=True,
            margins_name="CumulatedYear",
        )
        # Drop the margins *row*; the "CumulatedYear" margin *column* is kept.
        df_pivot.drop(index="CumulatedYear", inplace=True)
        logging.info("df_pivot: " + str(df_pivot.shape))
        df = self.special_translation(df_pivot, makes)
        # Rename the German dimension columns to the HBV export names.
        from_label = ["Marke", "Standort", "Konto_Nr", "Kostenstelle", "Absatzkanal", "Kostenträger", "KRM"]
        to_label = ["Make", "Site", "Account", "Origin", "SalesChannel", "CostCarrier", "CostAccountingString"]
        col_dict = dict(zip(from_label, to_label))
        df = df.rename(columns=col_dict)
        makes = makes.rename(columns=col_dict).to_dict(orient="records")
        sites = sites.rename(columns=col_dict).to_dict(orient="records")
        # Rows failing the numeric validation go to the "invalid" report.
        df_invalid = df[df["IsNumeric"] == False]
        df_invalid.to_csv(self.account_invalid, decimal=",", sep=";", encoding="latin-1", index=False)
        # CSV twin of the XML export (same base name, .csv extension).
        export_csv = self.export_filename[:-4] + ".csv"
        df.to_csv(export_csv, decimal=",", sep=";", encoding="latin-1", index=False)
        df = df[df["IsNumeric"] != False].groupby(ACCOUNT_INFO, as_index=False).aggregate("sum")
        # Add fixed record info
        df["Decimals"] = 2
        # df['OpeningBalance'] = 0.0
        logging.info(df.shape)
        self.export_xml(df.to_dict(orient="records"), self.bookkeep_filter, period_no, makes, sites)
        # Left-join against the translation to find accounts without a mapping
        df_ignored = df_bookings.merge(df_translate, how="left", on="Konto_Nr_Händler")
        df_ignored = df_ignored[
            df_ignored["Konto_Nr_SKR51"].isna()
        ]  # [['Konto_Nr_Händler', 'Bookkeep Period', 'amount', 'quantity']]
        if not df_ignored.empty:
            df_ignored = df_ignored.pivot_table(
                index=["Konto_Nr_Händler"],
                columns=["period"],
                values="amount",
                aggfunc=np.sum,
                margins=True,
                margins_name="CumulatedYear",
            )
            df_ignored.to_csv(self.account_ignored, decimal=",", sep=";", encoding="latin-1")
        return self.export_filename
  175. def prepare_translation(self, df_translate: pd.DataFrame):
  176. logging.info(df_translate.shape)
  177. df_translate["duplicated"] = df_translate.duplicated()
  178. logging.info(df_translate[df_translate["duplicated"]])
  179. df_translate = df_translate[
  180. [
  181. "Konto_Nr_Händler",
  182. "Konto_Nr_SKR51",
  183. "Marke",
  184. "Marke_HBV",
  185. "Standort",
  186. "Standort_HBV",
  187. ]
  188. ]
  189. row = (
  190. df_translate[["Marke", "Marke_HBV", "Standort", "Standort_HBV"]]
  191. .drop_duplicates()
  192. .sort_values(by=["Marke", "Standort"])
  193. .iloc[:1]
  194. .to_dict(orient="records")[0]
  195. )
  196. row["Konto_Nr_Händler"] = "01-01-0861-00-00-00"
  197. row["Konto_Nr_SKR51"] = "01-01-0861-00-00-00"
  198. df_translate = pd.concat([df_translate, pd.DataFrame.from_records([row])])
  199. # print(df_translate.tail())
  200. # df_translate.drop(columns=['duplicated'], inplace=True)
  201. df_translate.drop_duplicates(inplace=True)
  202. df_translate.set_index("Konto_Nr_Händler")
  203. return df_translate
    def special_translation(self, df: pd.DataFrame, makes: pd.DataFrame):
        """Apply the SKR51 business rules to the pivoted account table.

        Splits the account key into its dimensions, derives the account type,
        then repairs cost-center / sales-channel / cost-carrier combinations
        that are invalid for the HBV export. The rules are order-dependent:
        later rules see the values written by earlier ones.

        Returns *df* with helper flag columns plus the final "KRM" key and the
        "IsNumeric" validity flag added.
        """
        df = self.extract_acct_info(df)
        # df = df_translate.reset_index(drop=True).drop(columns=['Kostenträger_Ebene']).drop_duplicates()
        logging.info(df.shape)
        logging.info(df.columns)
        logging.info(df.head())
        # df = df.merge(df_translate, how='inner', on='Konto_Nr_SKR51')
        logging.info("df: " + str(df.shape))
        # Account type: "1" = balance sheet (accounts starting 0/1/3),
        # "2" = P&L, "3" = statistics/unit accounts (_STK or 9xxx).
        df["Bilanz"] = df["Konto_Nr"].str.match(r"^[013]")
        df["Kontoart"] = np.where(df["Bilanz"], "1", "2")
        df["Kontoart"] = np.where(df["Konto_Nr"].str.contains("_STK"), "3", df["Kontoart"])
        df["Kontoart"] = np.where(df["Konto_Nr"].str.match(r"^[9]"), "3", df["Kontoart"])
        df["Konto_1"] = df["Konto_Nr"].str.slice(0, 1)
        # Hack for missing make mapping: unknown makes become "99"
        df = df.merge(makes, how="left", on="Marke")
        df["Marke"] = np.where(df["Marke_HBV"].isna(), "99", df["Marke"])
        # Debug aggregates per account type / first digit / account number.
        df_debug = df.drop(columns=["Bilanz"])
        logging.info(df_debug.groupby(["Kontoart"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Kontoart", "Konto_1"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Konto_Nr"]).aggregate("sum"))
        df_debug.groupby(["Konto_Nr"]).aggregate("sum").to_csv(
            self.debug_file, decimal=",", sep=";", encoding="latin-1"
        )
        # Clean-up of used-vehicle (GW) cost carriers.
        # NOTE(review): rule intents below are inferred from variable names
        # (NW = new vehicles, GW = used vehicles, Teile = parts) — confirm
        # against the HBV specification.
        # Accounts 7/8 0xx outside cost centers 1x -> force cost center 11.
        df["NW_Verkauf_1"] = (df["Konto_Nr"].str.match(r"^[78]0")) & (df["Kostenstelle"].str.match(r"^[^1]\d"))
        df["Kostenstelle"] = np.where(df["NW_Verkauf_1"] == True, "11", df["Kostenstelle"])
        # Accounts 7010/7011/8010/8011 are pinned to cost center 14.
        df["Konto_7010"] = df["Konto_Nr"].str.match(r"^[78]01[01]")
        df["Kostenstelle"] = np.where(df["Konto_7010"] == True, "14", df["Kostenstelle"])
        # Accounts 7/8 1xx outside cost centers 2x -> force 21.
        df["GW_Verkauf_2"] = (df["Konto_Nr"].str.match(r"^[78]1")) & (df["Kostenstelle"].str.match(r"^[^2]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_2"] == True, "21", df["Kostenstelle"])
        # Accounts 7/8 3xx outside cost centers 3x -> force 31.
        df["GW_Verkauf_3"] = (df["Konto_Nr"].str.match(r"^[78]3")) & (df["Kostenstelle"].str.match(r"^[^3]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_3"] == True, "31", df["Kostenstelle"])
        # Accounts 7/8 4xx outside cost centers 4x -> force 41.
        df["GW_Verkauf_4"] = (df["Konto_Nr"].str.match(r"^[78]4")) & (df["Kostenstelle"].str.match(r"^[^4]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_4"] == True, "41", df["Kostenstelle"])
        # Accounts 7420/8420 are pinned to cost center 42.
        df["GW_Verkauf_x420"] = df["Konto_Nr"].str.match(r"^[78]420")
        df["Kostenstelle"] = np.where(df["GW_Verkauf_x420"] == True, "42", df["Kostenstelle"])
        # Accounts 7/8 5xx outside cost centers 5x -> force 51.
        df["GW_Verkauf_5"] = (df["Konto_Nr"].str.match(r"^[78]5")) & (df["Kostenstelle"].str.match(r"^[^5]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_5"] == True, "51", df["Kostenstelle"])
        # Accounts 7/8 in cost centers 2x -> carrier 52 (50 for make "01").
        df["GW_Verkauf_50"] = (df["Konto_Nr"].str.match(r"^[78]")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Verkauf_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where(
            (df["GW_Verkauf_50"] == True) & (df["Marke"] == "01"),
            "50",
            df["Kostenträger"],
        )
        # Accounts 7/8 2xx in cost centers 1x with carrier outside 0-4 -> "00".
        df["NW_Verkauf_00"] = (
            (df["Konto_Nr"].str.match(r"^[78]2"))
            & (df["Kostenstelle"].str.match(r"^1"))
            & (df["Kostenträger"].str.match(r"^[^01234]"))
        )
        df["Kostenträger"] = np.where(df["NW_Verkauf_00"] == True, "00", df["Kostenträger"])
        # Unit accounts 9130 in cost centers 2x -> carrier 52 (50 for make "01").
        df["GW_Stk_50"] = (df["Konto_Nr"].str.match(r"^9130")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Stk_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where((df["GW_Stk_50"] == True) & (df["Marke"] == "01"), "50", df["Kostenträger"])
        # Balance-sheet accounts never carry a cost carrier.
        df["Kostenträger"] = np.where(df["Bilanz"] == True, "00", df["Kostenträger"])
        # Accounts 5xxx and 9143 get sales channel "99".
        df["Konto_5er"] = (df["Konto_Nr"].str.match("^5")) | (df["Konto_Nr"].str.match("^9143"))
        df["Absatzkanal"] = np.where(df["Konto_5er"] == True, "99", df["Absatzkanal"])
        # Account 5005 outside cost centers 1x/2x -> cost center 20, carrier 50.
        df["Konto_5005"] = (df["Konto_Nr"].str.match("^5005")) & (df["Kostenstelle"].str.match(r"^[^12]"))
        df["Kostenstelle"] = np.where(df["Konto_5005"] == True, "20", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5005"] == True, "50", df["Kostenträger"])
        # Account 5007 outside cost centers 4x (or exactly 42) -> 41 / 70.
        df["Konto_5007"] = (df["Konto_Nr"].str.match("^5007")) & (df["Kostenstelle"].str.match(r"^([^4]|42)"))
        df["Kostenstelle"] = np.where(df["Konto_5007"] == True, "41", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5007"] == True, "70", df["Kostenträger"])
        # Accounts 9143/9144 with carrier outside 7x -> force 70.
        df["Konto_914er"] = (df["Konto_Nr"].str.match("^914[34]")) & (df["Kostenträger"].str.match(r"^[^7]"))
        df["Kostenträger"] = np.where(df["Konto_914er"] == True, "70", df["Kostenträger"])
        # Parts (cost center 3x): carrier must be 6x, else force 60.
        df["Teile_30_60"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[3]"))
            & (df["Kostenträger"].str.match(r"^[^6]"))
        )
        df["Kostenträger"] = np.where(df["Teile_30_60"] == True, "60", df["Kostenträger"])
        # Service (cost center 4x): carrier must be 7x, else force 70.
        df["Service_40_70"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[4]"))
            & (df["Kostenträger"].str.match(r"^[^7]"))
        )
        df["Kostenträger"] = np.where(df["Service_40_70"] == True, "70", df["Kostenträger"])
        # Final cost-accounting key and validity flag (account must be 4 digits).
        df["KRM"] = df["Marke"] + df["Standort"] + df["Kostenstelle"] + df["Absatzkanal"] + df["Kostenträger"]
        df["IsNumeric"] = (df["KRM"].str.isdigit()) & (df["Konto_Nr"].str.isdigit()) & (df["Konto_Nr"].str.len() == 4)
        return df
    def load_bookings_from_file(self):
        """Load and combine all booking-balance CSV files into one DataFrame.

        - Filters the bookings to the current financial year.
        - Ensures an opening-balance period ("<year>00") exists; if the
          previous year is not closed yet, it is synthesized from last year's
          opening balance plus last year's bookings, with a profit/loss
          carry-forward row on account 01-01-0861-00-00-00.
        - Duplicates every booking as a "_STK" statistics account carrying the
          quantities instead of the amounts.
        - Adds a "period" column (export column name) and an "amount" column.

        Returns the combined frame with zero-amount rows removed.
        """
        df2 = []
        for csv_file in self.account_bookings:
            df2.append(
                pd.read_csv(
                    csv_file,
                    decimal=",",
                    sep=";",
                    encoding="latin-1",
                    converters={0: str, 1: str},  # first two columns as str (keeps leading zeros)
                )
            )
        df_bookings = pd.concat(df2)
        # Filter account balances to the given month's financial year
        filter_from = self.current_year + self.first_month_of_financial_year
        filter_prev = self.last_year + self.first_month_of_financial_year
        if self.first_month_of_financial_year > self.current_month:
            # The financial year started in the previous calendar year.
            filter_from = self.last_year + self.first_month_of_financial_year
            filter_prev = self.last_year2 + self.first_month_of_financial_year
        filter_to = self.current_year + self.current_month
        filter_opening = self.current_year + "00"
        filter_prev_opening = self.last_year + "00"
        prev_year_closed = True
        df_opening_balance = df_bookings[(df_bookings["Bookkeep Period"] == filter_opening)]
        if df_opening_balance.shape[0] == 0:
            # No opening balance booked yet: derive it from last year's opening
            # balance plus all of last year's bookings.
            df_opening_balance = df_bookings[
                (df_bookings["Bookkeep Period"] == filter_prev_opening)
                | ((df_bookings["Bookkeep Period"] >= filter_prev) & (df_bookings["Bookkeep Period"] < filter_from))
            ].copy()
            df_opening_balance["Bookkeep Period"] = filter_opening
            prev_year_closed = False
        # df_opening_balance = df_opening_balance.merge(df_translate, how='inner', on='Konto_Nr_Händler')
        # Only balance-sheet accounts (0/1/3...) carry an opening balance.
        df_opening_balance = df_opening_balance[(df_opening_balance["Konto_Nr_Händler"].str.contains(r"-[013]\d\d+-"))]
        df_opening_balance["amount"] = (df_opening_balance["Debit Amount"] + df_opening_balance["Credit Amount"]).round(
            2
        )
        # df_opening_balance.drop(columns=['Debit Amount', 'Credit Amount', 'Debit Quantity', 'Credit Quantity'], inplace=True)
        # df_opening_balance = df_opening_balance.groupby(['Marke', 'Standort']).sum()
        opening_balance = df_opening_balance["amount"].aggregate("sum").round(2)
        logging.info("Gewinn/Verlustvortrag")  # profit/loss carry-forward
        logging.info(opening_balance)
        if not prev_year_closed:
            # Book the carry-forward against the synthetic account so the
            # opening balance sums to zero.
            row = {
                "Konto_Nr_Händler": "01-01-0861-00-00-00",
                "Bookkeep Period": filter_opening,
                "Debit Amount": opening_balance * -1,
                "Credit Amount": 0,
                "Debit Quantity": 0,
                "Credit Quantity": 0,
                "amount": opening_balance * -1,
            }
            df_opening_balance = pd.concat([df_opening_balance, pd.DataFrame.from_records([row])])
        df_bookings = df_bookings[
            (df_bookings["Bookkeep Period"] >= filter_from) & (df_bookings["Bookkeep Period"] <= filter_to)
        ]
        df_bookings["amount"] = (df_bookings["Debit Amount"] + df_bookings["Credit Amount"]).round(2)
        # Copy bookings and append them as "_STK" statistics accounts carrying quantities
        df_stats = df_bookings.copy()
        # df_stats = df_stats[df_stats['Konto_Nr_Händler'].str.match(r'-[24578]\d\d\d-')]
        df_stats["Konto_Nr_Händler"] = df_stats["Konto_Nr_Händler"].str.replace(r"-(\d\d\d+)-", r"-\1_STK-", regex=True)
        df_stats["amount"] = (df_bookings["Debit Quantity"] + df_bookings["Credit Quantity"]).round(2)
        df_bookings = pd.concat([df_opening_balance, df_bookings, df_stats])
        # Convert columns: map the raw period key to its export column name
        df_bookings["period"] = df_bookings["Bookkeep Period"].apply(lambda x: self.bookkeep_filter[x])
        return df_bookings[df_bookings["amount"] != 0.00]
  349. @property
  350. def export_filename(self):
  351. return self.export_filename_for_period(self.current_year, self.current_month)
  352. @property
  353. def export_info_dir(self):
  354. return f"{self.base_dir}/Export/{self.current_year}/info/"
  355. def export_filename_for_period(self, year, month):
  356. return f"{self.base_dir}/Export/{year}/export_{year}-{month}.xml"
  357. def export_xml(self, records, bk_filter, period_no, makes, sites):
  358. record_elements = ACCOUNT_INFO + ["Decimals"] + list(bk_filter.values())[:period_no] + ["CumulatedYear"]
  359. root = ET.Element("HbvData")
  360. h = ET.SubElement(root, "Header")
  361. for k, v in self.header(makes, sites).items():
  362. ET.SubElement(h, k).text = str(v)
  363. make_list = ET.SubElement(root, "MakeList")
  364. for m in makes:
  365. e = ET.SubElement(make_list, "MakeListEntry")
  366. ET.SubElement(e, "Make").text = m["Make"]
  367. ET.SubElement(e, "MakeCode").text = m["Marke_HBV"]
  368. bm_code_list = ET.SubElement(root, "BmCodeList")
  369. for s in sites:
  370. e = ET.SubElement(bm_code_list, "BmCodeEntry")
  371. ET.SubElement(e, "Make").text = s["Make"]
  372. ET.SubElement(e, "Site").text = s["Site"]
  373. ET.SubElement(e, "BmCode").text = s["Standort_HBV"]
  374. record_list = ET.SubElement(root, "RecordList")
  375. for row in records:
  376. record = ET.SubElement(record_list, "Record")
  377. for e in record_elements:
  378. child = ET.SubElement(record, e)
  379. field = row.get(e, 0.0)
  380. if str(field) == "nan":
  381. field = "0"
  382. elif type(field) is float:
  383. field = "{:.0f}".format(field * 100)
  384. child.text = str(field)
  385. with open(self.export_filename, "w", encoding="utf-8") as fwh:
  386. fwh.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent=" "))
  387. def convert_to_row(self, node):
  388. return [child.text for child in node]
  389. def convert_xml_to_csv(self, xmlfile, csvfile):
  390. record_list = ET.parse(xmlfile).getroot().find("RecordList")
  391. header = [child.tag for child in record_list.find("Record")]
  392. bookings = [self.convert_to_row(node) for node in record_list.findall("Record")]
  393. with open(csvfile, "w") as fwh:
  394. cwh = csv.writer(fwh, delimiter=";")
  395. cwh.writerow(header)
  396. cwh.writerows(bookings)
  397. return True
  398. def convert_csv_to_xml(self, csvfile, xmlfile):
  399. makes = [{"Make": "01", "Marke_HBV": "1844"}]
  400. sites = [{"Make": "01", "Site": "01", "Marke_HBV": "1844"}]
  401. with open(csvfile, "r", encoding="latin-1") as frh:
  402. csv_reader = csv.DictReader(frh, delimiter=";")
  403. self.export_xml(csv_reader, self.bookkeep_filter(), 1, makes, sites, xmlfile)
  404. def gchr_local(base_dir):
  405. for path in Path(base_dir).glob("*"):
  406. if path.is_dir():
  407. print(path.name)
  408. gchr_export(str(path))
  409. def gchr_export(base_dir):
  410. gchr = GCHR(base_dir)
  411. gchr.export_all_periods(overwrite=True)
if __name__ == "__main__":
    # Default test-data location, relative to the working directory;
    # silently does nothing when it is absent.
    base_dir = os.getcwd() + "/../GCHR2_Testdaten/Kunden"
    if Path(base_dir).exists():
        gchr_local(base_dir)