gchr.py 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511
  1. import pandas as pd
  2. import numpy as np
  3. import xml.etree.ElementTree as ET
  4. import csv
  5. from xml.dom import minidom
  6. from datetime import datetime
  7. import logging
  8. from pathlib import Path
  9. import os
# Column names that identify one record in the XML export; also used as the
# grouping key when summing amounts per account (see export_period/export_xml).
ACCOUNT_INFO = [
    "Account",
    "Make",
    "Site",
    "Origin",
    "SalesChannel",
    "CostCarrier",
    "CostAccountingString",
]
class GCHR:
    """Builds the HBV (GCHR) XML export from dealer bookkeeping CSV data.

    Reads account balances (``GuV_Bilanz_Salden*.csv``) and an account
    translation table from ``<base_dir>/data`` and writes one XML file per
    bookkeeping period to ``<base_dir>/Export/<year>/``.
    """

    # Timestamp of the newest bookings file; set by load_bookings_from_file().
    booking_date: datetime
    # Cached raw bookings; lazily filled by load_bookings_from_file().
    df_bookings: "pd.DataFrame | None" = None

    def __init__(self, base_dir: str) -> None:
        """Remember the customer base directory and locate the input files."""
        self.base_dir = base_dir
        # Translation table: dealer account number -> SKR51 account number.
        self.account_translation = f"{self.base_dir}/data/Kontenrahmen_uebersetzt.csv"
        # All balance files (P&L / balance sheet) found in the data directory.
        self.account_bookings = list(Path(self.base_dir).joinpath("data").glob("GuV_Bilanz_Salden*.csv"))
        self.first_month_of_financial_year = "01"
        pd.set_option("display.max_rows", 500)
        pd.set_option("display.float_format", lambda x: "%.2f" % x)
  29. def set_bookkeep_period(self, year, month):
  30. self.current_year = year
  31. self.current_month = month
  32. period = f"{year}-{month}"
  33. prot_file = f"{self.export_info_dir}/protokoll_{period}.log"
  34. logging.basicConfig(
  35. filename=prot_file,
  36. filemode="w",
  37. encoding="utf-8",
  38. level=logging.DEBUG,
  39. force=True,
  40. )
  41. self.debug_file = f"{self.export_info_dir}/debug_{period}.csv"
  42. self.account_ignored = f"{self.export_info_dir}/ignoriert_{period}.csv"
  43. self.account_invalid = f"{self.export_info_dir}/ungueltig_{period}.csv"
  44. self.last_year = str(int(self.current_year) - 1)
  45. self.last_year2 = str(int(self.current_year) - 2)
  46. self.next_year = str(int(self.current_year) + 1)
  47. def header(self, makes, sites, main_site):
  48. return {
  49. "Country": "DE",
  50. "MainBmCode": main_site,
  51. "Month": self.current_month,
  52. "Year": self.current_year,
  53. "Currency": "EUR",
  54. "NumberOfMakes": len(makes),
  55. "NumberOfSites": len(sites),
  56. "ExtractionDate": self.booking_date.strftime("%d.%m.%Y"),
  57. "ExtractionTime": self.booking_date.strftime("%H:%M:%S"),
  58. "BeginFiscalYear": self.first_month_of_financial_year,
  59. }
  60. @property
  61. def bookkeep_filter(self):
  62. period = [self.current_year + str(i).zfill(2) for i in range(1, 13)]
  63. if self.first_month_of_financial_year != "01":
  64. if self.first_month_of_financial_year > self.current_month:
  65. period = [self.last_year + str(i).zfill(2) for i in range(1, 13)] + period
  66. else:
  67. period = period + [self.next_year + str(i).zfill(2) for i in range(1, 13)]
  68. fm = int(self.first_month_of_financial_year)
  69. period = period[fm - 1 : fm + 12]
  70. period = [self.current_year + "00"] + period
  71. rename_to = ["OpeningBalance"] + ["Period" + str(i).zfill(2) for i in range(1, 13)]
  72. return dict(zip(period, rename_to))
  73. def extract_acct_info(self, df: pd.DataFrame):
  74. acct_info = [
  75. "Marke",
  76. "Standort",
  77. "Konto_Nr",
  78. "Kostenstelle",
  79. "Absatzkanal",
  80. "Kostenträger",
  81. ]
  82. df["Konto_Nr_SKR51"] = df.index
  83. df[acct_info] = df["Konto_Nr_SKR51"].str.split(pat="-", n=6, expand=True)
  84. return df
  85. def export_all_periods(self, overwrite=False, today=None):
  86. dt = datetime.now()
  87. if today is not None:
  88. dt = datetime.fromisoformat(today)
  89. prev = str(dt.year - 1)
  90. periods = [(prev, str(x).zfill(2)) for x in range(dt.month, 13)] + [
  91. (str(dt.year), str(x).zfill(2)) for x in range(1, dt.month)
  92. ]
  93. for year, month in periods:
  94. filename = self.export_filename_for_period(year, month)
  95. if overwrite or not Path(filename).exists():
  96. os.makedirs(Path(filename).parent.joinpath("info"), exist_ok=True)
  97. self.export_period(year, month)
    def export_period(self, year, month):
        """Export one bookkeeping period to XML.

        Loads the translation table and the account balances, pivots amounts
        per period, applies the SKR51 special rules and writes the XML plus
        CSV debug/error side files. Returns the export filename, or False
        when no usable data exists for the period.
        """
        self.set_bookkeep_period(year, month)
        # Load the account translation table (all columns as text).
        df_translate_import = pd.read_csv(
            self.account_translation,
            decimal=",",
            sep=";",
            encoding="latin-1",
            converters={i: str for i in range(0, 200)},
        )
        df_translate = self.prepare_translation(df_translate_import)
        # Load the account balances for the financial year.
        df_bookings = self.filter_bookings()
        all_periods = set(df_bookings["Bookkeep Period"].to_list())
        bookkeep_period_date = datetime(int(year), int(month), 28)
        # Abort when there is no data, only one period, or the bookings
        # files are older than the requested period.
        if df_bookings.shape[0] == 0 or len(all_periods) <= 1 or self.booking_date < bookkeep_period_date:
            logging.error("ABBRUCH!!! Keine Daten vorhanden!")
            return False
        filter_to = self.current_year + self.current_month
        period_no = list(self.bookkeep_filter.keys()).index(filter_to) + 1
        logging.info("df_bookings: " + str(df_bookings.shape))
        # Join against the translation table.
        df_combined = df_bookings.merge(df_translate, how="inner", on="Konto_Nr_Händler")
        logging.info(f"df_combined: {df_combined.shape}")
        # Workaround for missing make mapping: foreign makes become "99"
        # and their site code is neutralized to "0000".
        df_combined["Fremdmarke"] = df_combined["Marke_HBV"].str.match(r"^0000")
        df_combined["Marke"] = np.where(df_combined["Fremdmarke"], "99", df_combined["Marke"])
        df_combined["Standort_egal"] = df_combined["Standort_HBV"].str.match(r"^\d\d_")
        df_combined["Standort_HBV"] = np.where(
            df_combined["Fremdmarke"] | df_combined["Standort_egal"],
            "0000",
            df_combined["Standort_HBV"],
        )
        makes = df_combined[["Marke", "Marke_HBV"]].drop_duplicates().sort_values(by=["Marke"])
        sites = (
            df_combined[["Marke", "Standort", "Standort_HBV"]].drop_duplicates().sort_values(by=["Marke", "Standort"])
        )
        # Pivot: one row per SKR51 account, one column per period, with a
        # "CumulatedYear" margin column (the margin row is dropped below).
        df_pivot = df_combined.pivot_table(
            index=["Konto_Nr_SKR51"],
            columns=["period"],
            values="amount",
            aggfunc="sum",
            margins=True,
            margins_name="CumulatedYear",
        )
        df_pivot.drop(index="CumulatedYear", inplace=True)
        logging.info("df_pivot: " + str(df_pivot.shape))
        df = self.special_translation(df_pivot, makes)
        # Rename the German column names to the export schema.
        from_label = ["Marke", "Standort", "Konto_Nr", "Kostenstelle", "Absatzkanal", "Kostenträger", "KRM"]
        to_label = ["Make", "Site", "Account", "Origin", "SalesChannel", "CostCarrier", "CostAccountingString"]
        col_dict = dict(zip(from_label, to_label))
        df = df.rename(columns=col_dict)
        makes = makes.rename(columns=col_dict).to_dict(orient="records")
        sites = sites.rename(columns=col_dict).to_dict(orient="records")
        # Records with non-numeric keys go to the error CSV.
        df_invalid = df[df["IsNumeric"] == False]
        df_invalid.to_csv(self.account_invalid, decimal=",", sep=";", encoding="latin-1", index=False)
        export_csv = self.export_filename[:-4] + ".csv"
        df.to_csv(export_csv, decimal=",", sep=";", encoding="latin-1", index=False)
        # Keep only valid records, summed per export record key.
        df = df[df["IsNumeric"] != False].groupby(ACCOUNT_INFO, as_index=False).aggregate("sum")
        df["Decimals"] = 2
        logging.info(df.shape)
        self.export_xml(
            df.to_dict(orient="records"), self.bookkeep_filter, period_no, makes, sites, sites[0]["Standort_HBV"]
        )
        # Left join to find bookings without a translation ("ignored").
        df_ignored = df_bookings.merge(df_translate, how="left", on="Konto_Nr_Händler")
        df_ignored = df_ignored[
            df_ignored["Konto_Nr_SKR51"].isna()
        ]
        if not df_ignored.empty:
            # NOTE(review): aggfunc=np.sum is deprecated in newer pandas;
            # "sum" (as used above) would be equivalent — confirm before changing.
            df_ignored = df_ignored.pivot_table(
                index=["Konto_Nr_Händler"],
                columns=["period"],
                values="amount",
                aggfunc=np.sum,
                margins=True,
                margins_name="CumulatedYear",
            )
            df_ignored.to_csv(self.account_ignored, decimal=",", sep=";", encoding="latin-1")
        return self.export_filename
  183. def prepare_translation(self, df_translate: pd.DataFrame):
  184. logging.info(df_translate.shape)
  185. df_translate["duplicated"] = df_translate.duplicated()
  186. logging.info(df_translate[df_translate["duplicated"]])
  187. df_translate = df_translate[
  188. [
  189. "Konto_Nr_Händler",
  190. "Konto_Nr_SKR51",
  191. "Marke",
  192. "Marke_HBV",
  193. "Standort",
  194. "Standort_HBV",
  195. ]
  196. ]
  197. row = (
  198. df_translate[["Marke", "Marke_HBV", "Standort", "Standort_HBV"]]
  199. .drop_duplicates()
  200. .sort_values(by=["Marke", "Standort"])
  201. .iloc[:1]
  202. .to_dict(orient="records")[0]
  203. )
  204. row["Konto_Nr_Händler"] = "01-01-0861-00-00-00"
  205. row["Konto_Nr_SKR51"] = "01-01-0861-00-00-00"
  206. df_translate = pd.concat([df_translate, pd.DataFrame.from_records([row])])
  207. # print(df_translate.tail())
  208. # df_translate.drop(columns=['duplicated'], inplace=True)
  209. df_translate.drop_duplicates(inplace=True)
  210. df_translate.set_index("Konto_Nr_Händler")
  211. return df_translate
    def special_translation(self, df: pd.DataFrame, makes: pd.DataFrame):
        """Apply the SKR51 business rules to the pivoted account table.

        Splits the account key into its components, classifies accounts
        (balance sheet / P&L / statistical), rewrites cost centres, sales
        channels and cost carriers rule by rule (order matters — later
        rules see earlier corrections), and finally adds the combined
        "KRM" key plus an "IsNumeric" validity flag.
        """
        df = self.extract_acct_info(df)
        logging.info(df.shape)
        logging.info(df.columns)
        logging.info(df.head())
        logging.info("df: " + str(df.shape))
        # Balance-sheet accounts start with 0, 1 or 3.
        df["Bilanz"] = df["Konto_Nr"].str.match(r"^[013]")
        # Kontoart: 1 = balance sheet, 2 = P&L, 3 = statistical (_STK / 9xxx).
        df["Kontoart"] = np.where(df["Bilanz"], "1", "2")
        df["Kontoart"] = np.where(df["Konto_Nr"].str.contains("_STK"), "3", df["Kontoart"])
        df["Kontoart"] = np.where(df["Konto_Nr"].str.match(r"^[9]"), "3", df["Kontoart"])
        df["Konto_1"] = df["Konto_Nr"].str.slice(0, 1)
        # Workaround for missing make mapping (cf. export_period).
        df = df.merge(makes, how="left", on="Marke")
        df["Marke"] = np.where(df["Marke_HBV"].isna(), "99", df["Marke"])
        # Write grouped sums to the debug CSV for manual checks.
        df_debug = df.drop(columns=["Bilanz"])
        logging.info(df_debug.groupby(["Kontoart"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Kontoart", "Konto_1"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Konto_Nr"]).aggregate("sum"))
        df_debug.groupby(["Konto_Nr"]).aggregate("sum").to_csv(
            self.debug_file, decimal=",", sep=";", encoding="latin-1"
        )
        # Clean up new/used-vehicle cost centres: force the sales accounts
        # (7x/8x) onto their canonical cost centre per account range.
        df["NW_Verkauf_1"] = (df["Konto_Nr"].str.match(r"^[78]0")) & (df["Kostenstelle"].str.match(r"^[^1]\d"))
        df["Kostenstelle"] = np.where(df["NW_Verkauf_1"] == True, "11", df["Kostenstelle"])
        df["Konto_7010"] = df["Konto_Nr"].str.match(r"^[78]01[01]")
        df["Kostenstelle"] = np.where(df["Konto_7010"] == True, "14", df["Kostenstelle"])
        df["GW_Verkauf_2"] = (df["Konto_Nr"].str.match(r"^[78]1")) & (df["Kostenstelle"].str.match(r"^[^2]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_2"] == True, "21", df["Kostenstelle"])
        df["GW_Verkauf_3"] = (df["Konto_Nr"].str.match(r"^[78]3")) & (df["Kostenstelle"].str.match(r"^[^3]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_3"] == True, "31", df["Kostenstelle"])
        df["GW_Verkauf_4"] = (df["Konto_Nr"].str.match(r"^[78]4")) & (df["Kostenstelle"].str.match(r"^[^4]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_4"] == True, "41", df["Kostenstelle"])
        df["GW_Verkauf_x420"] = df["Konto_Nr"].str.match(r"^[78]420")
        df["Kostenstelle"] = np.where(df["GW_Verkauf_x420"] == True, "42", df["Kostenstelle"])
        df["GW_Verkauf_5"] = (df["Konto_Nr"].str.match(r"^[78]5")) & (df["Kostenstelle"].str.match(r"^[^5]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_5"] == True, "51", df["Kostenstelle"])
        # Cost-carrier corrections; make "01" gets carrier "50" instead of "52".
        df["GW_Verkauf_50"] = (df["Konto_Nr"].str.match(r"^[78]")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Verkauf_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where(
            (df["GW_Verkauf_50"] == True) & (df["Marke"] == "01"),
            "50",
            df["Kostenträger"],
        )
        df["NW_Verkauf_00"] = (
            (df["Konto_Nr"].str.match(r"^[78]2"))
            & (df["Kostenstelle"].str.match(r"^1"))
            & (df["Kostenträger"].str.match(r"^[^01234]"))
        )
        df["Kostenträger"] = np.where(df["NW_Verkauf_00"] == True, "00", df["Kostenträger"])
        df["GW_Stk_50"] = (df["Konto_Nr"].str.match(r"^9130")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Stk_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where((df["GW_Stk_50"] == True) & (df["Marke"] == "01"), "50", df["Kostenträger"])
        # Balance-sheet accounts never carry a cost carrier.
        df["Kostenträger"] = np.where(df["Bilanz"] == True, "00", df["Kostenträger"])
        # 5xxx / 9143 accounts use the generic sales channel "99".
        df["Konto_5er"] = (df["Konto_Nr"].str.match("^5")) | (df["Konto_Nr"].str.match("^9143"))
        df["Absatzkanal"] = np.where(df["Konto_5er"] == True, "99", df["Absatzkanal"])
        df["Konto_5005"] = (df["Konto_Nr"].str.match("^5005")) & (df["Kostenstelle"].str.match(r"^[^12]"))
        df["Kostenstelle"] = np.where(df["Konto_5005"] == True, "20", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5005"] == True, "50", df["Kostenträger"])
        df["Konto_5007"] = (df["Konto_Nr"].str.match("^5007")) & (df["Kostenstelle"].str.match(r"^([^4]|42)"))
        df["Kostenstelle"] = np.where(df["Konto_5007"] == True, "41", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5007"] == True, "70", df["Kostenträger"])
        df["Konto_914er"] = (df["Konto_Nr"].str.match("^914[34]")) & (df["Kostenträger"].str.match(r"^[^7]"))
        df["Kostenträger"] = np.where(df["Konto_914er"] == True, "70", df["Kostenträger"])
        # Parts (cost centre 3x) -> carrier 60; service (4x) -> carrier 70.
        df["Teile_30_60"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[3]"))
            & (df["Kostenträger"].str.match(r"^[^6]"))
        )
        df["Kostenträger"] = np.where(df["Teile_30_60"] == True, "60", df["Kostenträger"])
        df["Service_40_70"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[4]"))
            & (df["Kostenträger"].str.match(r"^[^7]"))
        )
        df["Kostenträger"] = np.where(df["Service_40_70"] == True, "70", df["Kostenträger"])
        # Combined key and validity flag for the export.
        df["KRM"] = df["Marke"] + df["Standort"] + df["Kostenstelle"] + df["Absatzkanal"] + df["Kostenträger"]
        df["IsNumeric"] = (df["KRM"].str.isdigit()) & (df["Konto_Nr"].str.isdigit()) & (df["Konto_Nr"].str.len() == 4)
        return df
  292. def load_bookings_from_file(self):
  293. df2 = []
  294. timestamps = []
  295. for csv_file in self.account_bookings:
  296. df2.append(
  297. pd.read_csv(
  298. csv_file,
  299. decimal=",",
  300. sep=";",
  301. encoding="latin-1",
  302. converters={0: str, 1: str},
  303. )
  304. )
  305. timestamps.append(Path(csv_file).stat().st_mtime)
  306. self.booking_date = datetime.fromtimestamp(max(timestamps))
  307. self.df_bookings = pd.concat(df2)
  308. self.df_bookings["amount"] = (self.df_bookings["Debit Amount"] + self.df_bookings["Credit Amount"]).round(2)
    def filter_bookings(self):
        """Return the bookings of the current financial year.

        Builds an opening-balance slice (deriving it from the previous year
        when the year has not been closed), duplicates the bookings as
        statistical "_STK" quantity accounts, and maps each raw period to
        its export column name. Rows with a zero amount are dropped.
        """
        if self.df_bookings is None:
            self.load_bookings_from_file()
        # Period boundaries ("YYYYMM" strings compare correctly as text).
        filter_from = self.current_year + self.first_month_of_financial_year
        filter_prev = self.last_year + self.first_month_of_financial_year
        if self.first_month_of_financial_year > self.current_month:
            # Shifted financial year still running from the previous year.
            filter_from = self.last_year + self.first_month_of_financial_year
            filter_prev = self.last_year2 + self.first_month_of_financial_year
        filter_to = self.current_year + self.current_month
        filter_opening = self.current_year + "00"
        filter_prev_opening = self.last_year + "00"
        prev_year_closed = True
        df_opening_balance = self.df_bookings[(self.df_bookings["Bookkeep Period"] == filter_opening)]
        if df_opening_balance.shape[0] == 0:
            # No opening balance booked yet: derive it from the previous
            # year's opening balance plus all of last year's bookings.
            df_opening_balance = self.df_bookings[
                (self.df_bookings["Bookkeep Period"] == filter_prev_opening)
                | (
                    (self.df_bookings["Bookkeep Period"] >= filter_prev)
                    & (self.df_bookings["Bookkeep Period"] < filter_from)
                )
            ].copy()
            df_opening_balance["Bookkeep Period"] = filter_opening
            prev_year_closed = False
        # Only balance-sheet accounts (0/1/3) carry an opening balance.
        df_opening_balance = df_opening_balance[(df_opening_balance["Konto_Nr_Händler"].str.contains(r"-[013]\d\d+-"))]
        opening_balance = df_opening_balance["amount"].aggregate("sum").round(2)
        logging.info("Gewinn/Verlustvortrag")
        logging.info(opening_balance)
        if not prev_year_closed:
            # Book the profit/loss carry-forward against synthetic account
            # 0861 so the opening balance sums to zero.
            row = {
                "Konto_Nr_Händler": "01-01-0861-00-00-00",
                "Bookkeep Period": filter_opening,
                "Debit Amount": opening_balance * -1,
                "Credit Amount": 0,
                "Debit Quantity": 0,
                "Credit Quantity": 0,
                "amount": opening_balance * -1,
            }
            df_opening_balance = pd.concat([df_opening_balance, pd.DataFrame.from_records([row])])
        df_filtered = self.df_bookings[
            (self.df_bookings["Bookkeep Period"] >= filter_from) & (self.df_bookings["Bookkeep Period"] <= filter_to)
        ]
        # Copy the bookings as statistical quantity accounts ("_STK" suffix,
        # amount = debit + credit quantity).
        df_stats = df_filtered.copy()
        df_stats["Konto_Nr_Händler"] = df_stats["Konto_Nr_Händler"].str.replace(r"-(\d\d\d+)-", r"-\1_STK-", regex=True)
        df_stats["amount"] = (df_filtered["Debit Quantity"] + df_filtered["Credit Quantity"]).round(2)
        df_combined = pd.concat([df_opening_balance, df_filtered, df_stats])
        # Map the raw period ("YYYYMM") to the export column name.
        df_combined["period"] = df_combined["Bookkeep Period"].apply(lambda x: self.bookkeep_filter[x])
        return df_combined[df_combined["amount"] != 0.00]
  360. @property
  361. def export_filename(self):
  362. return self.export_filename_for_period(self.current_year, self.current_month)
  363. @property
  364. def export_info_dir(self):
  365. return f"{self.base_dir}/Export/{self.current_year}/info/"
  366. def export_filename_for_period(self, year, month):
  367. return f"{self.base_dir}/Export/{year}/export_{year}-{month}.xml"
  368. def export_xml(self, records, bk_filter, period_no, makes, sites, main_site):
  369. record_elements = ACCOUNT_INFO + ["Decimals"] + list(bk_filter.values())[:period_no] + ["CumulatedYear"]
  370. root = ET.Element("HbvData")
  371. h = ET.SubElement(root, "Header")
  372. for k, v in self.header(makes, sites, main_site).items():
  373. ET.SubElement(h, k).text = str(v)
  374. make_list = ET.SubElement(root, "MakeList")
  375. for m in makes:
  376. e = ET.SubElement(make_list, "MakeListEntry")
  377. ET.SubElement(e, "Make").text = m["Make"]
  378. ET.SubElement(e, "MakeCode").text = m["Marke_HBV"]
  379. bm_code_list = ET.SubElement(root, "BmCodeList")
  380. for s in sites:
  381. e = ET.SubElement(bm_code_list, "BmCodeEntry")
  382. ET.SubElement(e, "Make").text = s["Make"]
  383. ET.SubElement(e, "Site").text = s["Site"]
  384. ET.SubElement(e, "BmCode").text = s["Standort_HBV"]
  385. record_list = ET.SubElement(root, "RecordList")
  386. for row in records:
  387. record = ET.SubElement(record_list, "Record")
  388. for e in record_elements:
  389. child = ET.SubElement(record, e)
  390. field = row.get(e, 0.0)
  391. if str(field) == "nan":
  392. field = "0"
  393. elif type(field) is float:
  394. field = "{:.0f}".format(field * 100)
  395. child.text = str(field)
  396. with open(self.export_filename, "w", encoding="utf-8") as fwh:
  397. fwh.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent=" "))
  398. def convert_to_row(self, node):
  399. return [child.text for child in node]
  400. def convert_xml_to_csv(self, xmlfile, csvfile):
  401. record_list = ET.parse(xmlfile).getroot().find("RecordList")
  402. header = [child.tag for child in record_list.find("Record")]
  403. bookings = [self.convert_to_row(node) for node in record_list.findall("Record")]
  404. with open(csvfile, "w") as fwh:
  405. cwh = csv.writer(fwh, delimiter=";")
  406. cwh.writerow(header)
  407. cwh.writerows(bookings)
  408. return True
  409. def convert_csv_to_xml(self, csvfile, xmlfile):
  410. makes = [{"Make": "01", "Marke_HBV": "1844"}]
  411. sites = [{"Make": "01", "Site": "01", "Marke_HBV": "1844"}]
  412. with open(csvfile, "r", encoding="latin-1") as frh:
  413. csv_reader = csv.DictReader(frh, delimiter=";")
  414. self.export_xml(csv_reader, self.bookkeep_filter(), 1, makes, sites, sites[0]["Standort_HBV"], xmlfile)
  415. def gchr_local():
  416. base_dir = os.getcwd() + "/../GCHR2_Testdaten/Kunden"
  417. for path in Path(base_dir).glob("*"):
  418. if path.is_dir():
  419. print(path.name)
  420. gchr_export(str(path))
  421. def gchr_export(base_dir):
  422. gchr = GCHR(base_dir)
  423. gchr.export_all_periods(overwrite=True, today="2022-08-01")
if __name__ == "__main__":
    gchr_local()
    # Optional profiling run (uncomment to write gchr_local.prof):
    # import cProfile
    # cProfile.run(
    #     "gchr_local()",
    #     "gchr_local.prof",
    # )