# gchr.py
  1. import csv
  2. import logging
  3. import os
  4. import xml.etree.ElementTree as ET
  5. from datetime import datetime
  6. from pathlib import Path
  7. from xml.dom import minidom
  8. import numpy as np
  9. import pandas as pd
  10. ACCOUNT_INFO = [
  11. "Account",
  12. "Make",
  13. "Site",
  14. "Origin",
  15. "SalesChannel",
  16. "CostCarrier",
  17. "CostAccountingString",
  18. ]
  19. TRANSLATE = [
  20. "Konto_Nr_Händler",
  21. "Konto_Nr_SKR51",
  22. "Marke",
  23. "Standort",
  24. "Konto_Nr",
  25. "Kostenstelle",
  26. "Absatzkanal",
  27. "Kostenträger",
  28. "Kontoart",
  29. "Konto_1",
  30. "KRM",
  31. "IsNumeric",
  32. ]
class GCHR:
    """Builds the HBV ("GCHR") XML export from dealer bookkeeping data.

    Reads account balances and a translation table from <base_dir>/data and
    writes XML/CSV exports to <base_dir>/Export.
    """

    # extraction timestamp; set from the newest bookings file mtime in
    # load_bookings_from_file()
    booking_date: datetime
    # raw bookings cache, filled lazily by load_bookings_from_file()
    df_bookings: pd.DataFrame = None
    # dealer account -> SKR51 translation, filled by get_translation()
    df_translate: pd.DataFrame = None
    # same translation keyed by Konto_Nr_SKR51 (used after pivoting)
    df_translate2: pd.DataFrame = None
    # make code -> HBV make code (filled by get_translation())
    makes: dict[str, str] = None
    # "<make>-<site>" -> HBV site code (filled by get_translation())
    sites: dict[str, str] = None
  40. def __init__(self, base_dir) -> None:
  41. self.base_dir = base_dir
  42. self.account_translation = f"{self.base_dir}/data/Kontenrahmen_uebersetzt.csv"
  43. self.account_bookings = list(Path(self.base_dir).joinpath("data").glob("GuV_Bilanz_Salden*.csv"))
  44. self.first_month_of_financial_year = "10"
  45. pd.set_option("display.max_rows", 500)
  46. pd.set_option("display.float_format", lambda x: "%.2f" % x)
  47. def set_bookkeep_period(self, year, month):
  48. self.current_year = year
  49. self.current_month = month
  50. period = f"{year}-{month}"
  51. prot_file = f"{self.export_info_dir}/protokoll_{period}.log"
  52. logging.basicConfig(
  53. filename=prot_file,
  54. filemode="w",
  55. encoding="utf-8",
  56. level=logging.DEBUG,
  57. force=True,
  58. )
  59. self.debug_file = f"{self.export_info_dir}/debug_{period}.csv"
  60. self.account_ignored = f"{self.export_info_dir}/ignoriert_{period}.csv"
  61. # self.account_invalid = f"{self.export_info_dir}/ungueltig_{period}.csv"
  62. self.last_year = str(int(self.current_year) - 1)
  63. self.last_year2 = str(int(self.current_year) - 2)
  64. self.next_year = str(int(self.current_year) + 1)
  65. def header(self, makes_used, sites_used, main_site):
  66. return {
  67. "Country": "DE",
  68. "MainBmCode": main_site,
  69. "Month": self.current_month,
  70. "Year": self.current_year,
  71. "Currency": "EUR",
  72. "NumberOfMakes": len(makes_used),
  73. "NumberOfSites": len(sites_used),
  74. "ExtractionDate": self.booking_date.strftime("%d.%m.%Y"),
  75. "ExtractionTime": self.booking_date.strftime("%H:%M:%S"),
  76. "BeginFiscalYear": self.first_month_of_financial_year,
  77. }
  78. @property
  79. def bookkeep_filter(self):
  80. period = [self.current_year + str(i).zfill(2) for i in range(1, 13)]
  81. if self.first_month_of_financial_year != "01":
  82. if self.first_month_of_financial_year > self.current_month:
  83. period = [self.last_year + str(i).zfill(2) for i in range(1, 13)] + period
  84. else:
  85. period = period + [self.next_year + str(i).zfill(2) for i in range(1, 13)]
  86. fm = int(self.first_month_of_financial_year)
  87. period = period[fm - 1 : fm + 12]
  88. period = [self.current_year + "00"] + period
  89. rename_to = ["OpeningBalance"] + ["Period" + str(i).zfill(2) for i in range(1, 13)]
  90. return dict(zip(period, rename_to))
  91. def extract_acct_info(self, df: pd.DataFrame):
  92. acct_info = [
  93. "Marke",
  94. "Standort",
  95. "Konto_Nr",
  96. "Kostenstelle",
  97. "Absatzkanal",
  98. "Kostenträger",
  99. ]
  100. df["HasFiveDashes"] = df["Konto_Nr_SKR51"].str.count("-") == 5
  101. df["Invalid"] = "XX-XX-XXXX-XX-XX-XX"
  102. df["Konto_Nr_SKR51"] = np.where(
  103. df["HasFiveDashes"],
  104. df["Konto_Nr_SKR51"],
  105. df["Invalid"],
  106. )
  107. df[acct_info] = df["Konto_Nr_SKR51"].str.split(pat="-", n=6, expand=True)
  108. return df
  109. def export_all_periods(self, overwrite=False, today=None):
  110. dt = datetime.now()
  111. if today is not None:
  112. dt = datetime.fromisoformat(today)
  113. prev = str(dt.year - 1)
  114. periods = [(prev, str(x).zfill(2)) for x in range(dt.month, 13)] + [
  115. (str(dt.year), str(x).zfill(2)) for x in range(1, dt.month)
  116. ]
  117. for year, month in periods:
  118. filename = self.export_filename_for_period(year, month)
  119. if overwrite or not Path(filename).exists():
  120. os.makedirs(Path(filename).parent.joinpath("info"), exist_ok=True)
  121. self.export_period(year, month)
    def export_period(self, year, month):
        """Export one bookkeeping period to XML (plus CSV side products).

        Returns the export filename on success, False when no usable data
        exists for the requested period.
        """
        self.set_bookkeep_period(year, month)
        # load translation table (dealer account -> SKR51)
        self.get_translation()
        # load account balances
        df_bookings = self.filter_bookings()
        all_periods = set(df_bookings["Bookkeep Period"].to_list())
        bookkeep_period_date = datetime(int(year), int(month), 28)
        # abort when there is no data, only a single period, or the bookings
        # are older than the requested period (month not fully booked yet)
        if df_bookings.shape[0] == 0 or len(all_periods) <= 1 or self.booking_date < bookkeep_period_date:
            logging.error("ABBRUCH!!! Keine Daten vorhanden!")
            return False
        filter_to = self.current_year + self.current_month
        # 1-based number of the current period within the fiscal year
        period_no = list(self.bookkeep_filter.keys()).index(filter_to) + 1
        logging.info("df_bookings: " + str(df_bookings.shape))
        # join against the translation table
        df_combined = df_bookings.merge(self.df_translate, how="inner", on="Konto_Nr_Händler")
        logging.info(f"df_combined: {df_combined.shape}")
        # one row per SKR51 account, one column per period; margins add the
        # "CumulatedYear" totals column
        df_pivot = df_combined.pivot_table(
            index=["Konto_Nr_SKR51"],
            columns=["period"],
            values="amount",
            aggfunc="sum",
            margins=True,
            margins_name="CumulatedYear",
        )
        # the margins grand-total ROW is not wanted, only the margin column
        df_pivot.drop(index="CumulatedYear", inplace=True)
        logging.info("df_pivot: " + str(df_pivot.shape))
        df = df_pivot.merge(self.df_translate2, how="inner", on="Konto_Nr_SKR51")
        makes_used = sorted(list(set(df["Marke"].to_list())))
        sites_used = sorted(list(set((df["Marke"] + "-" + df["Standort"]).to_list())))
        # rename German columns to the English export labels
        from_label = ["Marke", "Standort", "Konto_Nr", "Kostenstelle", "Absatzkanal", "Kostenträger", "KRM"]
        to_label = ["Make", "Site", "Account", "Origin", "SalesChannel", "CostCarrier", "CostAccountingString"]
        col_dict = dict(zip(from_label, to_label))
        df = df.rename(columns=col_dict)
        # CSV twin of the XML export (same basename)
        export_csv = self.export_filename[:-4] + ".csv"
        df.to_csv(export_csv, decimal=",", sep=";", encoding="latin-1", index=False)
        # keep only numeric accounts and aggregate duplicates per account key
        df = df[df["IsNumeric"] != False].groupby(ACCOUNT_INFO, as_index=False).aggregate("sum")
        # add remaining info
        df["Decimals"] = 2
        # df.sort_values(by=["Konto_Nr_SKR51"], inplace=True)
        logging.info(df.shape)
        # one XML file per HBV main site; the first keeps the plain filename
        main_sites = [self.sites[s] for s in sites_used if s in self.sites and self.sites[s] != "0000"]
        for i, main_site in enumerate(main_sites):
            filename = self.export_filename
            if i > 0:
                filename = f"{filename[:-4]}_{main_site}.xml"
            self.export_xml(
                df.to_dict(orient="records"),
                self.bookkeep_filter,
                period_no,
                makes_used,
                sites_used,
                main_site,
                filename,
            )
        # left join against translation - collect accounts with no mapping
        df_ignored = df_bookings.merge(self.df_translate, how="left", on="Konto_Nr_Händler")
        df_ignored = df_ignored[df_ignored["Konto_Nr_SKR51"].isna()]
        if not df_ignored.empty:
            df_ignored = df_ignored.pivot_table(
                index=["Konto_Nr_Händler"],
                columns=["period"],
                values="amount",
                aggfunc="sum",
                margins=True,
                margins_name="CumulatedYear",
            )
            df_ignored.to_csv(self.account_ignored, decimal=",", sep=";", encoding="latin-1")
        return self.export_filename
    def get_translation(self):
        """Load and cache the account translation table on first use.

        Fills self.makes, self.sites, self.df_translate and self.df_translate2.
        Returns self.df_translate.
        """
        if self.df_translate is None:
            # read every column as string (converters cover the first 200 cols)
            df_translate_import = pd.read_csv(
                self.account_translation,
                decimal=",",
                sep=";",
                encoding="latin-1",
                converters={i: str for i in range(0, 200)},
            ).reset_index()
            # make -> HBV make code; "99" is the catch-all for unknown makes
            df_makes = df_translate_import[["Marke", "Marke_HBV"]].copy().drop_duplicates()
            df_makes = df_makes[df_makes["Marke_HBV"] != "0000"]
            self.makes = dict([(e["Marke"], e["Marke_HBV"]) for e in df_makes.to_dict(orient="records")])
            self.makes["99"] = "0000"
            # "<make>-<site>" -> HBV site code; codes that are not exactly
            # 6 characters long are treated as unassigned ("0000")
            df_sites = df_translate_import[["Marke", "Standort", "Standort_HBV"]].copy().drop_duplicates()
            df_sites["Standort_HBV"] = np.where(
                df_sites["Standort_HBV"].str.len() != 6, "0000", df_sites["Standort_HBV"]
            )
            self.sites = dict(
                [(e["Marke"] + "-" + e["Standort"], e["Standort_HBV"]) for e in df_sites.to_dict(orient="records")]
            )
            df_prepared = self.prepare_translation(df_translate_import)
            self.df_translate = self.special_translation(df_prepared)
            # second lookup keyed by Konto_Nr_SKR51 (used after the pivot)
            self.df_translate2 = (
                self.df_translate.drop(columns=["Konto_Nr_Händler"])
                .copy()
                .drop_duplicates()
                .set_index("Konto_Nr_SKR51")
            )
        return self.df_translate
  220. def prepare_translation(self, df_translate_import: pd.DataFrame):
  221. df_translate = df_translate_import[
  222. [
  223. "Konto_Nr_Händler",
  224. "Konto_Nr_SKR51",
  225. ]
  226. ].drop_duplicates()
  227. logging.info(df_translate.shape)
  228. row = {
  229. "Konto_Nr_Händler": "01-01-0861-00-00-00",
  230. "Konto_Nr_SKR51": "01-01-0861-00-00-00",
  231. }
  232. df_translate = pd.concat([df_translate, pd.DataFrame.from_records([row])])
  233. df_translate.set_index("Konto_Nr_Händler")
  234. return df_translate
    def special_translation(self, df: pd.DataFrame):
        """Apply the SKR51 business rules to the prepared translation table.

        Derives the account type, patches unknown make/site codes, normalises
        cost centres and cost units for the various sales channels and keeps
        only rows whose composed key is fully numeric. Rows failing that check
        are written to export_invalid_filename. Returns df[TRANSLATE] for the
        valid rows only.
        """
        df["Konto_Nr_Händler"] = df["Konto_Nr_Händler"].str.upper()
        df["Konto_Nr_SKR51"] = df["Konto_Nr_SKR51"].str.upper()
        df = self.extract_acct_info(df)
        df["Konto_Nr"] = df["Konto_Nr"].str.upper()
        logging.info(df.shape)
        logging.info(df.columns)
        logging.info(df.head())
        logging.info("df: " + str(df.shape))
        # Kontoart: "1" = balance sheet (accounts starting 0/1/3),
        # "2" = P&L, "3" = statistics (quantity "_STK" accounts and 9xxx)
        df["Bilanz"] = df["Konto_Nr"].str.match(r"^[013]")
        df["Kontoart"] = np.where(df["Bilanz"], "1", "2")
        df["Kontoart"] = np.where(df["Konto_Nr"].str.contains("_STK"), "3", df["Kontoart"])
        df["Kontoart"] = np.where(df["Konto_Nr"].str.match(r"^[9]"), "3", df["Kontoart"])
        df["Konto_1"] = df["Konto_Nr"].str.slice(0, 1)
        # missing make/site assignment: fall back to make "99" / site "01"
        df["Marke"] = np.where(df["Marke"].isin(self.makes.keys()), df["Marke"], "99")
        df["Marke_Standort"] = df["Marke"] + "-" + df["Standort"]
        df["Standort"] = np.where(df["Marke_Standort"].isin(self.sites.keys()), df["Standort"], "01")
        # debug aggregates per account type/number
        df_debug = df.drop(columns=["Bilanz"])
        logging.info(df_debug.groupby(["Kontoart"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Kontoart", "Konto_1"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Konto_Nr"]).aggregate("sum"))
        df_debug.groupby(["Konto_Nr"]).aggregate("sum").to_csv(
            self.debug_file, decimal=",", sep=";", encoding="latin-1"
        )
        # clean-up of used-car cost units (GW-Kostenträger): force the cost
        # centre/cost unit that matches each revenue account range
        df["NW_Verkauf_1"] = (df["Konto_Nr"].str.match(r"^[78]0")) & (df["Kostenstelle"].str.match(r"^[^1]\d"))
        df["Kostenstelle"] = np.where(df["NW_Verkauf_1"] == True, "11", df["Kostenstelle"])
        df["Konto_7010"] = df["Konto_Nr"].str.match(r"^[78]01[01]")
        df["Kostenstelle"] = np.where(df["Konto_7010"] == True, "14", df["Kostenstelle"])
        df["GW_Verkauf_2"] = (df["Konto_Nr"].str.match(r"^[78]1")) & (df["Kostenstelle"].str.match(r"^[^2]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_2"] == True, "21", df["Kostenstelle"])
        df["GW_Verkauf_3"] = (df["Konto_Nr"].str.match(r"^[78]3")) & (df["Kostenstelle"].str.match(r"^[^3]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_3"] == True, "31", df["Kostenstelle"])
        df["GW_Verkauf_4"] = (df["Konto_Nr"].str.match(r"^[78]4")) & (df["Kostenstelle"].str.match(r"^[^4]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_4"] == True, "41", df["Kostenstelle"])
        df["GW_Verkauf_x420"] = df["Konto_Nr"].str.match(r"^[78]420")
        df["Kostenstelle"] = np.where(df["GW_Verkauf_x420"] == True, "42", df["Kostenstelle"])
        df["GW_Verkauf_5"] = (df["Konto_Nr"].str.match(r"^[78]5")) & (df["Kostenstelle"].str.match(r"^[^5]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_5"] == True, "51", df["Kostenstelle"])
        # used-car sales: cost unit 52, but 50 for make "01"
        df["GW_Verkauf_50"] = (df["Konto_Nr"].str.match(r"^[78]")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Verkauf_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where(
            (df["GW_Verkauf_50"] == True) & (df["Marke"] == "01"),
            "50",
            df["Kostenträger"],
        )
        df["NW_Verkauf_00"] = (
            (df["Konto_Nr"].str.match(r"^[78]2"))
            & (df["Kostenstelle"].str.match(r"^1"))
            & (df["Kostenträger"].str.match(r"^[^01234]"))
        )
        df["Kostenträger"] = np.where(df["NW_Verkauf_00"] == True, "00", df["Kostenträger"])
        df["GW_Stk_50"] = (df["Konto_Nr"].str.match(r"^9130")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Stk_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where((df["GW_Stk_50"] == True) & (df["Marke"] == "01"), "50", df["Kostenträger"])
        # balance-sheet accounts always carry cost unit "00"
        df["Kostenträger"] = np.where(df["Bilanz"] == True, "00", df["Kostenträger"])
        df["Konto_5er"] = (df["Konto_Nr"].str.match("^5")) | (df["Konto_Nr"].str.match("^9143"))
        df["Absatzkanal"] = np.where(df["Konto_5er"] == True, "99", df["Absatzkanal"])
        df["Konto_5005"] = (df["Konto_Nr"].str.match("^5005")) & (df["Kostenstelle"].str.match(r"^[^12]"))
        df["Kostenstelle"] = np.where(df["Konto_5005"] == True, "20", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5005"] == True, "50", df["Kostenträger"])
        df["Konto_5007"] = (df["Konto_Nr"].str.match("^5007")) & (df["Kostenstelle"].str.match(r"^([^4]|42)"))
        df["Kostenstelle"] = np.where(df["Konto_5007"] == True, "41", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5007"] == True, "70", df["Kostenträger"])
        df["Konto_914er"] = (df["Konto_Nr"].str.match("^914[34]")) & (df["Kostenträger"].str.match(r"^[^7]"))
        df["Kostenträger"] = np.where(df["Konto_914er"] == True, "70", df["Kostenträger"])
        # parts (cost centre 3x) -> cost unit 60, service (4x) -> 70
        df["Teile_30_60"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[3]"))
            & (df["Kostenträger"].str.match(r"^[^6]"))
        )
        df["Kostenträger"] = np.where(df["Teile_30_60"] == True, "60", df["Kostenträger"])
        df["Service_40_70"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[4]"))
            & (df["Kostenträger"].str.match(r"^[^7]"))
        )
        df["Kostenträger"] = np.where(df["Service_40_70"] == True, "70", df["Kostenträger"])
        # recompose the cost-accounting key and the full SKR51 number from the
        # (possibly patched) components
        df["KRM"] = df["Marke"] + df["Standort"] + df["Kostenstelle"] + df["Absatzkanal"] + df["Kostenträger"]
        df["Konto_Nr_SKR51"] = (
            (df["Marke"] + "-" + df["Standort"] + "-" + df["Konto_Nr"])
            + "-"
            + (df["Kostenstelle"] + "-" + df["Absatzkanal"] + "-" + df["Kostenträger"])
        )
        # valid rows: all key parts numeric and a 4-digit account number
        df["IsNumeric"] = (
            (df["KRM"].str.isdigit())
            & (df["Konto_Nr"].str.isdigit())
            & (df["Konto_Nr"].str.len() == 4)
            # & (df["Konto_Nr_SKR51"].str.len() == 19)
        )
        df_invalid = df[df["IsNumeric"] == False]
        df_invalid.to_csv(self.export_invalid_filename, decimal=",", sep=";", encoding="latin-1", index=False)
        return df[df["IsNumeric"] == True][TRANSLATE]
  329. def load_bookings_from_file(self):
  330. df2 = []
  331. timestamps = []
  332. for csv_file in self.account_bookings:
  333. df2.append(
  334. pd.read_csv(
  335. csv_file,
  336. decimal=",",
  337. sep=";",
  338. encoding="latin-1",
  339. converters={0: str, 1: str},
  340. )
  341. )
  342. timestamps.append(Path(csv_file).stat().st_mtime)
  343. self.booking_date = datetime.fromtimestamp(max(timestamps))
  344. self.df_bookings = pd.concat(df2)
  345. self.df_bookings["amount"] = (self.df_bookings["Debit Amount"] + self.df_bookings["Credit Amount"]).round(2)
    def filter_bookings(self):
        """Restrict bookings to the current fiscal year and derive columns.

        Builds an opening-balance pseudo period (YYYY00) when the previous
        year has not been closed yet, appends "_STK" statistic rows carrying
        quantities instead of amounts, and maps each raw period to its export
        column name. Rows with amount 0 are dropped.
        """
        if self.df_bookings is None:
            self.load_bookings_from_file()
        # filter account balances down to the requested month
        filter_from = self.current_year + self.first_month_of_financial_year
        filter_prev = self.last_year + self.first_month_of_financial_year
        # fiscal year started in the previous calendar year?
        if self.first_month_of_financial_year > self.current_month:
            filter_from = self.last_year + self.first_month_of_financial_year
            filter_prev = self.last_year2 + self.first_month_of_financial_year
        filter_to = self.current_year + self.current_month
        filter_opening = self.current_year + "00"
        filter_prev_opening = self.last_year + "00"
        prev_year_closed = True
        df_opening_balance = self.df_bookings[(self.df_bookings["Bookkeep Period"] == filter_opening)]
        if df_opening_balance.shape[0] == 0:
            # no closing entries yet: reconstruct the opening balance from the
            # previous year's opening balance plus its fiscal-year bookings
            df_opening_balance = self.df_bookings[
                (self.df_bookings["Bookkeep Period"] == filter_prev_opening)
                | (
                    (self.df_bookings["Bookkeep Period"] >= filter_prev)
                    & (self.df_bookings["Bookkeep Period"] < filter_from)
                )
            ].copy()
            df_opening_balance["Bookkeep Period"] = filter_opening
            prev_year_closed = False
        # keep balance-sheet accounts only (third segment starts with 0/1/3)
        df_opening_balance = df_opening_balance[(df_opening_balance["Konto_Nr_Händler"].str.contains(r"-[013]\d\d+-"))]
        opening_balance = df_opening_balance["amount"].aggregate("sum").round(2)
        logging.info("Gewinn/Verlustvortrag")
        logging.info(opening_balance)
        if not prev_year_closed:
            # book the profit/loss carried forward against account 0861
            row = {
                "Konto_Nr_Händler": "01-01-0861-00-00-00",
                "Bookkeep Period": filter_opening,
                "Debit Amount": opening_balance * -1,
                "Credit Amount": 0,
                "Debit Quantity": 0,
                "Credit Quantity": 0,
                "amount": opening_balance * -1,
            }
            df_opening_balance = pd.concat([df_opening_balance, pd.DataFrame.from_records([row])])
        df_filtered = self.df_bookings[
            (self.df_bookings["Bookkeep Period"] >= filter_from) & (self.df_bookings["Bookkeep Period"] <= filter_to)
        ]
        # copy the bookings and append them as statistic ("_STK") accounts,
        # carrying quantities in the amount column
        df_stats = df_filtered.copy()
        # df_stats = df_stats[df_stats['Konto_Nr_Händler'].str.match(r'-[24578]\d\d\d-')]
        df_stats["Konto_Nr_Händler"] = df_stats["Konto_Nr_Händler"].str.replace(r"-(\d\d\d+)-", r"-\1_STK-", regex=True)
        df_stats["amount"] = (df_filtered["Debit Quantity"] + df_filtered["Credit Quantity"]).round(2)
        df_combined = pd.concat([df_opening_balance, df_filtered, df_stats])
        # convert columns: map raw period (YYYYMM) to the export column name
        df_combined["period"] = df_combined["Bookkeep Period"].apply(lambda x: self.bookkeep_filter[x])
        return df_combined[df_combined["amount"] != 0.00]
  397. @property
  398. def export_filename(self):
  399. return self.export_filename_for_period(self.current_year, self.current_month)
  400. @property
  401. def export_info_dir(self):
  402. return f"{self.base_dir}/Export/{self.current_year}/info/"
  403. @property
  404. def export_invalid_filename(self):
  405. return f"{self.base_dir}/Export/ungueltig.csv"
  406. def export_filename_for_period(self, year, month):
  407. return f"{self.base_dir}/Export/{year}/export_{year}-{month}.xml"
  408. def export_xml(self, records, bk_filter, period_no, makes_used, sites_used, main_site, filename):
  409. record_elements = ACCOUNT_INFO + ["Decimals"] + list(bk_filter.values())[:period_no] + ["CumulatedYear"]
  410. root = ET.Element("HbvData")
  411. h = ET.SubElement(root, "Header")
  412. for k, v in self.header(makes_used, sites_used, main_site).items():
  413. ET.SubElement(h, k).text = str(v)
  414. make_list = ET.SubElement(root, "MakeList")
  415. for make in makes_used:
  416. if make not in self.makes:
  417. continue
  418. e = ET.SubElement(make_list, "MakeListEntry")
  419. ET.SubElement(e, "Make").text = make
  420. ET.SubElement(e, "MakeCode").text = self.makes[make]
  421. bm_code_list = ET.SubElement(root, "BmCodeList")
  422. for s in sites_used:
  423. make, site = s.split("-")
  424. if s not in self.sites:
  425. continue
  426. e = ET.SubElement(bm_code_list, "BmCodeEntry")
  427. ET.SubElement(e, "Make").text = make
  428. ET.SubElement(e, "Site").text = site
  429. ET.SubElement(e, "BmCode").text = self.sites[s]
  430. record_list = ET.SubElement(root, "RecordList")
  431. for row in records:
  432. record = ET.SubElement(record_list, "Record")
  433. for e in record_elements:
  434. child = ET.SubElement(record, e)
  435. field = row.get(e, 0.0)
  436. if str(field) == "nan":
  437. field = "0"
  438. elif type(field) is float:
  439. field = "{:.0f}".format(field * 100)
  440. child.text = str(field)
  441. with open(filename, "w", encoding="utf-8") as fwh:
  442. fwh.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent=" "))
  443. def convert_to_row(self, node):
  444. return [child.text for child in node]
  445. def convert_xml_to_csv(self, xmlfile, csvfile):
  446. record_list = ET.parse(xmlfile).getroot().find("RecordList")
  447. header = [child.tag for child in record_list.find("Record")]
  448. bookings = [self.convert_to_row(node) for node in record_list.findall("Record")]
  449. with open(csvfile, "w") as fwh:
  450. cwh = csv.writer(fwh, delimiter=";")
  451. cwh.writerow(header)
  452. cwh.writerows(bookings)
  453. return True
  454. def convert_csv_to_xml(self, csvfile, xmlfile):
  455. self.makes = {"01": "1844"}
  456. self.sites = {"01-01": "1844"}
  457. with open(csvfile, "r", encoding="latin-1") as frh:
  458. csv_reader = csv.DictReader(frh, delimiter=";")
  459. self.export_xml(csv_reader, self.bookkeep_filter(), 1, list(self.sites.values())[0], xmlfile)
  460. def gchr_local():
  461. base_dir = os.getcwd() + "/../GCHR2_Testdaten/Kunden"
  462. for path in Path(base_dir).glob("*"):
  463. if path.is_dir():
  464. print(path.name)
  465. gchr_export(str(path))
  466. def gchr_export(base_dir):
  467. gchr = GCHR(base_dir)
  468. # gchr.export_all_periods(overwrite=True, today="2022-08-01")
  469. gchr.export_all_periods()
if __name__ == "__main__":
    gchr_local()
    # Optional profiling run (kept for reference):
    # import cProfile
    # cProfile.run(
    #     "gchr_local()",
    #     "gchr_local.prof",
    # )