# gchr.py
import csv
import logging
import os
import xml.etree.ElementTree as ET
from datetime import datetime
from enum import Enum, auto
from pathlib import Path
from xml.dom import minidom

import numpy as np
import pandas as pd
# Columns that identify one account row in the export.
# Used both as the group-by key before serialization and as the leading
# XML elements of each <Record> — the order here is load-bearing.
ACCOUNT_INFO = [
    "Account",
    "Make",
    "Site",
    "Origin",
    "SalesChannel",
    "CostCarrier",
    "CostAccountingString",
]
class GCHR:
    """Builds the GCHR/HBV XML export from dealer bookkeeping CSV files.

    Expects a directory layout of ``<base_dir>/data`` (input CSVs) and
    ``<base_dir>/Export/<year>/`` (outputs, incl. an ``info`` subfolder
    for logs and debug CSVs).
    """

    def __init__(self, base_dir) -> None:
        # Root folder of one customer: holds "data" (inputs) and "Export" (outputs).
        self.base_dir = base_dir
        # Translated chart of accounts (dealer account no. -> SKR51 account no.).
        self.account_translation = f"{self.base_dir}/data/Kontenrahmen_uebersetzt.csv"
        # All balance CSVs found in the data folder (may be empty).
        self.account_bookings = list(Path(self.base_dir).joinpath("data").glob("GuV_Bilanz_Salden*.csv"))
        # Financial year starts in September (month as zero-padded string).
        self.first_month_of_financial_year = "09"
        # NOTE: pandas options are process-global, not per-instance.
        pd.set_option("display.max_rows", 500)
        pd.set_option("display.float_format", lambda x: "%.2f" % x)
    def set_bookkeep_period(self, year, month):
        """Select the booking period and (re)configure per-period log/debug files.

        Both arguments are strings ("YYYY", zero-padded "MM"). Side effects:
        reconfigures the root logger to write into the period's protocol file
        and derives the helper-year attributes used by the period filters.
        """
        self.current_year = year
        self.current_month = month
        period = f"{year}-{month}"
        prot_file = f"{self.export_info_dir}/protokoll_{period}.log"
        # force=True replaces handlers left over from a previous period's run.
        logging.basicConfig(
            filename=prot_file,
            filemode="w",
            encoding="utf-8",
            level=logging.DEBUG,
            force=True,
        )
        # Per-period diagnostic outputs.
        self.debug_file = f"{self.export_info_dir}/debug_{period}.csv"
        self.account_ignored = f"{self.export_info_dir}/ignoriert_{period}.csv"
        self.account_invalid = f"{self.export_info_dir}/ungueltig_{period}.csv"
        # Neighbouring years as strings, for building period filters.
        self.last_year = str(int(self.current_year) - 1)
        self.last_year2 = str(int(self.current_year) - 2)
        self.next_year = str(int(self.current_year) + 1)
  46. def header(self, makes, sites):
  47. return {
  48. "Country": "DE",
  49. "MainBmCode": sites[0]["Standort_HBV"],
  50. "Month": self.current_month,
  51. "Year": self.current_year,
  52. "Currency": "EUR",
  53. "NumberOfMakes": len(makes),
  54. "NumberOfSites": len(sites),
  55. "ExtractionDate": datetime.now().strftime("%d.%m.%Y"),
  56. "ExtractionTime": datetime.now().strftime("%H:%M:%S"),
  57. "BeginFiscalYear": self.first_month_of_financial_year,
  58. }
  59. def bookkeep_filter(self):
  60. period = [self.current_year + str(i).zfill(2) for i in range(1, 13)]
  61. if self.first_month_of_financial_year != "01":
  62. if self.first_month_of_financial_year > self.current_month:
  63. period = [self.last_year + str(i).zfill(2) for i in range(1, 13)] + period
  64. else:
  65. period = period + [self.next_year + str(i).zfill(2) for i in range(1, 13)]
  66. fm = int(self.first_month_of_financial_year)
  67. period = period[fm - 1 : fm + 12]
  68. period = [self.current_year + "00"] + period
  69. rename_to = ["OpeningBalance"] + ["Period" + str(i).zfill(2) for i in range(1, 13)]
  70. return dict(zip(period, rename_to))
  71. def extract_acct_info(self, df: pd.DataFrame):
  72. acct_info = [
  73. "Marke",
  74. "Standort",
  75. "Konto_Nr",
  76. "Kostenstelle",
  77. "Absatzkanal",
  78. "Kostenträger",
  79. ]
  80. df["Konto_Nr_SKR51"] = df.index
  81. df[acct_info] = df["Konto_Nr_SKR51"].str.split(pat="-", n=6, expand=True)
  82. return df
  83. def export_all_periods(self):
  84. dt = datetime.now()
  85. prev = str(dt.year - 1)
  86. periods = [(prev, str(x).zfill(2)) for x in range(dt.month, 13)] + [
  87. (str(dt.year), str(x).zfill(2)) for x in range(1, dt.month)
  88. ]
  89. for year, month in periods:
  90. filename = self.export_filename_for_period(year, month)
  91. if not Path(filename).exists():
  92. os.makedirs(Path(filename).parent.joinpath("info"), exist_ok=True)
  93. self.export_period(year, month)
    def export_period(self, year, month):
        """Build and write the XML export for one bookkeeping period.

        Loads the account-translation table and all balance CSVs, derives the
        opening balance, pivots amounts per SKR51 account and period, applies
        a series of cost-center/cost-carrier correction rules, and writes the
        XML (plus CSV side outputs for debug/invalid/ignored rows).

        Returns the export filename on success, ``False`` if no bookings
        remain after filtering.
        """
        self.set_bookkeep_period(year, month)
        # Load the translation table (all columns read as strings).
        df_translate = pd.read_csv(
            self.account_translation,
            decimal=",",
            sep=";",
            encoding="latin-1",
            converters={i: str for i in range(0, 200)},
        )
        logging.info(df_translate.shape)
        df_translate["duplicated"] = df_translate.duplicated()
        logging.info(df_translate[df_translate["duplicated"]])
        df_translate = df_translate[
            [
                "Konto_Nr_Händler",
                "Konto_Nr_SKR51",
                "Marke_HBV",
                "Standort",
                "Standort_HBV",
            ]
            if False
            else [
                "Konto_Nr_Händler",
                "Konto_Nr_SKR51",
                "Marke",
                "Marke_HBV",
                "Standort",
                "Standort_HBV",
            ]
        ]
        # Synthesize a fixed profit/loss-carryforward account row from the
        # first make/site combination.
        row = (
            df_translate[["Marke", "Marke_HBV", "Standort", "Standort_HBV"]]
            .drop_duplicates()
            .sort_values(by=["Marke", "Standort"])
            .iloc[:1]
            .to_dict(orient="records")[0]
        )
        row["Konto_Nr_Händler"] = "01-01-0861-00-00-00"
        row["Konto_Nr_SKR51"] = "01-01-0861-00-00-00"
        df_translate = pd.concat([df_translate, pd.DataFrame.from_records([row])])
        # print(df_translate.tail())
        # df_translate.drop(columns=['duplicated'], inplace=True)
        df_translate.drop_duplicates(inplace=True)
        # NOTE(review): result of set_index is discarded (no assignment and no
        # inplace=True), so this line has no effect — confirm intent; the later
        # merge relies on "Konto_Nr_Händler" remaining a column.
        df_translate.set_index("Konto_Nr_Händler")
        # Load account balances from all booking CSVs.
        df2 = []
        for csv_file in self.account_bookings:
            df2.append(
                pd.read_csv(
                    csv_file,
                    decimal=",",
                    sep=";",
                    encoding="latin-1",
                    converters={0: str, 1: str},
                )
            )
        df_bookings = pd.concat(df2)
        # Filter balances to the selected financial year / month window.
        filter_from = self.current_year + self.first_month_of_financial_year
        filter_prev = self.last_year + self.first_month_of_financial_year
        if self.first_month_of_financial_year > self.current_month:
            # Current month lies before the fiscal start: the financial year
            # began in the previous calendar year.
            filter_from = self.last_year + self.first_month_of_financial_year
            filter_prev = self.last_year2 + self.first_month_of_financial_year
        filter_to = self.current_year + self.current_month
        filter_opening = self.current_year + "00"
        filter_prev_opening = self.last_year + "00"
        prev_year_closed = True
        df_opening_balance = df_bookings[(df_bookings["Bookkeep Period"] == filter_opening)]
        if df_opening_balance.shape[0] == 0:
            # No closed opening balance yet: reconstruct it from last year's
            # opening balance plus last year's movements.
            df_opening_balance = df_bookings[
                (df_bookings["Bookkeep Period"] == filter_prev_opening)
                | ((df_bookings["Bookkeep Period"] >= filter_prev) & (df_bookings["Bookkeep Period"] < filter_from))
            ].copy()
            df_opening_balance["Bookkeep Period"] = filter_opening
            prev_year_closed = False
        # df_opening_balance = df_opening_balance.merge(df_translate, how='inner', on='Konto_Nr_Händler')
        # Keep only balance-sheet accounts (account part starting 0/1/3).
        df_opening_balance = df_opening_balance[(df_opening_balance["Konto_Nr_Händler"].str.contains(r"-[013]\d\d+-"))]
        df_opening_balance["amount"] = (df_opening_balance["Debit Amount"] + df_opening_balance["Credit Amount"]).round(
            2
        )
        # df_opening_balance.drop(columns=['Debit Amount', 'Credit Amount', 'Debit Quantity', 'Credit Quantity'], inplace=True)
        # df_opening_balance = df_opening_balance.groupby(['Marke', 'Standort']).sum()
        opening_balance = df_opening_balance["amount"].aggregate("sum").round(2)
        logging.info("Gewinn/Verlustvortrag")
        logging.info(opening_balance)
        if not prev_year_closed:
            # Book the carryforward against the synthetic 0861 account so the
            # reconstructed opening balance nets to zero.
            row = {
                "Konto_Nr_Händler": "01-01-0861-00-00-00",
                "Bookkeep Period": filter_opening,
                "Debit Amount": opening_balance * -1,
                "Credit Amount": 0,
                "Debit Quantity": 0,
                "Credit Quantity": 0,
                "amount": opening_balance * -1,
            }
            df_opening_balance = pd.concat([df_opening_balance, pd.DataFrame.from_records([row])])
        df_bookings = df_bookings[
            (df_bookings["Bookkeep Period"] >= filter_from) & (df_bookings["Bookkeep Period"] <= filter_to)
        ]
        df_bookings["amount"] = (df_bookings["Debit Amount"] + df_bookings["Credit Amount"]).round(2)
        # Quantity rows: duplicate bookings under a "_STK" (units) account.
        df_stats = df_bookings.copy()
        # df_stats = df_stats[df_stats['Konto_Nr_Händler'].str.match(r'-[24578]\d\d\d-')]
        df_stats["Konto_Nr_Händler"] = df_stats["Konto_Nr_Händler"].str.replace(r"-(\d\d\d+)-", r"-\1_STK-", regex=True)
        df_stats["amount"] = (df_bookings["Debit Quantity"] + df_bookings["Credit Quantity"]).round(2)
        df_bookings = pd.concat([df_opening_balance, df_bookings, df_stats])
        df_bookings = df_bookings[df_bookings["amount"] != 0.00]
        if df_bookings.shape[0] == 0:
            logging.error("ABBRUCH!!! Keine Daten vorhanden!")
            return False
        bk_filter = self.bookkeep_filter()
        period_no = list(bk_filter.keys()).index(filter_to) + 1
        # Convert raw period codes to export column names.
        df_bookings["period"] = df_bookings["Bookkeep Period"].apply(lambda x: bk_filter[x])
        logging.info("df_bookings: " + str(df_bookings.shape))
        # Join against the translation table.
        df_combined = df_bookings.merge(df_translate, how="inner", on="Konto_Nr_Händler")
        logging.info(f"df_combined: {df_combined.shape}")
        # Hack for missing make assignment: third-party makes get code "99".
        df_combined["Fremdmarke"] = df_combined["Marke_HBV"].str.match(r"^0000")
        df_combined["Marke"] = np.where(df_combined["Fremdmarke"], "99", df_combined["Marke"])
        df_combined["Standort_egal"] = df_combined["Standort_HBV"].str.match(r"^\d\d_")
        df_combined["Standort_HBV"] = np.where(
            df_combined["Fremdmarke"] | df_combined["Standort_egal"],
            "0000",
            df_combined["Standort_HBV"],
        )
        makes = df_combined[["Marke", "Marke_HBV"]].drop_duplicates().sort_values(by=["Marke"])
        sites = (
            df_combined[["Marke", "Standort", "Standort_HBV"]].drop_duplicates().sort_values(by=["Marke", "Standort"])
        )
        # df_combined.to_csv(account_invalid, decimal=',', sep=';', encoding='latin-1', index=False)
        # Group amounts per SKR51 account and period.
        # df_grouped = df_combined.groupby(['Konto_Nr_SKR51', 'period']).sum()
        df = df_combined.pivot_table(
            index=["Konto_Nr_SKR51"],
            columns=["period"],
            values="amount",
            aggfunc=np.sum,
            margins=True,
            margins_name="CumulatedYear",
        )
        logging.info("df_pivot: " + str(df.shape))
        df = self.extract_acct_info(df)
        # df = df_translate.reset_index(drop=True).drop(columns=['Kostenträger_Ebene']).drop_duplicates()
        logging.info(df.shape)
        logging.info(df.columns)
        logging.info(df.head())
        # df = df.merge(df_translate, how='inner', on='Konto_Nr_SKR51')
        logging.info("df: " + str(df.shape))
        # Classify accounts: 1=balance sheet, 2=P&L, 3=statistics/units.
        df["Bilanz"] = df["Konto_Nr"].str.match(r"^[013]")
        df["Kontoart"] = np.where(df["Bilanz"], "1", "2")
        df["Kontoart"] = np.where(df["Konto_Nr"].str.contains("_STK"), "3", df["Kontoart"])
        df["Kontoart"] = np.where(df["Konto_Nr"].str.match(r"^[9]"), "3", df["Kontoart"])
        df["Konto_1"] = df["Konto_Nr"].str.slice(0, 1)
        # Hack for missing make assignment (see above).
        df = df.merge(makes, how="left", on="Marke")
        df["Marke"] = np.where(df["Marke_HBV"].isna(), "99", df["Marke"])
        df_debug = df.drop(columns=["Bilanz"])
        logging.info(df_debug.groupby(["Kontoart"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Kontoart", "Konto_1"]).aggregate("sum"))
        logging.info(df_debug.groupby(["Konto_Nr"]).aggregate("sum"))
        df_debug.groupby(["Konto_Nr"]).aggregate("sum").to_csv(
            self.debug_file, decimal=",", sep=";", encoding="latin-1"
        )
        # Correction rules for used-car cost carriers: force each sales
        # account group onto its expected cost center / cost carrier.
        df["NW_Verkauf_1"] = (df["Konto_Nr"].str.match(r"^[78]0")) & (df["Kostenstelle"].str.match(r"^[^1]\d"))
        df["Kostenstelle"] = np.where(df["NW_Verkauf_1"] == True, "11", df["Kostenstelle"])
        df["Konto_7010"] = df["Konto_Nr"].str.match(r"^[78]01[01]")
        df["Kostenstelle"] = np.where(df["Konto_7010"] == True, "14", df["Kostenstelle"])
        df["GW_Verkauf_2"] = (df["Konto_Nr"].str.match(r"^[78]1")) & (df["Kostenstelle"].str.match(r"^[^2]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_2"] == True, "21", df["Kostenstelle"])
        df["GW_Verkauf_3"] = (df["Konto_Nr"].str.match(r"^[78]3")) & (df["Kostenstelle"].str.match(r"^[^3]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_3"] == True, "31", df["Kostenstelle"])
        df["GW_Verkauf_4"] = (df["Konto_Nr"].str.match(r"^[78]4")) & (df["Kostenstelle"].str.match(r"^[^4]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_4"] == True, "41", df["Kostenstelle"])
        df["GW_Verkauf_x420"] = df["Konto_Nr"].str.match(r"^[78]420")
        df["Kostenstelle"] = np.where(df["GW_Verkauf_x420"] == True, "42", df["Kostenstelle"])
        df["GW_Verkauf_5"] = (df["Konto_Nr"].str.match(r"^[78]5")) & (df["Kostenstelle"].str.match(r"^[^5]\d"))
        df["Kostenstelle"] = np.where(df["GW_Verkauf_5"] == True, "51", df["Kostenstelle"])
        df["GW_Verkauf_50"] = (df["Konto_Nr"].str.match(r"^[78]")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Verkauf_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where(
            (df["GW_Verkauf_50"] == True) & (df["Marke"] == "01"),
            "50",
            df["Kostenträger"],
        )
        df["NW_Verkauf_00"] = (
            (df["Konto_Nr"].str.match(r"^[78]2"))
            & (df["Kostenstelle"].str.match(r"^1"))
            & (df["Kostenträger"].str.match(r"^[^01234]"))
        )
        df["Kostenträger"] = np.where(df["NW_Verkauf_00"] == True, "00", df["Kostenträger"])
        df["GW_Stk_50"] = (df["Konto_Nr"].str.match(r"^9130")) & (df["Kostenstelle"].str.match(r"^2"))
        df["Kostenträger"] = np.where(df["GW_Stk_50"] == True, "52", df["Kostenträger"])
        df["Kostenträger"] = np.where((df["GW_Stk_50"] == True) & (df["Marke"] == "01"), "50", df["Kostenträger"])
        df["Kostenträger"] = np.where(df["Bilanz"] == True, "00", df["Kostenträger"])
        df["Konto_5er"] = (df["Konto_Nr"].str.match("^5")) | (df["Konto_Nr"].str.match("^9143"))
        df["Absatzkanal"] = np.where(df["Konto_5er"] == True, "99", df["Absatzkanal"])
        df["Konto_5005"] = (df["Konto_Nr"].str.match("^5005")) & (df["Kostenstelle"].str.match(r"^[^12]"))
        df["Kostenstelle"] = np.where(df["Konto_5005"] == True, "20", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5005"] == True, "50", df["Kostenträger"])
        df["Konto_5007"] = (df["Konto_Nr"].str.match("^5007")) & (df["Kostenstelle"].str.match(r"^([^4]|42)"))
        df["Kostenstelle"] = np.where(df["Konto_5007"] == True, "41", df["Kostenstelle"])
        df["Kostenträger"] = np.where(df["Konto_5007"] == True, "70", df["Kostenträger"])
        df["Konto_914er"] = (df["Konto_Nr"].str.match("^914[34]")) & (df["Kostenträger"].str.match(r"^[^7]"))
        df["Kostenträger"] = np.where(df["Konto_914er"] == True, "70", df["Kostenträger"])
        df["Teile_30_60"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[3]"))
            & (df["Kostenträger"].str.match(r"^[^6]"))
        )
        df["Kostenträger"] = np.where(df["Teile_30_60"] == True, "60", df["Kostenträger"])
        df["Service_40_70"] = (
            (df["Konto_Nr"].str.match(r"^[578]"))
            & (df["Kostenstelle"].str.match(r"^[4]"))
            & (df["Kostenträger"].str.match(r"^[^7]"))
        )
        df["Kostenträger"] = np.where(df["Service_40_70"] == True, "70", df["Kostenträger"])
        # Rename German component columns to the export's English names.
        from_label = [
            "Marke",
            "Standort",
            "Konto_Nr",
            "Kostenstelle",
            "Absatzkanal",
            "Kostenträger",
        ]
        to_label = ["Make", "Site", "Account", "Origin", "SalesChannel", "CostCarrier"]
        df = df.rename(columns=dict(zip(from_label, to_label)))
        makes = makes.rename(columns=dict(zip(from_label, to_label))).to_dict(orient="records")
        sites = sites.rename(columns=dict(zip(from_label, to_label))).to_dict(orient="records")
        df["CostAccountingString"] = df["Make"] + df["Site"] + df["Origin"] + df["SalesChannel"] + df["CostCarrier"]
        # Valid rows: purely numeric cost string and a 4-digit numeric account.
        df["IsNumeric"] = (
            (df["CostAccountingString"].str.isdigit()) & (df["Account"].str.isdigit()) & (df["Account"].str.len() == 4)
        )
        df_invalid = df[df["IsNumeric"] == False]
        df_invalid.to_csv(self.account_invalid, decimal=",", sep=";", encoding="latin-1", index=False)
        export_csv = self.export_filename[:-4] + ".csv"
        df.to_csv(export_csv, decimal=",", sep=";", encoding="latin-1", index=False)
        df = df[df["IsNumeric"] != False].groupby(ACCOUNT_INFO, as_index=False).aggregate("sum")
        # Add fixed infos.
        df["Decimals"] = 2
        # df['OpeningBalance'] = 0.0
        logging.info(df.shape)
        self.export_xml(df.to_dict(orient="records"), bk_filter, period_no, makes, sites)
        # Left join against the translation table to find unmapped accounts.
        df_ignored = df_bookings.merge(df_translate, how="left", on="Konto_Nr_Händler")
        df_ignored = df_ignored[
            df_ignored["Konto_Nr_SKR51"].isna()
        ]  # [['Konto_Nr_Händler', 'Bookkeep Period', 'amount', 'quantity']]
        if not df_ignored.empty:
            df_ignored = df_ignored.pivot_table(
                index=["Konto_Nr_Händler"],
                columns=["period"],
                values="amount",
                aggfunc=np.sum,
                margins=True,
                margins_name="CumulatedYear",
            )
            df_ignored.to_csv(self.account_ignored, decimal=",", sep=";", encoding="latin-1")
        return self.export_filename
    @property
    def export_filename(self):
        """Export XML path for the currently selected period."""
        return self.export_filename_for_period(self.current_year, self.current_month)
    @property
    def export_info_dir(self):
        """Folder for per-period logs and debug CSVs (must already exist)."""
        return f"{self.base_dir}/Export/{self.current_year}/info/"
    def export_filename_for_period(self, year, month):
        """Return the export XML path for the given year/month (strings)."""
        return f"{self.base_dir}/Export/{year}/export_{year}-{month}.xml"
  355. def export_xml(self, records, bk_filter, period_no, makes, sites):
  356. record_elements = ACCOUNT_INFO + ["Decimals"] + list(bk_filter.values())[:period_no] + ["CumulatedYear"]
  357. root = ET.Element("HbvData")
  358. h = ET.SubElement(root, "Header")
  359. for k, v in self.header(makes, sites).items():
  360. ET.SubElement(h, k).text = str(v)
  361. make_list = ET.SubElement(root, "MakeList")
  362. for m in makes:
  363. e = ET.SubElement(make_list, "MakeListEntry")
  364. ET.SubElement(e, "Make").text = m["Make"]
  365. ET.SubElement(e, "MakeCode").text = m["Marke_HBV"]
  366. bm_code_list = ET.SubElement(root, "BmCodeList")
  367. for s in sites:
  368. e = ET.SubElement(bm_code_list, "BmCodeEntry")
  369. ET.SubElement(e, "Make").text = s["Make"]
  370. ET.SubElement(e, "Site").text = s["Site"]
  371. ET.SubElement(e, "BmCode").text = s["Standort_HBV"]
  372. record_list = ET.SubElement(root, "RecordList")
  373. for row in records:
  374. record = ET.SubElement(record_list, "Record")
  375. for e in record_elements:
  376. child = ET.SubElement(record, e)
  377. field = row.get(e, 0.0)
  378. if str(field) == "nan":
  379. field = "0"
  380. elif type(field) is float:
  381. field = "{:.0f}".format(field * 100)
  382. child.text = str(field)
  383. with open(self.export_filename, "w", encoding="utf-8") as fwh:
  384. fwh.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent=" "))
  385. def convert_to_row(self, node):
  386. return [child.text for child in node]
  387. def convert_xml_to_csv(self, xmlfile, csvfile):
  388. record_list = ET.parse(xmlfile).getroot().find("RecordList")
  389. header = [child.tag for child in record_list.find("Record")]
  390. bookings = [self.convert_to_row(node) for node in record_list.findall("Record")]
  391. with open(csvfile, "w") as fwh:
  392. cwh = csv.writer(fwh, delimiter=";")
  393. cwh.writerow(header)
  394. cwh.writerows(bookings)
  395. return True
  396. def convert_csv_to_xml(self, csvfile, xmlfile):
  397. makes = [{"Make": "01", "Marke_HBV": "1844"}]
  398. sites = [{"Make": "01", "Site": "01", "Marke_HBV": "1844"}]
  399. with open(csvfile, "r", encoding="latin-1") as frh:
  400. csv_reader = csv.DictReader(frh, delimiter=";")
  401. self.export_xml(csv_reader, self.bookkeep_filter(), 1, makes, sites, xmlfile)
class Kunden(Enum):
    """Known customer environments (directory identities)."""

    Altermann = auto()
    Barth_und_Frey = auto()
    Hannuschka = auto()
    Koenig_und_Partner = auto()
    Luchtenberg = auto()
    Russig_Neustadt_deop01 = auto()
    Russig_Neustadt_deop02 = auto()
    Siebrecht = auto()
  411. def gchr_local(base_dir):
  412. for path in Path(base_dir).glob("*"):
  413. if path.is_dir():
  414. print(path.name)
  415. gchr_export(str(path))
  416. def gchr_export(base_dir):
  417. gchr = GCHR(base_dir)
  418. gchr.export_all_periods()
if __name__ == "__main__":
    # Multi-customer layout if the Kunden folder exists; otherwise treat the
    # current working directory as a single customer.
    base_dir = os.getcwd() + "/gcstruct/Kunden"
    if Path(base_dir).exists():
        gchr_local(base_dir)
    else:
        gchr_export(os.getcwd())