# gcstruct.py
  1. import pandas as pd
  2. import numpy as np
  3. import xml.etree.ElementTree as ET
  4. import json
  5. import csv
  6. import re
  7. import chevron
  8. # from shutil import copyfile
  9. from bs4 import BeautifulSoup
  10. from functools import reduce
  11. from pathlib import Path
  12. def get_flat(node):
  13. result = [{
  14. 'id': node['id'],
  15. 'text': node['text'],
  16. 'children': [x['id'] for x in node['children']],
  17. 'children2': [],
  18. 'parents': node['parents'],
  19. 'accounts': node['accounts'],
  20. 'costcenter': '',
  21. 'level': node['level'],
  22. 'drilldown': node['level'] < 2, # (node['level'] != 2 and len(node['accounts']) == 0),
  23. 'form': node['form'],
  24. 'accountlevel': False,
  25. 'absolute': True,
  26. 'seasonal': True,
  27. 'status': "0",
  28. 'values': [],
  29. 'values2': {}
  30. }]
  31. for child in node['children']:
  32. result += get_flat(child)
  33. return result
  34. def get_parents_list(p_list):
  35. id = ';'.join(p_list) + ';' * (10 - len(p_list))
  36. if len(p_list) > 0:
  37. return [id] + get_parents_list(p_list[:-1])
  38. return [';' * 9]
  39. def structure_from_tree(node):
  40. result = []
  41. result.append(node['id'])
  42. for child in node['children']:
  43. result.extend(structure_from_tree(child))
  44. return result
  45. def xml_from_tree(xml_node, tree_node):
  46. for child in tree_node['children']:
  47. element = ET.SubElement(xml_node, 'Ebene')
  48. element.set("Name", child['text'])
  49. xml_from_tree(element, child)
  50. def split_it(text, index):
  51. try:
  52. return re.findall(r'([^;]+) - ([^;]*);;', text)[0][index]
  53. except Exception:
  54. return ''
  55. def last_layer(text):
  56. try:
  57. return re.findall(r'([^;]+);;', text)[0]
  58. except Exception:
  59. return ''
  60. def get_default_cols(i):
  61. return ['Ebene' + str(i) for i in range(i * 10 + 1, (i + 1) * 10 + 1)]
  62. def get_structure_exports(s):
  63. result = {
  64. 'files': {},
  65. 'format': {
  66. 'KontoFormat': '{0} - {1}',
  67. 'HerstellerkontoFormat': '{{Herstellerkonto_Nr}}',
  68. 'HerstellerBezeichnungFormat': '{{Herstellerkonto_Bez}}',
  69. 'NeueHerstellerkontenAnlegen': False
  70. }
  71. }
  72. export_files = ['ExportStk', 'ExportStrukturenStk', 'ExportAdjazenz', 'ExportUebersetzung',
  73. 'ExportUebersetzungStk', 'ExportHerstellerKontenrahmen']
  74. export_format = ['KontoFormat', 'HerstellerkontoFormat', 'HerstellerBezeichnungFormat', 'NeueHerstellerkontenAnlegen']
  75. for e in export_files:
  76. if s.find(e) is not None and s.find(e).text is not None and s.find(e).text[-4:] == '.csv':
  77. result['files'][e] = s.find(e).text
  78. for e in export_format:
  79. if s.find(e) is not None and s.find(e).text != '':
  80. result['format'][e] = s.find(e).text
  81. result['format']['NeueHerstellerkontenAnlegen'] = (result['format']['NeueHerstellerkontenAnlegen'] == 'true')
  82. return result
class GCStruct():
    """Account-structure processor for GCStruct installations.

    Reads a config XML plus an account frame CSV (Kontenrahmen.csv) and the
    per-structure XML trees, builds tree/flat representations, and writes
    JSON and CSV exports (including the SKR51 translation files).

    NOTE(review): config/json_result/structure_ids/translate are class
    attributes; __init__ mutates self.config in place, so state is
    effectively shared across instances — confirm single-instance usage.
    """
    # Default configuration; the path entries are overwritten in __init__.
    config = {
        'path': 'c:/projekte/python/gcstruct',
        'path2': 'c:/projekte/python/gcstruct',
        'file': 'c:/projekte/python/gcstruct/config/config.xml',
        'output': 'gcstruct.json',
        'default': [],      # structure names from the config XML
        'special': {},      # optional per-structure column overrides
        'special2': {
            'Planner': ['Kostenstelle', 'Ebene1', 'Ebene2'],
            'Test': ['Ebene1', 'Ebene2']
        },
        'columns': ['Konto_Nr', 'Konto_Bezeichnung', 'Konto_Art', 'Konto_KST', 'Konto_STK',
                    'Konto_1', 'Konto_2', 'Konto_3', 'Konto_4', 'Konto_5'],
        'struct': {},       # structure name -> list of its Ebene columns
        'export': {}        # structure name -> export settings (see get_structure_exports)
    }
    # Filled by get_structure_and_tree(); also dumped as JSON output.
    json_result = {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
    structure_ids = []
    # Source column -> short code used inside the combined SKR51 account number.
    translate = {'Konto_Nr': 'SKR51', 'Kostenstelle': 'KST', 'Absatzkanal': 'ABS',
                 'Kostenträger': 'KTR', 'Marke': 'MAR', 'Standort': 'STA', 'Marke_HBV': 'MAR', 'Standort_HBV': 'BMC'}

    def __init__(self, struct_dir, export_dir=None):
        """Load the config XML from *struct_dir* and derive the column layout.

        Prefers config/gcstruct.xml, falls back to config/config.xml.
        Exports default to <struct_dir>/export unless *export_dir* is given.
        """
        self.config['path'] = struct_dir
        self.config['path2'] = struct_dir + '/export' if export_dir is None else export_dir
        self.config['file'] = f"{self.config['path']}/config/gcstruct.xml"
        if not Path(self.config['file']).exists():
            self.config['file'] = f"{self.config['path']}/config/config.xml"
        cfg = ET.parse(self.config['file'])
        self.config['default'] = [s.find('Name').text for s in cfg.getroot().find('Strukturdefinitionen').findall('Struktur')]
        self.config['export'] = dict([(s.find('Name').text, get_structure_exports(s)) for s in
                                      cfg.getroot().find('Strukturdefinitionen').findall('Struktur')])
        # Structure i owns the default columns Ebene(i*10+1)..Ebene(i*10+10).
        struct = dict([(x, get_default_cols(i)) for (i, x) in enumerate(self.config['default'])])
        struct.update(self.config['special'])
        self.config['struct'] = struct
        # print(self.config['struct'])

    def export_header(self, filetype):
        """Return the CSV header columns for the given export file type."""
        return {
            'ExportStk': [],
            'ExportStrukturenStk': [],
            'ExportAdjazenz': [],
            'ExportUebersetzung': ['Konto_Nr_Hersteller', 'Konto_Nr_Split', 'Konto_Nr_Haendler', 'Info'],
            'ExportUebersetzungStk': ['Konto_Nr_Hersteller', 'Konto_Nr_Split', 'Konto_Nr_Haendler', 'Info'],
            'ExportHerstellerKontenrahmen': ['Konto_Nr', 'Konto_Bezeichnung', 'Case', 'Info']
        }[filetype]

    def accounts_from_csv(self, struct):
        """Read Kontenrahmen.csv and add per-structure id and 'LetzteEbene' columns.

        For each structure its Ebene columns are joined with ';' into one id
        column, and the last populated layer is split into Nr/Bez parts.
        """
        # Cap the number of columns read: base block + 10 per structure.
        max_rows = (len(self.config['default']) + 1) * 10
        with open(f"{self.config['path']}/Kontenrahmen/Kontenrahmen.csv", 'r', encoding='latin-1') as f:
            csv_reader = csv.reader(f, delimiter=';')
            imported_csv = [row[:max_rows] for row in csv_reader]
        df = pd.DataFrame.from_records(np.array(imported_csv[1:], dtype='object'), columns=imported_csv[0]).fillna(value='')
        df = df.rename(columns={'Kostenstelle': 'Konto_KST', 'STK': 'Konto_STK'})
        for i, (s, cols) in enumerate(struct.items()):
            # Join the structure's Ebene columns into a ';'-separated id, then
            # strip the leading ';' produced by the reduce seed.
            df[s] = reduce(lambda x, y: x + ";" + df[y], cols, '')
            df[s] = df[s].apply(lambda x: x[1:])
            df['LetzteEbene' + str(i + 1)] = df[s].apply(lambda x: last_layer(x))
            df['LetzteEbene' + str(i + 1) + '_Nr'] = df[s].apply(lambda x: split_it(x, 0))
            df['LetzteEbene' + str(i + 1) + '_Bez'] = df[s].apply(lambda x: split_it(x, 1))
        # The manufacturer account is taken from the first structure's last layer.
        df['Herstellerkonto_Nr'] = df['LetzteEbene1_Nr']
        df['Herstellerkonto_Bez'] = df['LetzteEbene1_Bez']
        return df

    def tree_from_xml(self, struct, df):
        """Parse Xml/<structure>.xml for each structure into a tree dict.

        When the XML file is missing, the distinct ids found in *df* are
        printed and an empty root is used instead.
        """
        result = {}
        for (s, cols) in struct.items():
            try:
                tree = ET.parse(f"{self.config['path']}/Xml/{s}.xml")
                result[s] = self.get_tree_root(tree.getroot(), s)
            except FileNotFoundError:
                print('XML-Datei fehlt')
                used_entries = [x.split(";")[1:] for x in set(df[s].to_numpy())]
                print(used_entries)
                root = ET.Element('Ebene')
                root.set('Name', s)
                result[s] = self.get_tree_root(root, s)
            # self.json_result["tree"][s] = get_tree_from_accounts(cols, [])
        return result

    def get_structure_and_tree(self):
        """Build accounts, trees, flat lists and struct exports; dump to JSON.

        Side effect: writes <path2>/<output> (gcstruct.json) and fills
        self.json_result, which is also returned.
        """
        df = self.accounts_from_csv(self.config['struct'])
        self.json_result['accounts'] = df.to_dict('records')
        # Grouped (Struktur, id) -> account rows; kept for the commented-out
        # fast path in get_accounts.
        self.structure_ids = df.melt(id_vars=['Konto_Nr'], value_vars=self.config['struct'].keys(),
                                     var_name='Struktur', value_name='id').groupby(by=['Struktur', 'id'])
        self.json_result['tree'] = self.tree_from_xml(self.config['struct'], df)
        for (s, cols) in self.config['struct'].items():
            self.json_result['flat'][s] = get_flat(self.json_result['tree'][s])
        for (s, entries) in self.json_result['flat'].items():
            cols = self.config['struct'][s]
            df_temp = pd.DataFrame([x['id'].split(';') for x in entries], columns=cols)
            self.json_result['struct_export'][s] = df_temp.to_dict(orient='records')
        # {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
        # NOTE(review): file handle is not closed explicitly — consider `with`.
        json.dump(self.json_result, open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)
        return self.json_result

    def get_accounts(self, structure, id):
        """Return the Konto_Nr of every account whose *structure* id equals *id*."""
        return [x['Konto_Nr'] for x in self.json_result['accounts'] if x[structure] == id]
        # return []
        # res = self.structure_ids.groups.get((structure, id))
        # if res is None:
        #     return []
        # return res.values

    def export(self):
        """Write the configured export CSVs (manufacturer account translation).

        NOTE(review): every file gets the same header and the SKR51 format is
        used regardless of which structure's file is written — confirm intended.
        """
        for s in self.config['export'].keys():
            for (filetype, filename) in self.config['export'][s]['files'].items():
                with open(self.config['path2'] + '/' + filename, 'w') as fwh:
                    fwh.write('Konto_Nr_Hersteller;Konto_Nr_Split;Konto_Nr_Haendler;Info\n')
                    # 'Hersteller'Konto_Nr;Konto_Bezeichnung;Case;Info'
                    for a in self.json_result['accounts']:
                        if a['Herstellerkonto_Nr'] != '':
                            account = chevron.render(self.config['export']['SKR51']['format']['HerstellerkontoFormat'], a)
                            fwh.write(account + ';' + account + ';' + a['Konto_Nr'] + ';' + '\n')  # a['Herstellerkonto_Bez']

    def get_tree(self, node, parents, structure):
        """Recursively convert an XML subtree into the tree-dict representation.

        *parents* is mutated during recursion (append/pop) but restored
        before returning; ids are padded to ten ';'-separated fields.
        """
        result = []
        for child in node:
            p = get_parents_list(parents)
            parents.append(child.attrib['Name'])
            id = ';'.join(parents) + ';' * (10 - len(parents))
            result.append({
                'id': id,
                'text': child.attrib['Name'],
                'children': self.get_tree(child, parents, structure),
                'parents': p,
                'accounts': self.get_accounts(structure, id),
                'level': len(parents),
                'form': child.attrib.get('Split', '')
            })
            parents.pop()
        return result

    def get_tree_root(self, node, structure):
        """Wrap an XML root element as the level-0 node of a structure tree."""
        id = ';' * 9
        return {
            'id': id,
            'text': node.attrib['Name'],
            'children': self.get_tree(node, [], structure),
            'parents': [],
            'accounts': [],
            'level': 0,
            'form': ''
        }

    def post_structure_and_tree(self):
        """Write back an edited JSON result as CSV and XML files.

        Reads <path>/<output> and produces Kontenrahmen_out.csv plus, per
        structure, <s>_out.csv and Xml/<s>_out.xml.
        """
        json_post = json.load(open(f"{self.config['path']}/{self.config['output']}", 'r'))
        # Kontenrahmen.csv
        ebenen = ['Ebene' + str(i) for i in range(1, len(self.config['default']) * 10 + 1)]
        header = ';'.join(self.config['columns'] + ebenen)
        cols = self.config['columns'] + self.config['default']
        with open(self.config['path'] + '/Kontenrahmen/Kontenrahmen_out.csv', 'w', encoding='latin-1') as f:
            f.write(header + '\n')
            for row in json_post['Kontenrahmen']:
                f.write(';'.join([row[e] for e in cols]) + '\n')
        # print(header)
        # XML and possibly Struktur.csv
        for i, s in enumerate(self.config['default']):
            with open(f"{self.config['path']}/Strukturen/Kontenrahmen.csv/{s}_out.csv", 'w', encoding='latin-1') as f:
                f.write(';'.join(['Ebene' + str(i * 10 + j) for j in range(1, 11)]) + '\n')
                rows = structure_from_tree({'id': ";" * 9, 'children': json_post[s]})
                f.write('\n'.join(rows))
            # with open(self.config['path'] + "/Strukturen/Kontenrahmen.csv/" + structure + "_2.csv", "w", encoding="latin-1") as f:
            root = ET.Element('Ebene')
            root.set('Name', s)
            xml_from_tree(root, {'id': ";" * 9, 'children': json_post[s]})
            with open(f"{self.config['path']}/Xml/{s}_out.xml", 'w', encoding='utf-8') as f:
                f.write(BeautifulSoup(ET.tostring(root), 'xml').prettify())

    def skr51_translate(self, accounts_combined_files):
        """Translate combined dealer accounts into SKR51 account numbers.

        Builds per-dimension lookup tables from the account frame, merges
        them onto the combined CSVs (plus a duplicated '_STK' variant of
        every row) and writes Kontenrahmen_uebersetzt.csv.

        NOTE(review): the enumerate pairs translate entries with
        LetzteEbene1..8 in dict order — assumes the structures in the config
        appear in exactly that order; confirm against the config XML.
        """
        df = self.accounts_from_csv(self.config['struct'])
        df_translate = {}
        for i, (t_from, t_to) in enumerate(self.translate.items()):
            last = 'LetzteEbene' + str(i + 1)
            from_label = ['Konto_Nr', last, last + '_Nr', last + '_Bez', 'Ebene' + str(i * 10 + 1), 'Ebene' + str(i * 10 + 2)]
            to_label = [t_to, t_to + '_Ebene', t_to + '_Nr', t_to + '_Bez', 'Ebene1', 'Ebene2']
            df_translate[t_from] = df[df[last + '_Nr'] != ''][from_label].rename(columns=dict(zip(from_label, to_label)))
            # print(df_translate[t_to].head())
        df2 = []
        for ac_file in accounts_combined_files:
            # All columns are read as str to keep leading zeros intact.
            df2.append(pd.read_csv(ac_file, decimal=',', sep=';', encoding='latin-1',
                                   converters={i: str for i in range(0, 200)}))
        df_source = pd.concat(df2)
        # Duplicate every row with a '_STK' account number (unit counts).
        df3 = df_source.copy()
        df3['Konto_Nr'] = df3['Konto_Nr'] + '_STK'
        df_source = pd.concat([df_source, df3])
        for t_from, t_to in self.translate.items():
            # Derive the dimension key column, then merge its lookup table.
            if t_to == 'SKR51':
                df_source['SKR51'] = df_source['Konto_Nr']
            elif t_from in ['Marke_HBV']:
                df_source['Marke_HBV'] = df_source['Marke']
            elif t_from in ['Standort_HBV']:
                df_source['Standort_HBV'] = df_source['Standort'] + '_' + df_source['Marke']
                df_source['BMC'] = 'BMC_' + df_source['Standort_HBV']
            elif t_to == 'KTR':
                # Cost objects: TZ source gets its own prefix, NW/SC include the brand.
                df_source['KTR'] = np.where(df_source['Kostenträger_Quelle'] == 'TZ', 'KTR_TZ_' + df_source['Kostenträger'], 'KTR_00')
                df_source['KTR'] = np.where(df_source['Kostenträger_Quelle'].isin(['NW', 'SC']), 'KTR_' + df_source['Kostenträger_Quelle'] +
                                            '_' + df_source['Marke'] + '_' + df_source['Kostenträger'], df_source['KTR'])
            else:
                df_source[t_to] = t_to + '_' + df_source[t_from]
            df_source = df_source.merge(df_translate[t_from], how='left', on=[t_to], suffixes=(None, '_' + t_to))
            # Unmatched rows fall back to the raw source value.
            df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
        df_source['Konto_Nr_SKR51'] = df_source['MAR_Nr'] + '-' + df_source['STA_Nr'] + '-' + df_source['SKR51_Nr'] + '-' + \
            df_source['KST_Nr'] + '-' + df_source['ABS_Nr'] + '-' + df_source['KTR_Nr']
        df_source['Konto_Nr_Händler'] = df_source['Marke'] + '-' + df_source['Standort'] + '-' + df_source['Konto_Nr'] + '-' + \
            df_source['Kostenstelle'] + '-' + df_source['Absatzkanal'] + '-' + df_source['Kostenträger']
        # df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)
        df_source['MAR_Nr_MAR'] = np.where(df_source['MAR_Nr_MAR'].isna(), '0000', df_source['MAR_Nr_MAR'])
        from_label = ['MAR_Nr', 'STA_Nr', 'SKR51_Nr', 'KST_Nr', 'ABS_Nr', 'KTR_Nr', 'KTR_Ebene', 'Konto_Nr_Händler',
                      'Konto_Nr_SKR51', 'MAR_Nr_MAR', 'BMC_Nr']
        to_label = ['Marke', 'Standort', 'Konto_Nr', 'Kostenstelle', 'Absatzkanal', 'Kostenträger',
                    'Kostenträger_Ebene', 'Konto_Nr_Händler', 'Konto_Nr_SKR51', 'Marke_HBV', 'Standort_HBV']
        df_combined = df_source[from_label].rename(columns=dict(zip(from_label, to_label)))
        df_combined.to_csv(f"{self.config['path2']}/Kontenrahmen_uebersetzt.csv", sep=';', encoding='latin-1', index=False)

    def skr51_translate2(self, accounts_combined_file):
        """Resolve structure variables for translated accounts and write SKR51_Uebersetzung.csv.

        Replaces '{Struktur/Ebene}' placeholders in the Ebene columns via the
        lookup built from the account frame, derives grouping columns, adds
        unit-count ('Stückzahlen') rows for revenue accounts and reshuffles
        the Ebene7x/8x blocks into GuV vs. Bilanz layouts.
        """
        df = self.accounts_from_csv(self.config['struct'])
        # Lookup: placeholder key '{s/Ebene..}' per account -> replacement value.
        df_list = []
        for i, s in enumerate(self.config['struct'].keys()):
            from_label = ['Konto_Nr', 'Ebene' + str(i * 10 + 1), 'Ebene' + str(i * 10 + 2), 'Ebene' + str(i * 10 + 3)]
            to_label = ['Konto_Nr', 'key', 'value', 'value2']
            df_temp = df[from_label].rename(columns=dict(zip(from_label, to_label)))
            df_temp['key'] = '{' + s + '/' + df_temp['key'] + '}'
            df_list.append(df_temp[df_temp['value'] != ''])
        df_translate = pd.concat(df_list)
        # df_translate.to_csv(f"{self.config['path2']}/SKR51_Variablen.csv", sep=';', encoding='latin-1', index=False)
        df_source = pd.read_csv(accounts_combined_file, decimal=',', sep=';', encoding='latin-1',
                                converters={i: str for i in range(0, 200)})
        # Drop the unit-count rows; they are re-derived below.
        df_source = df_source[df_source['Konto_Nr'].str.contains('_STK') == False]
        df_source['Konto_Nr_Gesamt'] = df_source['Konto_Nr']
        # Accounts starting 4/5/7/8 are split by the first cost-centre digit,
        # 5xxx_4 additionally by the second digit.
        df_source['Konto_Nr'] = np.where(df_source['Konto_Nr'].str.contains(r'^[4578]'), df_source['Konto_Nr'] + '_' +
                                         df_source['Kostenstelle'].str.slice(stop=1), df_source['Konto_Nr'])
        df_source['Konto_Nr'] = np.where(df_source['Konto_Nr'].str.contains(r'^5\d+_4'), df_source['Konto_Nr'] +
                                         df_source['Kostenstelle'].str.slice(start=1, stop=2), df_source['Konto_Nr'])
        df_source = df_source.merge(df, how='left', on=['Konto_Nr'])
        # rows = df_source.shape[0]
        df_source['value'] = ''
        cols = get_default_cols(0)
        for t_from, t_to in self.translate.items():
            if t_from in ['Marke_HBV', 'Standort_HBV']:
                continue
            if t_from == 'Konto_Nr':
                df_source[t_to] = df_source[t_from]
            else:
                df_source[t_to] = t_to + '_' + df_source[t_from]
            for e in cols:
                # Substitute each Ebene column where the lookup key matches.
                df_source = df_source.merge(df_translate, how='left', left_on=[t_to, e], right_on=['Konto_Nr', 'key'],
                                            suffixes=(None, '_' + t_to + '_' + e))
                df_source[e] = np.where(df_source['value_' + t_to + '_' + e].notna(), df_source['value_' + t_to + '_' + e], df_source[e])
                # if df_source.shape[0] > rows:
                #     print(t_to + '_' + e + ': ' + str(df_source.shape[0]))
        # df_source.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
        # df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
        for e in cols:
            # Unresolved placeholders are flagged with the ' falsch' suffix;
            # the '[KTR]' marker is replaced by the cost-object level.
            df_source[e] = np.where(df_source[e].str.startswith('{'), df_source[e].str.extract(r'\/(.*)}', expand=False) +
                                    ' falsch', df_source[e])  # df_source[e].str.extract(r'/(.*)}') +
            df_source[e] = np.where(df_source[e] == '[KTR]', df_source['Kostenträger_Ebene'], df_source[e])
        # df_all[df_all['Ebene1'] == ]
        # print(df_source.head())
        df_source['Konto_neu'] = df_source['Marke'] + '-' + df_source['Standort'] + '-' + df_source['Konto_Nr'] + '-' + \
            df_source['Kostenstelle'] + '-' + df_source['Absatzkanal'] + '-' + df_source['Kostenträger'] + ' - ' + \
            df_source['Konto_Bezeichnung']
        df_source['Ebene1_empty'] = df_source['Ebene1'].isna()  # , df_source['Ebene1'].map(lambda x: x == ''))
        df_source['Konto_neu'] = np.where(df_source['Ebene1_empty'], 'keine Zuordnung', df_source['Konto_neu'])
        df_source['Ebene1'] = np.where(df_source['Ebene1_empty'], 'keine Zuordnung', df_source['Ebene1'])
        df_source['Konto_Gruppe'] = df_source['Konto_Nr'] + ' - ' + df_source['Konto_Bezeichnung']
        df_source['Konto_Gruppe'] = np.where(df_source['Ebene1_empty'], 'keine Zuordnung', df_source['Konto_Gruppe'])
        df_source['Konto_Gesamt'] = df_source['Konto_Nr_Gesamt'] + ' - ' + df_source['Konto_Bezeichnung']
        # Revenue rows get a duplicated unit-count ('Stückzahlen') variant.
        df_amount = df_source[df_source['Ebene1'] == 'Umsatzerlöse'].reset_index()
        df_amount['Ebene1'] = 'verkaufte Stückzahlen'
        df_amount['Ebene72'] = 'verkaufte Stückzahlen'
        df_amount['Konto_neu'] = 'STK ' + df_amount['Konto_neu']
        df_amount['Konto_Nr_Händler'] = df_amount['Konto_Nr_Händler'] + '_STK'
        df_amount['Konto_Gruppe'] = 'STK ' + df_amount['Konto_Gruppe']
        df_amount['Konto_Gesamt'] = 'STK ' + df_amount['Konto_Gesamt']
        df_source = pd.concat([df_source, df_amount])
        # Split the Ebene7x block: GuV rows shift into Ebene8x, Bilanz rows
        # keep Ebene8x as the 'Bilanz' branch.
        df_source['GuV'] = (df_source['Ebene71'] == 'GuV')
        df_source['Ebene81'] = np.where(df_source['GuV'], df_source['Ebene72'], 'Bilanz')
        df_source['Ebene82'] = np.where(df_source['GuV'], df_source['Ebene73'], '')
        df_source['Ebene83'] = np.where(df_source['GuV'], df_source['Ebene74'], '')
        df_source['Ebene84'] = np.where(df_source['GuV'], df_source['Ebene75'], '')
        df_source['Ebene85'] = np.where(df_source['GuV'], df_source['Ebene76'], '')
        df_source['Ebene86'] = np.where(df_source['GuV'], df_source['Ebene77'], '')
        df_source['Ebene87'] = np.where(df_source['GuV'], df_source['Ebene78'], '')
        df_source['Ebene88'] = np.where(df_source['GuV'], df_source['Ebene79'], '')
        df_source['Ebene89'] = np.where(df_source['GuV'], df_source['Ebene80'], '')
        df_source['Ebene90'] = ''
        df_source['Ebene71'] = np.where(df_source['GuV'], 'GuV', df_source['Ebene72'])
        df_source['Ebene72'] = np.where(df_source['GuV'], '', df_source['Ebene73'])
        df_source['Ebene73'] = np.where(df_source['GuV'], '', df_source['Ebene74'])
        df_source['Ebene74'] = np.where(df_source['GuV'], '', df_source['Ebene75'])
        df_source['Ebene75'] = np.where(df_source['GuV'], '', df_source['Ebene76'])
        df_source['Ebene76'] = np.where(df_source['GuV'], '', df_source['Ebene77'])
        df_source['Ebene77'] = np.where(df_source['GuV'], '', df_source['Ebene78'])
        df_source['Ebene78'] = np.where(df_source['GuV'], '', df_source['Ebene79'])
        df_source['Ebene79'] = np.where(df_source['GuV'], '', df_source['Ebene80'])
        df_source['Ebene80'] = ''
        df_source['Susa'] = df_source['Konto_Gruppe'].str.slice(stop=1)
        df_source['Konto_KST'] = ''
        df_source['GuV_Bilanz'] = df_source['Konto_Art']
        from_label = ['Konto_neu', 'Konto_Nr_Händler']
        to_label = ['Konto', 'Acct_Nr']
        df_source = df_source.rename(columns=dict(zip(from_label, to_label)))
        df_source = df_source[['Konto', 'Acct_Nr', 'Konto_Bezeichnung', 'GuV_Bilanz', 'Konto_KST', 'Konto_STK',
                               'Konto_1', 'Konto_2', 'Konto_3', 'Konto_4', 'Konto_5'] +
                              get_default_cols(0) + get_default_cols(7) + get_default_cols(8) +
                              ['Konto_Gruppe', 'Konto_Nr_Gesamt', 'Konto_Gesamt', 'Susa']]
        df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)

    def skr51_vars(self):
        """Resolve variable placeholders in the SKR51 structure and write SKR51_Struktur.csv.

        Rebuilds the structure (get_structure_and_tree), collects key/value
        pairs from all flat structures plus the Kostentraeger export, and
        substitutes them into the SKR51 Ebene columns. Revenue rows are
        duplicated as unit-count rows.
        """
        self.get_structure_and_tree()
        cols = get_default_cols(0)
        df_temp = pd.read_csv(f"{self.config['path']}/Export/Kostentraeger.csv", decimal=',', sep=';',
                              encoding='latin-1', converters={i: str for i in range(0, 200)})
        df_temp['value'] = df_temp['Ebene33']
        df_temp['key'] = '[KTR]'
        # Only 'Nr - Bezeichnung' style entries are usable as values.
        df_temp = df_temp[df_temp['value'].str.contains(' - ')]
        df_list = [df_temp[['key', 'value']]]
        for (s, entries) in self.json_result['flat'].items():
            df = pd.DataFrame([x['id'].split(';') for x in entries], columns=cols)
            df['key'] = df[cols[0]].apply(lambda x: '{' + s + '/' + x + '}')
            df['value'] = df[cols[1]]
            df_list.append(df[['key', 'value']])
        df = pd.concat(df_list)
        df_vars = df[df['value'] != '']
        # df_vars.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
        df_main = pd.DataFrame([x['id'].split(';') for x in self.json_result['flat']['SKR51']], columns=cols)
        df_main['value'] = ''
        for c in cols:
            df_main = df_main.merge(df_vars, how='left', left_on=c, right_on='key', suffixes=(None, '_' + c))
            df_main[c] = np.where(df_main['value_' + c].isna(), df_main[c], df_main['value_' + c])
        df_amount = df_main[df_main['Ebene1'] == 'Umsatzerlöse'].reset_index()
        df_amount['Ebene1'] = 'verkaufte Stückzahlen'
        df_main = pd.concat([df_main, df_amount])
        # from_label = cols
        to_label = cols  # get_default_cols(9)
        # df_main = df_main.rename(columns=dict(zip(from_label, to_label)))
        df_main[to_label].to_csv(f"{self.config['path2']}/SKR51_Struktur.csv", sep=';', encoding='latin-1', index_label='Sortierung')
def gcstruct_uebersetzung():
    """Locate the GCStruct directories and run the full SKR51 translation pipeline.

    Resolves base/import directories from the current working directory
    (scripts inside a 'Portal' tree, a local 'GCStruct_Aufbereitung' folder,
    or the network fallback shares), then runs skr51_translate,
    skr51_translate2 and skr51_vars in sequence.
    """
    # base_dir = 'P:/SKR51_GCStruct/'
    base_dir = Path('.').absolute()
    import_dir = base_dir
    if base_dir.name == 'scripts':
        if base_dir.parent.parent.name == 'Portal':
            base_dir = base_dir.parent.parent.parent
            import_dir = base_dir.joinpath('Portal/System/IQD/Belege/Kontenrahmen')
        else:
            base_dir = base_dir.parent.parent
            import_dir = base_dir.joinpath('System/OPTIMA/Export')
    elif not base_dir.joinpath('GCStruct_Aufbereitung').exists():
        # Fall back to the network shares (Windows UNC first, then Linux mount).
        base_dir = Path('//192.168.2.21/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/')
        if not base_dir.exists():
            base_dir = Path('//media/fileserver1/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/')
        import_dir = base_dir
    struct = GCStruct(str(base_dir.joinpath('GCStruct_Aufbereitung')))
    struct.skr51_translate(import_dir.glob('Kontenrahmen_kombiniert*.csv'))
    print('Kontenrahmen_uebersetzt.csv erstellt.')
    # copyfile('c:/Projekte/Python/Gcstruct/Kontenrahmen_kombiniert.csv', base_dir + 'GCStruct_Modell/Export/Kontenrahmen_kombiniert.csv')
    struct2 = GCStruct(str(base_dir.joinpath('GCStruct_Modell')))
    struct2.skr51_translate2(str(base_dir.joinpath('GCStruct_Aufbereitung/Export/Kontenrahmen_uebersetzt.csv')))
    print('SKR51_Uebersetzung.csv erstellt.')
    struct2.skr51_vars()
    print('SKR51_Struktur.csv erstellt.')
  433. def dresen():
  434. struct = GCStruct('c:/projekte/GCHRStruct_Hyundai_Export')
  435. struct.get_structure_and_tree()
  436. struct.export()
  437. def reisacher():
  438. struct = GCStruct('X:/Robert/Planung Reisacher/GCStruct_neue_Struktur_Planung')
  439. struct.get_structure_and_tree()
  440. # json.dump(res['flat'], open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)
if __name__ == '__main__':
    # Script entry point: the Reisacher workflow is the current default;
    # the commented lines below are alternative installations.
    # struct = GCStruct('c:/projekte/gcstruct_dresen')
    # struct = GCStruct('c:/projekte/python/gcstruct')
    # struct = GCStruct('c:/projekte/python/gcstruct_reisacher_planung')
    reisacher()
    # dresen()