
Black Formatter

robert 1 year ago
commit c007ff092d

+ 7 - 0
.vscode/settings.json

@@ -0,0 +1,7 @@
+{
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
+        "editor.formatOnSave": true,
+    },
+    "black-formatter.args": ["--line-length", "120"],
+}
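
The same 120-character Black style can be reproduced outside VS Code via Black's Python API; a minimal sketch (format_str and Mode are part of Black's public API, the sample string is hypothetical):

    import black

    # Reformat a snippet with the project's 120-character line length,
    # matching the editor settings above; quotes are normalized to double quotes.
    src = "app.run(host='0.0.0.0', port='8084', debug=True)\n"
    print(black.format_str(src, mode=black.Mode(line_length=120)))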

+ 12 - 13
main.py

@@ -3,26 +3,25 @@ from webservice import api, file_io
 import os
 
 
-app = Flask(__name__, template_folder='static')
-app.register_blueprint(api.bp, url_prefix='/api')
+app = Flask(__name__, template_folder="static")
+app.register_blueprint(api.bp, url_prefix="/api")
 app.register_blueprint(file_io.bp)
 
 
-@app.route('/')
-@app.route('/login', methods=['GET'])
-@app.route('/select', methods=['GET'])
-@app.route('/static/login')
-@app.route('/static/select')
+@app.route("/")
+@app.route("/login", methods=["GET"])
+@app.route("/select", methods=["GET"])
+@app.route("/static/login")
+@app.route("/static/select")
 def home():
-    return render_template('index.html')
+    return render_template("index.html")
 
 
-@app.route('/static/planner/<year>/<version>/<timestamp>')
+@app.route("/static/planner/<year>/<version>/<timestamp>")
 def planner(year, version, timestamp):
-    return render_template('index.html')
+    return render_template("index.html")
 
 
-
-if __name__ == '__main__':
+if __name__ == "__main__":
     app.secret_key = os.urandom(24)
-    app.run(host='0.0.0.0', port='8084', debug=True)
+    app.run(host="0.0.0.0", port="8084", debug=True)
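
For reference, a minimal smoke test of the routes above (a sketch, assuming the module is importable as main together with the webservice package and its dependencies):

    # Flask's built-in test client exercises the blueprint registered under /api.
    from main import app

    client = app.test_client()
    print(client.get("/api/").data)  # b'hello world!!' from webservice/api.py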

+ 2 - 2
tox.ini

@@ -1,5 +1,5 @@
 [flake8]
-ignore = E712, W504
-max-line-length = 140
+ignore = E712, W503, W504
+max-line-length = 120
 # exclude = tests/*
 # max-complexity = 10
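
W503 (line break before binary operator) joins the ignore list because Black wraps long expressions with the operator at the start of the continuation line (see the reformatted concatenations in webservice/gcstruct.py below), and max-line-length drops from 140 to 120 to match black-formatter.args. A minimal illustration with hypothetical names:

    # flake8's W503 would flag each leading "+", which is exactly Black's wrapping style.
    marke, standort, konto, kostenstelle = "MAR", "STA", "4711", "KST"
    konto_nr = (
        marke
        + "-"
        + standort
        + "-"
        + konto
        + "-"
        + kostenstelle
    )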

+ 3 - 3
webservice/api.py

@@ -1,9 +1,9 @@
 from flask import Blueprint
 
 
-bp = Blueprint('api', __name__)
+bp = Blueprint("api", __name__)
 
 
-@bp.route('/')
+@bp.route("/")
 def home():
-   return 'hello world!!'
+    return "hello world!!"

+ 12 - 12
webservice/auth.py

@@ -21,15 +21,15 @@ class User:
 
 class Auth:
     def __init__(self) -> None:
-        with open(Path(__file__).parent.parent.joinpath('config', 'users.csv'), 'r') as frh:
-            csv_reader = csv.DictReader(frh, delimiter=';')
-            self.users = dict([(row['username'].lower(), self.parse_users_csv(row)) for row in csv_reader])
+        with open(Path(__file__).parent.parent.joinpath("config", "users.csv"), "r") as frh:
+            csv_reader = csv.DictReader(frh, delimiter=";")
+            self.users = dict([(row["username"].lower(), self.parse_users_csv(row)) for row in csv_reader])
 
     def parse_users_csv(self, row: dict) -> User:
-        row['admin'] = (row['admin'] == 'True')
-        row['write'] = (row['write'] == 'True')
-        row['department'] = json.loads(row['department'])
-        row['costcenter'] = json.loads(row['costcenter'])
+        row["admin"] = row["admin"] == "True"
+        row["write"] = row["write"] == "True"
+        row["department"] = json.loads(row["department"])
+        row["costcenter"] = json.loads(row["costcenter"])
         return User(**row)
 
     def get_user(self, username, password) -> Optional[dict]:
@@ -41,12 +41,12 @@ class Auth:
             return None
 
         res = asdict(self.users[username])
-        del(res['password'])
+        del res["password"]
         return res
 
     def connect_ldap(self, username, password) -> bool:
-        server = Server('ahr.local:389', get_info=ALL, use_ssl=False, connect_timeout=5)
-        user = username.lower() + '@ahr.local'
+        server = Server("ahr.local:389", get_info=ALL, use_ssl=False, connect_timeout=5)
+        user = username.lower() + "@ahr.local"
         conn = Connection(server, user=user, password=password)
         try:
             return bool(conn.bind())
@@ -54,5 +54,5 @@ class Auth:
             return False
 
 
-if __name__ == '__main__':
-    print(Auth().get_user('TKP', 'G9zHjA__'))
+if __name__ == "__main__":
+    print(Auth().get_user("TKP", "G9zHjA__"))
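
A sketch of the row conversion performed by parse_users_csv above (the row values are hypothetical; only the field names come from the code):

    import json

    row = {"username": "tkp", "password": "secret", "admin": "True", "write": "False",
           "department": '["Verkauf"]', "costcenter": '["100"]'}
    row["admin"] = row["admin"] == "True"               # string "True" -> bool True
    row["write"] = row["write"] == "True"               # string "False" -> bool False
    row["department"] = json.loads(row["department"])   # JSON list -> Python list
    row["costcenter"] = json.loads(row["costcenter"])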

+ 9 - 13
webservice/config_load.py

@@ -9,19 +9,15 @@ class ConfigLoad:
         self.base_dir = Path(base_dir)
 
     def load_file(self, client, year):
-        with open(self.base_dir.joinpath(client + '.json'), 'r') as frh:
+        with open(self.base_dir.joinpath(client + ".json"), "r") as frh:
             cfg = json.load(frh)
-        year_new = {
-            "plan": year,
-            "actuals": str(int(year) - 1),
-            "previous": str(int(year) - 2)
-        }
-        cfg['config']['year'] = year_new
-        cfg['config']['previous'] = [
-            "Ist " + year_new['previous'],
-            "Plan " + year_new['actuals'],
-            "Ist per 10/" + year_new['actuals'],
-            "FC 12/" + year_new['actuals'],
-            "Ist " + year_new['actuals']
+        year_new = {"plan": year, "actuals": str(int(year) - 1), "previous": str(int(year) - 2)}
+        cfg["config"]["year"] = year_new
+        cfg["config"]["previous"] = [
+            "Ist " + year_new["previous"],
+            "Plan " + year_new["actuals"],
+            "Ist per 10/" + year_new["actuals"],
+            "FC 12/" + year_new["actuals"],
+            "Ist " + year_new["actuals"],
         ]
         return cfg
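
The derived year block for a given plan year, as used by the /config route in file_io.py (a sketch; the year mirrors that route's default):

    year = "2023"
    year_new = {"plan": year, "actuals": str(int(year) - 1), "previous": str(int(year) - 2)}
    assert year_new == {"plan": "2023", "actuals": "2022", "previous": "2021"}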

+ 77 - 63
webservice/csv_accounts.py

@@ -6,108 +6,122 @@ import plac
 from pathlib import Path
 
 
-@plac.pos('period', '', type=str)
+@plac.pos("period", "", type=str)
 def actuals(period):
     base_dir = Path(__file__).absolute().parent.parent
     print(base_dir)
-    df1 = pd.read_csv(base_dir / 'data/Belege_Planung_Ist_FC_AHR.csv', sep=';', decimal=',',
-                      dtype={0: str, 1: str, 2: str, 3: float})
-    df2 = pd.read_csv(base_dir / 'data/Belege_Planung_Ist_FC_AHA.csv', sep=';', decimal=',',
-                      dtype={0: str, 1: str, 2: str, 3: float})
+    df1 = pd.read_csv(
+        base_dir / "data/Belege_Planung_Ist_FC_AHR.csv", sep=";", decimal=",", dtype={0: str, 1: str, 2: str, 3: float}
+    )
+    df2 = pd.read_csv(
+        base_dir / "data/Belege_Planung_Ist_FC_AHA.csv", sep=";", decimal=",", dtype={0: str, 1: str, 2: str, 3: float}
+    )
     df12 = pd.concat([df1, df2])
-    df3 = pd.read_csv(base_dir / 'data/NW_GW_Stk_Planung_AHR.csv', sep=';', decimal=',',
-                      dtype={0: str, 1: str, 2: str, 3: float})
-    df4 = pd.read_csv(base_dir / 'data/NW_GW_Stk_Planung_AHA.csv', sep=';', decimal=',',
-                      dtype={0: str, 1: str, 2: str, 3: float})
+    df3 = pd.read_csv(
+        base_dir / "data/NW_GW_Stk_Planung_AHR.csv", sep=";", decimal=",", dtype={0: str, 1: str, 2: str, 3: float}
+    )
+    df4 = pd.read_csv(
+        base_dir / "data/NW_GW_Stk_Planung_AHA.csv", sep=";", decimal=",", dtype={0: str, 1: str, 2: str, 3: float}
+    )
     df34 = pd.concat([df3, df4])
-    df = pd.merge(df12, df34, how='left', on=['Bookkeep_Period', 'Betrieb_Nr', 'Konto_Nr'])
+    df = pd.merge(df12, df34, how="left", on=["Bookkeep_Period", "Betrieb_Nr", "Konto_Nr"])
     # df = pd.read_csv('Planung/Belege_Planung_Ist_FC_Dresen.csv', sep=';', decimal=',',
     #                  dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: float, 6: float})
-    df['Jahr'] = df['Bookkeep_Period'].apply(lambda x: x[:4])
+    df["Jahr"] = df["Bookkeep_Period"].apply(lambda x: x[:4])
     current_year = period[:4]
     prev_year = str(int(current_year) - 1)
     next_year = str(int(current_year) + 1)
     month_no = int(period[4:])
     # df = df[df['Bookkeep_Period'] <= period]
-    df['PY'] = np.where(df['Jahr'] == prev_year, df['Betrag'], 0)
-    df['PYQ'] = np.where(df['Jahr'] == prev_year, df['Menge'], 0)
-    df['CY'] = np.where(df['Jahr'] == current_year, df['Betrag'], 0)
-    df['CYQ'] = np.where(df['Jahr'] == current_year, df['Menge'], 0)
-    df['YTD'] = np.where(df['Bookkeep_Period'] <= period, df['CY'], 0)
-    df['YTDQ'] = np.where(df['Bookkeep_Period'] <= period, df['CYQ'], 0)
-    df['FC'] = df['YTD'] * 12 / month_no
-    df['FCQ'] = df['YTDQ'] * 12 / month_no
-    df.drop(columns=['Menge', 'Betrag'], inplace=True)
+    df["PY"] = np.where(df["Jahr"] == prev_year, df["Betrag"], 0)
+    df["PYQ"] = np.where(df["Jahr"] == prev_year, df["Menge"], 0)
+    df["CY"] = np.where(df["Jahr"] == current_year, df["Betrag"], 0)
+    df["CYQ"] = np.where(df["Jahr"] == current_year, df["Menge"], 0)
+    df["YTD"] = np.where(df["Bookkeep_Period"] <= period, df["CY"], 0)
+    df["YTDQ"] = np.where(df["Bookkeep_Period"] <= period, df["CYQ"], 0)
+    df["FC"] = df["YTD"] * 12 / month_no
+    df["FCQ"] = df["YTDQ"] * 12 / month_no
+    df.drop(columns=["Menge", "Betrag"], inplace=True)
 
     # df2 = pd.pivot_table(df, values='Betrag', index=['Konto Nr', 'Betrieb Nr'], columns=['Jahr'], aggfunc=np.sum, fill_value=0.0)
-    df = df.groupby(['Konto_Nr', 'Betrieb_Nr']).sum()
+    df = df.groupby(["Konto_Nr", "Betrieb_Nr"]).sum()
     print(df.head())
 
     res = {}
-    for (acct, dept), values in df.to_dict(orient='index').items():
+    for (acct, dept), values in df.to_dict(orient="index").items():
         if acct not in res:
             res[acct] = {}
         res[acct][dept] = [round(v, 2) for v in values.values()]
 
-    data = {'values': res}
-    json.dump(data, open(base_dir / f'export/accounts_{next_year}.json', 'w'), indent=2)
+    data = {"values": res}
+    json.dump(data, open(base_dir / f"export/accounts_{next_year}.json", "w"), indent=2)
 
 
-@plac.pos('year', '', type=str)
+@plac.pos("year", "", type=str)
 def planning_prev(year):
     base_dir = Path.cwd().parent
     print(base_dir)
-    df1 = pd.read_csv(base_dir / f'data/Planner_{year}_V1_Plan.csv',
-                      sep=';', decimal=',', encoding='latin-1', dtype={'Betrieb Nr': str, 'Bereich': str, 'Zeile': str})
-    df1['Wert'] = df1['Gesamt']
-    df1 = df1[['Jahr', 'Betrieb Nr', 'Vstufe 1', 'Bereich', 'Zeile', 'Konto', 'Version', 'Wert']]
-
-    df2 = pd.read_csv(base_dir / f'data/Planner_{year}_V1_Stk.csv',
-                      sep=';', decimal=',', encoding='latin-1', dtype={'Betrieb Nr': str, 'Bereich': str, 'Zeile': str})
-    df2['Menge'] = df2['Gesamt']
-    df2['Vstufe 1'] = 'Umsatzerlöse'
-    df2 = df2[['Jahr', 'Betrieb Nr', 'Vstufe 1', 'Bereich', 'Zeile', 'Konto', 'Version', 'Menge']]
-    df = pd.merge(df1, df2, how='left', on=['Jahr', 'Betrieb Nr', 'Vstufe 1', 'Bereich', 'Zeile', 'Konto', 'Version'])
-    df['Menge'] = df['Menge'].fillna(0)
-    df['Wert'] = df['Wert'].fillna(0)
-    df['Wert'] = np.where(df['Vstufe 1'] == 'Umsatzerlöse', df['Wert'], df['Wert'] * -1)
-
-    df['Bereich'] = df['Bereich'].fillna('NA').replace('VW (inkl. GF)', '?')
-    df['regex'] = df['Vstufe 1'] + ";" + df['Bereich'] + ";.*" + df['Zeile'] + ' - [^;]*;;'
-    df = df[df['Wert'] != 0]
-
-    gcstruct = json.load(open(base_dir / 'export/gcstruct.json', 'r'))
-    structure_ids = [s['id'] for s in gcstruct['flat']['Struktur_FB']]
-
-    df['id'] = df['regex'].apply(lambda x: (list(filter(lambda y: match(x, y), structure_ids)) + [''])[0])
-    df = df[df['id'] != '']
+    df1 = pd.read_csv(
+        base_dir / f"data/Planner_{year}_V1_Plan.csv",
+        sep=";",
+        decimal=",",
+        encoding="latin-1",
+        dtype={"Betrieb Nr": str, "Bereich": str, "Zeile": str},
+    )
+    df1["Wert"] = df1["Gesamt"]
+    df1 = df1[["Jahr", "Betrieb Nr", "Vstufe 1", "Bereich", "Zeile", "Konto", "Version", "Wert"]]
+
+    df2 = pd.read_csv(
+        base_dir / f"data/Planner_{year}_V1_Stk.csv",
+        sep=";",
+        decimal=",",
+        encoding="latin-1",
+        dtype={"Betrieb Nr": str, "Bereich": str, "Zeile": str},
+    )
+    df2["Menge"] = df2["Gesamt"]
+    df2["Vstufe 1"] = "Umsatzerlöse"
+    df2 = df2[["Jahr", "Betrieb Nr", "Vstufe 1", "Bereich", "Zeile", "Konto", "Version", "Menge"]]
+    df = pd.merge(df1, df2, how="left", on=["Jahr", "Betrieb Nr", "Vstufe 1", "Bereich", "Zeile", "Konto", "Version"])
+    df["Menge"] = df["Menge"].fillna(0)
+    df["Wert"] = df["Wert"].fillna(0)
+    df["Wert"] = np.where(df["Vstufe 1"] == "Umsatzerlöse", df["Wert"], df["Wert"] * -1)
+
+    df["Bereich"] = df["Bereich"].fillna("NA").replace("VW (inkl. GF)", "?")
+    df["regex"] = df["Vstufe 1"] + ";" + df["Bereich"] + ";.*" + df["Zeile"] + " - [^;]*;;"
+    df = df[df["Wert"] != 0]
+
+    gcstruct = json.load(open(base_dir / "export/gcstruct.json", "r"))
+    structure_ids = [s["id"] for s in gcstruct["flat"]["Struktur_FB"]]
+
+    df["id"] = df["regex"].apply(lambda x: (list(filter(lambda y: match(x, y), structure_ids)) + [""])[0])
+    df = df[df["id"] != ""]
 
     res = {}
-    for item in df.to_dict(orient='records'):
-        if item['id'] not in res:
-            res[item['id']] = {}
-        res[item['id']][item['Betrieb Nr']] = [item['Wert'], item['Menge']]
-    data = {'values': res}
+    for item in df.to_dict(orient="records"):
+        if item["id"] not in res:
+            res[item["id"]] = {}
+        res[item["id"]][item["Betrieb Nr"]] = [item["Wert"], item["Menge"]]
+    data = {"values": res}
     next_year = str(int(year) + 1)
-    json.dump(data, open(base_dir / f'export/planning_{next_year}.json', 'w'), indent=2)
+    json.dump(data, open(base_dir / f"export/planning_{next_year}.json", "w"), indent=2)
 
 
 def planning_new(filename):
-    with open('planner/export/' + filename, 'r') as frh:
+    with open("planner/export/" + filename, "r") as frh:
         structure = json.load(frh)
     year = str(int(filename[:4]) + 1)
     result = {}
     for s in structure:
-        if len(s['accounts']) == 0:
+        if len(s["accounts"]) == 0:
             continue
-        result[s['id']] = dict([(k, [v[10], v[5]]) for k, v in s['values2'].items()])
+        result[s["id"]] = dict([(k, [v[10], v[5]]) for k, v in s["values2"].items()])
 
-    with open(f"planner/export/planning_{year}.json", 'w') as fwh:
-        json.dump({'values': result}, fwh, indent=2)
+    with open(f"planner/export/planning_{year}.json", "w") as fwh:
+        json.dump({"values": result}, fwh, indent=2)
 
 
-if __name__ == '__main__':
-    actuals('202310')
-    planning_prev('2023')
+if __name__ == "__main__":
+    actuals("202310")
+    planning_prev("2023")
     # plac.call(actuals)
     # planning_new('2022_V2_20220407150009.json')
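
The FC/FCQ columns above annualize the year-to-date figures by 12 / month_no; a minimal worked example with hypothetical numbers:

    period = "202310"
    month_no = int(period[4:])   # 10
    ytd = 250_000.0              # cumulative amount through period 202310
    fc = ytd * 12 / month_no     # 300000.0, the extrapolated full-year forecast
    print(fc)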

+ 52 - 63
webservice/file_io.py

@@ -1,80 +1,75 @@
-import os
+# import os
 from pathlib import Path
 from flask import Blueprint, Response, request
 from flask_cors import CORS
 from datetime import datetime
 import json
 import hashlib
-import time
+
+# import time
 from webservice.auth import Auth
 from webservice.planner_load import PlannerLoad
 from webservice.config_load import ConfigLoad
 
-bp = Blueprint('file_io', __name__)
+bp = Blueprint("file_io", __name__)
 # cors = CORS(app, resources={r"/*": {"origins": "http://localhost:4200/"}})
 CORS(bp)
 
 script_dir = Path(__file__).parent
-save_dir = script_dir.parent.joinpath('save')
-planner_dir = script_dir.parent.joinpath('export')
-config_dir = script_dir.parent.joinpath('config')
+save_dir = script_dir.parent.joinpath("save")
+planner_dir = script_dir.parent.joinpath("export")
+config_dir = script_dir.parent.joinpath("config")
 # save_dir = Path('C:\\Projekte\\Python\\Planung\\save')
 user_info = None
-timestamp_keywords = ('accounts', 'plan', 'marketing')
+timestamp_keywords = ("accounts", "plan", "marketing")
 
 
-@bp.route('/login/', methods=['POST'])
+@bp.route("/login/", methods=["POST"])
 def login():
-    user = request.get_json()['data']['user']
-    password = request.get_json()['data']['password']
+    user = request.get_json()["data"]["user"]
+    password = request.get_json()["data"]["password"]
     user_info = Auth().get_user(user, password)
 
-    return Response(
-        response=json.dumps(user_info),
-        mimetype='application/json'
-    )
+    return Response(response=json.dumps(user_info), mimetype="application/json")
 
 
-@bp.route('/list', methods=['GET'])
+@bp.route("/list", methods=["GET"])
 def list_json():
     return json.dumps(list_dict(), indent=2)
 
 
 def list_dict():
-    result = {'list': [], 'tree': {}}
+    result = {"list": [], "tree": {}}
     for currentFile in save_dir.iterdir():
-        if currentFile.is_file() and currentFile.name[-5:] == '.json':
+        if currentFile.is_file() and currentFile.name[-5:] == ".json":
             year, version, timestamp = currentFile.name[:-5].split("_")
-            if year not in result['tree']:
-                result['tree'][year] = {}
-            if version not in result['tree'][year]:
-                result['tree'][year][version] = []
-            result['tree'][year][version].append(timestamp)
-            result['list'].append({'year': year, 'version': version, 'timestamp': timestamp})
-            result['list'].sort(key=lambda x: x['timestamp'])
+            if year not in result["tree"]:
+                result["tree"][year] = {}
+            if version not in result["tree"][year]:
+                result["tree"][year][version] = []
+            result["tree"][year][version].append(timestamp)
+            result["list"].append({"year": year, "version": version, "timestamp": timestamp})
+            result["list"].sort(key=lambda x: x["timestamp"])
     return result
 
 
-@bp.route('/load/<year>/<version>/<timestamp>', methods=['GET'])
+@bp.route("/load/<year>/<version>/<timestamp>", methods=["GET"])
 def load(year, version, timestamp):
     file = full_filename(year, version, timestamp)
 
-    if timestamp == 'new':
-        return Response(
-            response=new_file(year),
-            mimetype='application/json'
-        )
+    if timestamp == "new":
+        return Response(response=new_file(year), mimetype="application/json")
 
-    if timestamp == 'current' or not Path(file).exists():
+    if timestamp == "current" or not Path(file).exists():
         file_list = list_dict()
-        timestamp2 = file_list['tree'][year][version][-1]
+        timestamp2 = file_list["tree"][year][version][-1]
         file = full_filename(year, version, timestamp2)
 
     print(file)
 
-    with open(file, 'r') as frh:
+    with open(file, "r") as frh:
         structure = json.loads(frh.read())
-        if 'options' not in structure[0]:
+        if "options" not in structure[0]:
             p_load = PlannerLoad(planner_dir)
             structure = p_load.convert_file(structure)
         elif timestamp in timestamp_keywords:
@@ -83,84 +78,78 @@ def load(year, version, timestamp):
             p_load.set_structure(structure)
             structure = p_load.update_values(timestamp)
 
-        return Response(
-            response=json.dumps(structure),
-            mimetype='application/json'
-        )
+        return Response(response=json.dumps(structure), mimetype="application/json")
 
 
 def full_filename(year, version, timestamp):
-    return f'{str(save_dir)}/{year}_{version}_{timestamp}.json'
+    return f"{str(save_dir)}/{year}_{version}_{timestamp}.json"
 
 
-@bp.route('/new/<year>', methods=['GET'])
+@bp.route("/new/<year>", methods=["GET"])
 def new_file(year):
     p_load = PlannerLoad(planner_dir)
     structure = p_load.new_file(year)
     return json.dumps(structure)
 
 
-@bp.route('/load/<filename>', methods=['GET'])
+@bp.route("/load/<filename>", methods=["GET"])
 def load_file(filename):
-    full_filename = f'{str(save_dir)}/{filename}.json'
+    full_filename = f"{str(save_dir)}/{filename}.json"
     if not Path(full_filename).exists():
         file_list = list_dict()
         timestamp = file_list[filename][-1]
-        full_filename = f'{full_filename[:-5]}_{timestamp}.json'
+        full_filename = f"{full_filename[:-5]}_{timestamp}.json"
 
     print(full_filename)
-    with open(full_filename, 'r') as frh:
+    with open(full_filename, "r") as frh:
         return frh.read()
 
 
-@bp.route('/save/<year>/<version>', methods=['POST'])
+@bp.route("/save/<year>/<version>", methods=["POST"])
 def save_version(year, version):
-    return save(year + '_' + version)
+    return save(year + "_" + version)
 
 
-@bp.route('/save/<filename>', methods=['POST'])
+@bp.route("/save/<filename>", methods=["POST"])
 def save(filename):
-    if request.method != 'POST':
-        return 'File is missing!'
+    if request.method != "POST":
+        return "File is missing!"
 
-    new_filename = str(save_dir) + '/' + filename + '_' + datetime.now().strftime('%Y%m%d%H%M%S') + '.json'
-    data = request.get_json()['data']
-    with open(new_filename, 'w') as fwh:
+    new_filename = str(save_dir) + "/" + filename + "_" + datetime.now().strftime("%Y%m%d%H%M%S") + ".json"
+    data = request.get_json()["data"]
+    with open(new_filename, "w") as fwh:
         json.dump(data, fwh, indent=2)
     print(new_filename)
 
     try:
         file_list = list_dict()
         timestamp = file_list[filename][-2]
-        old_filename = str(save_dir) + '/' + filename + "_" + timestamp + '.json'
+        old_filename = str(save_dir) + "/" + filename + "_" + timestamp + ".json"
 
         if old_filename != new_filename and hash(old_filename) == hash(new_filename):
             Path(old_filename).unlink()
-            return 'File saved with no changes!'
+            return "File saved with no changes!"
     except KeyError:
         pass
-    return 'File saved with new data!'
+    return "File saved with new data!"
 
 
-@bp.route('/config', methods=['GET'])
+@bp.route("/config", methods=["GET"])
 def config():
     cfg = ConfigLoad(str(config_dir))
-    return Response(
-        response=json.dumps(cfg.load_file('reisacher', '2023')),
-        mimetype='application/json'
-    )
+    return Response(response=json.dumps(cfg.load_file("reisacher", "2023")), mimetype="application/json")
 
 
-@bp.route('/accounts/<period>', methods=['GET'])
+@bp.route("/accounts/<period>", methods=["GET"])
 def accounts(period):
-    with open(planner_dir.joinpath(f'accounts_{period}.json'), 'r') as frh:
+    with open(planner_dir.joinpath(f"accounts_{period}.json"), "r") as frh:
         return frh.read()
 
 
 def hash(filename):
     BUF_SIZE = 65536
     sha1 = hashlib.sha1()
-    with open(filename, 'rb') as g:
+    with open(filename, "rb") as g:
         while True:
             hashdata = g.read(BUF_SIZE)
             if not hashdata:

+ 475 - 308
webservice/gcstruct.py

@@ -5,6 +5,7 @@ import json
 import csv
 import re
 import chevron
+
 # from shutil import copyfile
 from bs4 import BeautifulSoup
 from functools import reduce
@@ -12,202 +13,239 @@ from pathlib import Path
 
 
 def get_flat(node):
-    result = [{
-        'id': node['id'],
-        'text': node['text'],
-        'children': [x['id'] for x in node['children']],
-        'children2': [],
-        'parents': node['parents'],
-        'accounts': node['accounts'],
-        'costcenter': '',
-        'level': node['level'],
-        'drilldown': node['level'] < 2,    # (node['level'] != 2 and len(node['accounts']) == 0),
-        'form': node['form'],
-        'accountlevel': False,
-        'absolute': True,
-        'seasonal': True,
-        'status': "0",
-        'values': [],
-        'values2': {}
-    }]
-    for child in node['children']:
+    result = [
+        {
+            "id": node["id"],
+            "text": node["text"],
+            "children": [x["id"] for x in node["children"]],
+            "children2": [],
+            "parents": node["parents"],
+            "accounts": node["accounts"],
+            "costcenter": "",
+            "level": node["level"],
+            "drilldown": node["level"] < 2,  # (node['level'] != 2 and len(node['accounts']) == 0),
+            "form": node["form"],
+            "accountlevel": False,
+            "absolute": True,
+            "seasonal": True,
+            "status": "0",
+            "values": [],
+            "values2": {},
+        }
+    ]
+    for child in node["children"]:
         result += get_flat(child)
     return result
 
 
 def get_parents_list(p_list):
-    id = ';'.join(p_list) + ';' * (10 - len(p_list))
+    id = ";".join(p_list) + ";" * (10 - len(p_list))
     if len(p_list) > 0:
         return [id] + get_parents_list(p_list[:-1])
-    return [';' * 9]
+    return [";" * 9]
 
 
 def structure_from_tree(node):
     result = []
-    result.append(node['id'])
-    for child in node['children']:
+    result.append(node["id"])
+    for child in node["children"]:
         result.extend(structure_from_tree(child))
     return result
 
 
 def xml_from_tree(xml_node, tree_node):
-    for child in tree_node['children']:
-        element = ET.SubElement(xml_node, 'Ebene')
-        element.set("Name", child['text'])
+    for child in tree_node["children"]:
+        element = ET.SubElement(xml_node, "Ebene")
+        element.set("Name", child["text"])
         xml_from_tree(element, child)
 
 
 def split_it(text, index):
     try:
-        return re.findall(r'([^;]+) - ([^;]*);;', text)[0][index]
+        return re.findall(r"([^;]+) - ([^;]*);;", text)[0][index]
     except Exception:
-        return ''
+        return ""
 
 
 def last_layer(text):
     try:
-        return re.findall(r'([^;]+);;', text)[0]
+        return re.findall(r"([^;]+);;", text)[0]
     except Exception:
-        return ''
+        return ""
 
 
 def get_default_cols(i):
-    return ['Ebene' + str(i) for i in range(i * 10 + 1, (i + 1) * 10 + 1)]
+    return ["Ebene" + str(i) for i in range(i * 10 + 1, (i + 1) * 10 + 1)]
 
 
 def get_structure_exports(s):
     result = {
-        'files': {},
-        'format': {
-            'KontoFormat': '{0} - {1}',
-            'HerstellerkontoFormat': '{{Herstellerkonto_Nr}}',
-            'HerstellerBezeichnungFormat': '{{Herstellerkonto_Bez}}',
-            'NeueHerstellerkontenAnlegen': False
-        }
+        "files": {},
+        "format": {
+            "KontoFormat": "{0} - {1}",
+            "HerstellerkontoFormat": "{{Herstellerkonto_Nr}}",
+            "HerstellerBezeichnungFormat": "{{Herstellerkonto_Bez}}",
+            "NeueHerstellerkontenAnlegen": False,
+        },
     }
-    export_files = ['ExportStk', 'ExportStrukturenStk', 'ExportAdjazenz', 'ExportUebersetzung',
-                    'ExportUebersetzungStk', 'ExportHerstellerKontenrahmen']
-    export_format = ['KontoFormat', 'HerstellerkontoFormat', 'HerstellerBezeichnungFormat', 'NeueHerstellerkontenAnlegen']
+    export_files = [
+        "ExportStk",
+        "ExportStrukturenStk",
+        "ExportAdjazenz",
+        "ExportUebersetzung",
+        "ExportUebersetzungStk",
+        "ExportHerstellerKontenrahmen",
+    ]
+    export_format = [
+        "KontoFormat",
+        "HerstellerkontoFormat",
+        "HerstellerBezeichnungFormat",
+        "NeueHerstellerkontenAnlegen",
+    ]
     for e in export_files:
-        if s.find(e) is not None and s.find(e).text is not None and s.find(e).text[-4:] == '.csv':
-            result['files'][e] = s.find(e).text
+        if s.find(e) is not None and s.find(e).text is not None and s.find(e).text[-4:] == ".csv":
+            result["files"][e] = s.find(e).text
     for e in export_format:
-        if s.find(e) is not None and s.find(e).text != '':
-            result['format'][e] = s.find(e).text
+        if s.find(e) is not None and s.find(e).text != "":
+            result["format"][e] = s.find(e).text
 
-    result['format']['NeueHerstellerkontenAnlegen'] = (result['format']['NeueHerstellerkontenAnlegen'] == 'true')
+    result["format"]["NeueHerstellerkontenAnlegen"] = result["format"]["NeueHerstellerkontenAnlegen"] == "true"
     return result
 
 
-class GCStruct():
+class GCStruct:
     config = {
-        'path': 'c:/projekte/python/gcstruct',
-        'path2': 'c:/projekte/python/gcstruct',
-        'file': 'c:/projekte/python/gcstruct/config/config.xml',
-        'output': 'gcstruct.json',
-        'default': [],
-        'special': {},
-        'special2': {
-            'Planner': ['Kostenstelle', 'Ebene1', 'Ebene2'],
-            'Test': ['Ebene1', 'Ebene2']
-        },
-        'columns': ['Konto_Nr', 'Konto_Bezeichnung', 'Konto_Art', 'Konto_KST', 'Konto_STK',
-                    'Konto_1', 'Konto_2', 'Konto_3', 'Konto_4', 'Konto_5'],
-        'struct': {},
-        'export': {}
+        "path": "c:/projekte/python/gcstruct",
+        "path2": "c:/projekte/python/gcstruct",
+        "file": "c:/projekte/python/gcstruct/config/config.xml",
+        "output": "gcstruct.json",
+        "default": [],
+        "special": {},
+        "special2": {"Planner": ["Kostenstelle", "Ebene1", "Ebene2"], "Test": ["Ebene1", "Ebene2"]},
+        "columns": [
+            "Konto_Nr",
+            "Konto_Bezeichnung",
+            "Konto_Art",
+            "Konto_KST",
+            "Konto_STK",
+            "Konto_1",
+            "Konto_2",
+            "Konto_3",
+            "Konto_4",
+            "Konto_5",
+        ],
+        "struct": {},
+        "export": {},
     }
 
-    json_result = {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
+    json_result = {"accounts": {}, "tree": {}, "flat": {}, "struct_export": {}, "skr51_vars": {}}
     structure_ids = []
 
-    translate = {'Konto_Nr': 'SKR51', 'Kostenstelle': 'KST', 'Absatzkanal': 'ABS',
-                 'Kostenträger': 'KTR', 'Marke': 'MAR', 'Standort': 'STA', 'Marke_HBV': 'MAR', 'Standort_HBV': 'BMC'}
+    translate = {
+        "Konto_Nr": "SKR51",
+        "Kostenstelle": "KST",
+        "Absatzkanal": "ABS",
+        "Kostenträger": "KTR",
+        "Marke": "MAR",
+        "Standort": "STA",
+        "Marke_HBV": "MAR",
+        "Standort_HBV": "BMC",
+    }
 
     def __init__(self, struct_dir, export_dir=None):
-        self.config['path'] = struct_dir
-        self.config['path2'] = struct_dir + '/export' if export_dir is None else export_dir
-
-        self.config['file'] = f"{self.config['path']}/config/gcstruct.xml"
-        if not Path(self.config['file']).exists():
-            self.config['file'] = f"{self.config['path']}/config/config.xml"
-
-        cfg = ET.parse(self.config['file'])
-        self.config['default'] = [s.find('Name').text for s in cfg.getroot().find('Strukturdefinitionen').findall('Struktur')]
-        self.config['export'] = dict([(s.find('Name').text, get_structure_exports(s)) for s in
-                                     cfg.getroot().find('Strukturdefinitionen').findall('Struktur')])
-
-        struct = dict([(x, get_default_cols(i)) for (i, x) in enumerate(self.config['default'])])
-        struct.update(self.config['special'])
-        self.config['struct'] = struct
+        self.config["path"] = struct_dir
+        self.config["path2"] = struct_dir + "/export" if export_dir is None else export_dir
+
+        self.config["file"] = f"{self.config['path']}/config/gcstruct.xml"
+        if not Path(self.config["file"]).exists():
+            self.config["file"] = f"{self.config['path']}/config/config.xml"
+
+        cfg = ET.parse(self.config["file"])
+        self.config["default"] = [
+            s.find("Name").text for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
+        ]
+        self.config["export"] = dict(
+            [
+                (s.find("Name").text, get_structure_exports(s))
+                for s in cfg.getroot().find("Strukturdefinitionen").findall("Struktur")
+            ]
+        )
+
+        struct = dict([(x, get_default_cols(i)) for (i, x) in enumerate(self.config["default"])])
+        struct.update(self.config["special"])
+        self.config["struct"] = struct
         # print(self.config['struct'])
 
     def export_header(self, filetype):
         return {
-            'ExportStk': [],
-            'ExportStrukturenStk': [],
-            'ExportAdjazenz': [],
-            'ExportUebersetzung': ['Konto_Nr_Hersteller', 'Konto_Nr_Split', 'Konto_Nr_Haendler', 'Info'],
-            'ExportUebersetzungStk': ['Konto_Nr_Hersteller', 'Konto_Nr_Split', 'Konto_Nr_Haendler', 'Info'],
-            'ExportHerstellerKontenrahmen': ['Konto_Nr', 'Konto_Bezeichnung', 'Case', 'Info']
+            "ExportStk": [],
+            "ExportStrukturenStk": [],
+            "ExportAdjazenz": [],
+            "ExportUebersetzung": ["Konto_Nr_Hersteller", "Konto_Nr_Split", "Konto_Nr_Haendler", "Info"],
+            "ExportUebersetzungStk": ["Konto_Nr_Hersteller", "Konto_Nr_Split", "Konto_Nr_Haendler", "Info"],
+            "ExportHerstellerKontenrahmen": ["Konto_Nr", "Konto_Bezeichnung", "Case", "Info"],
         }[filetype]
 
     def accounts_from_csv(self, struct):
-        max_rows = (len(self.config['default']) + 1) * 10
-        with open(f"{self.config['path']}/Kontenrahmen/Kontenrahmen.csv", 'r', encoding='latin-1') as f:
-            csv_reader = csv.reader(f, delimiter=';')
+        max_rows = (len(self.config["default"]) + 1) * 10
+        with open(f"{self.config['path']}/Kontenrahmen/Kontenrahmen.csv", "r", encoding="latin-1") as f:
+            csv_reader = csv.reader(f, delimiter=";")
             imported_csv = [row[:max_rows] for row in csv_reader]
 
-        df = pd.DataFrame.from_records(np.array(imported_csv[1:], dtype='object'), columns=imported_csv[0]).fillna(value='')
-        df = df.rename(columns={'Kostenstelle': 'Konto_KST', 'STK': 'Konto_STK'})
+        df = pd.DataFrame.from_records(np.array(imported_csv[1:], dtype="object"), columns=imported_csv[0]).fillna(
+            value=""
+        )
+        df = df.rename(columns={"Kostenstelle": "Konto_KST", "STK": "Konto_STK"})
 
         for i, (s, cols) in enumerate(struct.items()):
-            df[s] = reduce(lambda x, y: x + ";" + df[y], cols, '')
+            df[s] = reduce(lambda x, y: x + ";" + df[y], cols, "")
             df[s] = df[s].apply(lambda x: x[1:])
-            df['LetzteEbene' + str(i + 1)] = df[s].apply(lambda x: last_layer(x))
-            df['LetzteEbene' + str(i + 1) + '_Nr'] = df[s].apply(lambda x: split_it(x, 0))
-            df['LetzteEbene' + str(i + 1) + '_Bez'] = df[s].apply(lambda x: split_it(x, 1))
-        df['Herstellerkonto_Nr'] = df['LetzteEbene1_Nr']
-        df['Herstellerkonto_Bez'] = df['LetzteEbene1_Bez']
+            df["LetzteEbene" + str(i + 1)] = df[s].apply(lambda x: last_layer(x))
+            df["LetzteEbene" + str(i + 1) + "_Nr"] = df[s].apply(lambda x: split_it(x, 0))
+            df["LetzteEbene" + str(i + 1) + "_Bez"] = df[s].apply(lambda x: split_it(x, 1))
+        df["Herstellerkonto_Nr"] = df["LetzteEbene1_Nr"]
+        df["Herstellerkonto_Bez"] = df["LetzteEbene1_Bez"]
         return df
 
     def tree_from_xml(self, struct, df):
         result = {}
-        for (s, cols) in struct.items():
+        for s, cols in struct.items():
             try:
                 tree = ET.parse(f"{self.config['path']}/Xml/{s}.xml")
                 result[s] = self.get_tree_root(tree.getroot(), s)
 
             except FileNotFoundError:
-                print('XML-Datei fehlt')
+                print("XML-Datei fehlt")
                 used_entries = [x.split(";")[1:] for x in set(df[s].to_numpy())]
                 print(used_entries)
-                root = ET.Element('Ebene')
-                root.set('Name', s)
+                root = ET.Element("Ebene")
+                root.set("Name", s)
                 result[s] = self.get_tree_root(root, s)
                 # self.json_result["tree"][s] = get_tree_from_accounts(cols, [])
         return result
 
     def get_structure_and_tree(self):
-        df = self.accounts_from_csv(self.config['struct'])
-        self.json_result['accounts'] = df.to_dict('records')
-        self.structure_ids = df.melt(id_vars=['Konto_Nr'], value_vars=self.config['struct'].keys(),
-                                     var_name='Struktur', value_name='id').groupby(by=['Struktur', 'id'])
-        self.json_result['tree'] = self.tree_from_xml(self.config['struct'], df)
+        df = self.accounts_from_csv(self.config["struct"])
+        self.json_result["accounts"] = df.to_dict("records")
+        self.structure_ids = df.melt(
+            id_vars=["Konto_Nr"], value_vars=self.config["struct"].keys(), var_name="Struktur", value_name="id"
+        ).groupby(by=["Struktur", "id"])
+        self.json_result["tree"] = self.tree_from_xml(self.config["struct"], df)
 
-        for (s, cols) in self.config['struct'].items():
-            self.json_result['flat'][s] = get_flat(self.json_result['tree'][s])
+        for s, cols in self.config["struct"].items():
+            self.json_result["flat"][s] = get_flat(self.json_result["tree"][s])
 
-        for (s, entries) in self.json_result['flat'].items():
-            cols = self.config['struct'][s]
-            df_temp = pd.DataFrame([x['id'].split(';') for x in entries], columns=cols)
-            self.json_result['struct_export'][s] = df_temp.to_dict(orient='records')
+        for s, entries in self.json_result["flat"].items():
+            cols = self.config["struct"][s]
+            df_temp = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
+            self.json_result["struct_export"][s] = df_temp.to_dict(orient="records")
 
         # {'accounts': {}, 'tree': {}, 'flat': {}, 'struct_export': {}, 'skr51_vars': {}}
-        json.dump(self.json_result, open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)
+        json.dump(self.json_result, open(f"{self.config['path2']}/{self.config['output']}", "w"), indent=2)
         return self.json_result
 
     def get_accounts(self, structure, id):
-        return [x['Konto_Nr'] for x in self.json_result['accounts'] if x[structure] == id]
+        return [x["Konto_Nr"] for x in self.json_result["accounts"] if x[structure] == id]
         # return []
         # res = self.structure_ids.groups.get((structure, id))
         # if res is None:
@@ -215,307 +253,436 @@ class GCStruct():
         # return res.values
 
     def export(self):
-        for s in self.config['export'].keys():
-            for (filetype, filename) in self.config['export'][s]['files'].items():
-                with open(self.config['path2'] + '/' + filename, 'w') as fwh:
-                    fwh.write('Konto_Nr_Hersteller;Konto_Nr_Split;Konto_Nr_Haendler;Info\n')
+        for s in self.config["export"].keys():
+            for filetype, filename in self.config["export"][s]["files"].items():
+                with open(self.config["path2"] + "/" + filename, "w") as fwh:
+                    fwh.write("Konto_Nr_Hersteller;Konto_Nr_Split;Konto_Nr_Haendler;Info\n")
                     # 'Hersteller'Konto_Nr;Konto_Bezeichnung;Case;Info'
-                    for a in self.json_result['accounts']:
-                        if a['Herstellerkonto_Nr'] != '':
-                            account = chevron.render(self.config['export']['SKR51']['format']['HerstellerkontoFormat'], a)
-                            fwh.write(account + ';' + account + ';' + a['Konto_Nr'] + ';' + '\n')   # a['Herstellerkonto_Bez']
+                    for a in self.json_result["accounts"]:
+                        if a["Herstellerkonto_Nr"] != "":
+                            account = chevron.render(
+                                self.config["export"]["SKR51"]["format"]["HerstellerkontoFormat"], a
+                            )
+                            fwh.write(
+                                account + ";" + account + ";" + a["Konto_Nr"] + ";" + "\n"
+                            )  # a['Herstellerkonto_Bez']
 
     def get_tree(self, node, parents, structure):
         result = []
         for child in node:
             p = get_parents_list(parents)
-            parents.append(child.attrib['Name'])
-            id = ';'.join(parents) + ';' * (10 - len(parents))
-            result.append({
-                'id': id,
-                'text': child.attrib['Name'],
-                'children': self.get_tree(child, parents, structure),
-                'parents': p,
-                'accounts': self.get_accounts(structure, id),
-                'level': len(parents),
-                'form': child.attrib.get('Split', '')
-            })
+            parents.append(child.attrib["Name"])
+            id = ";".join(parents) + ";" * (10 - len(parents))
+            result.append(
+                {
+                    "id": id,
+                    "text": child.attrib["Name"],
+                    "children": self.get_tree(child, parents, structure),
+                    "parents": p,
+                    "accounts": self.get_accounts(structure, id),
+                    "level": len(parents),
+                    "form": child.attrib.get("Split", ""),
+                }
+            )
             parents.pop()
         return result
 
     def get_tree_root(self, node, structure):
-        id = ';' * 9
+        id = ";" * 9
         return {
-            'id': id,
-            'text': node.attrib['Name'],
-            'children': self.get_tree(node, [], structure),
-            'parents': [],
-            'accounts': [],
-            'level': 0,
-            'form': ''
+            "id": id,
+            "text": node.attrib["Name"],
+            "children": self.get_tree(node, [], structure),
+            "parents": [],
+            "accounts": [],
+            "level": 0,
+            "form": "",
         }
 
     def post_structure_and_tree(self):
-        json_post = json.load(open(f"{self.config['path']}/{self.config['output']}", 'r'))
+        json_post = json.load(open(f"{self.config['path']}/{self.config['output']}", "r"))
 
         # Kontenrahmen.csv
-        ebenen = ['Ebene' + str(i) for i in range(1, len(self.config['default']) * 10 + 1)]
-        header = ';'.join(self.config['columns'] + ebenen)
-        cols = self.config['columns'] + self.config['default']
-        with open(self.config['path'] + '/Kontenrahmen/Kontenrahmen_out.csv', 'w', encoding='latin-1') as f:
-            f.write(header + '\n')
-            for row in json_post['Kontenrahmen']:
-                f.write(';'.join([row[e] for e in cols]) + '\n')
+        ebenen = ["Ebene" + str(i) for i in range(1, len(self.config["default"]) * 10 + 1)]
+        header = ";".join(self.config["columns"] + ebenen)
+        cols = self.config["columns"] + self.config["default"]
+        with open(self.config["path"] + "/Kontenrahmen/Kontenrahmen_out.csv", "w", encoding="latin-1") as f:
+            f.write(header + "\n")
+            for row in json_post["Kontenrahmen"]:
+                f.write(";".join([row[e] for e in cols]) + "\n")
         # print(header)
         # xml und evtl. Struktur.csv
-        for i, s in enumerate(self.config['default']):
-            with open(f"{self.config['path']}/Strukturen/Kontenrahmen.csv/{s}_out.csv", 'w', encoding='latin-1') as f:
-                f.write(';'.join(['Ebene' + str(i * 10 + j) for j in range(1, 11)]) + '\n')
-                rows = structure_from_tree({'id': ";" * 9, 'children': json_post[s]})
-                f.write('\n'.join(rows))
+        for i, s in enumerate(self.config["default"]):
+            with open(f"{self.config['path']}/Strukturen/Kontenrahmen.csv/{s}_out.csv", "w", encoding="latin-1") as f:
+                f.write(";".join(["Ebene" + str(i * 10 + j) for j in range(1, 11)]) + "\n")
+                rows = structure_from_tree({"id": ";" * 9, "children": json_post[s]})
+                f.write("\n".join(rows))
 
-            # with open(self.config['path'] + "/Strukturen/Kontenrahmen.csv/" + structure + "_2.csv", "w", encoding="latin-1") as f:
-            root = ET.Element('Ebene')
-            root.set('Name', s)
-            xml_from_tree(root, {'id': ";" * 9, 'children': json_post[s]})
+            # with open(self.config['path'] + "/Strukturen/Kontenrahmen.csv/" +
+            # structure + "_2.csv", "w", encoding="latin-1") as f:
+            root = ET.Element("Ebene")
+            root.set("Name", s)
+            xml_from_tree(root, {"id": ";" * 9, "children": json_post[s]})
 
-            with open(f"{self.config['path']}/Xml/{s}_out.xml", 'w', encoding='utf-8') as f:
-                f.write(BeautifulSoup(ET.tostring(root), 'xml').prettify())
+            with open(f"{self.config['path']}/Xml/{s}_out.xml", "w", encoding="utf-8") as f:
+                f.write(BeautifulSoup(ET.tostring(root), "xml").prettify())
 
     def skr51_translate(self, accounts_combined_files):
-        df = self.accounts_from_csv(self.config['struct'])
+        df = self.accounts_from_csv(self.config["struct"])
 
         df_translate = {}
         for i, (t_from, t_to) in enumerate(self.translate.items()):
-            last = 'LetzteEbene' + str(i + 1)
-            from_label = ['Konto_Nr', last, last + '_Nr', last + '_Bez', 'Ebene' + str(i * 10 + 1), 'Ebene' + str(i * 10 + 2)]
-            to_label = [t_to, t_to + '_Ebene', t_to + '_Nr', t_to + '_Bez', 'Ebene1', 'Ebene2']
-            df_translate[t_from] = df[df[last + '_Nr'] != ''][from_label].rename(columns=dict(zip(from_label, to_label)))
+            last = "LetzteEbene" + str(i + 1)
+            from_label = [
+                "Konto_Nr",
+                last,
+                last + "_Nr",
+                last + "_Bez",
+                "Ebene" + str(i * 10 + 1),
+                "Ebene" + str(i * 10 + 2),
+            ]
+            to_label = [t_to, t_to + "_Ebene", t_to + "_Nr", t_to + "_Bez", "Ebene1", "Ebene2"]
+            df_translate[t_from] = df[df[last + "_Nr"] != ""][from_label].rename(
+                columns=dict(zip(from_label, to_label))
+            )
             # print(df_translate[t_to].head())
 
         df2 = []
         for ac_file in accounts_combined_files:
-            df2.append(pd.read_csv(ac_file, decimal=',', sep=';', encoding='latin-1',
-                                   converters={i: str for i in range(0, 200)}))
+            df2.append(
+                pd.read_csv(
+                    ac_file, decimal=",", sep=";", encoding="latin-1", converters={i: str for i in range(0, 200)}
+                )
+            )
         df_source = pd.concat(df2)
         df3 = df_source.copy()
-        df3['Konto_Nr'] = df3['Konto_Nr'] + '_STK'
+        df3["Konto_Nr"] = df3["Konto_Nr"] + "_STK"
         df_source = pd.concat([df_source, df3])
 
         for t_from, t_to in self.translate.items():
-            if t_to == 'SKR51':
-                df_source['SKR51'] = df_source['Konto_Nr']
-            elif t_from in ['Marke_HBV']:
-                df_source['Marke_HBV'] = df_source['Marke']
-            elif t_from in ['Standort_HBV']:
-                df_source['Standort_HBV'] = df_source['Standort'] + '_' + df_source['Marke']
-                df_source['BMC'] = 'BMC_' + df_source['Standort_HBV']
-            elif t_to == 'KTR':
-                df_source['KTR'] = np.where(df_source['Kostenträger_Quelle'] == 'TZ', 'KTR_TZ_' + df_source['Kostenträger'], 'KTR_00')
-                df_source['KTR'] = np.where(df_source['Kostenträger_Quelle'].isin(['NW', 'SC']), 'KTR_' + df_source['Kostenträger_Quelle'] +
-                                            '_' + df_source['Marke'] + '_' + df_source['Kostenträger'], df_source['KTR'])
+            if t_to == "SKR51":
+                df_source["SKR51"] = df_source["Konto_Nr"]
+            elif t_from in ["Marke_HBV"]:
+                df_source["Marke_HBV"] = df_source["Marke"]
+            elif t_from in ["Standort_HBV"]:
+                df_source["Standort_HBV"] = df_source["Standort"] + "_" + df_source["Marke"]
+                df_source["BMC"] = "BMC_" + df_source["Standort_HBV"]
+            elif t_to == "KTR":
+                df_source["KTR"] = np.where(
+                    df_source["Kostenträger_Quelle"] == "TZ", "KTR_TZ_" + df_source["Kostenträger"], "KTR_00"
+                )
+                df_source["KTR"] = np.where(
+                    df_source["Kostenträger_Quelle"].isin(["NW", "SC"]),
+                    "KTR_"
+                    + df_source["Kostenträger_Quelle"]
+                    + "_"
+                    + df_source["Marke"]
+                    + "_"
+                    + df_source["Kostenträger"],
+                    df_source["KTR"],
+                )
             else:
-                df_source[t_to] = t_to + '_' + df_source[t_from]
-            df_source = df_source.merge(df_translate[t_from], how='left', on=[t_to], suffixes=(None, '_' + t_to))
-            df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
-
-        df_source['Konto_Nr_SKR51'] = df_source['MAR_Nr'] + '-' + df_source['STA_Nr'] + '-' + df_source['SKR51_Nr'] + '-' + \
-            df_source['KST_Nr'] + '-' + df_source['ABS_Nr'] + '-' + df_source['KTR_Nr']
-        df_source['Konto_Nr_Händler'] = df_source['Marke'] + '-' + df_source['Standort'] + '-' + df_source['Konto_Nr'] + '-' + \
-            df_source['Kostenstelle'] + '-' + df_source['Absatzkanal'] + '-' + df_source['Kostenträger']
+                df_source[t_to] = t_to + "_" + df_source[t_from]
+            df_source = df_source.merge(df_translate[t_from], how="left", on=[t_to], suffixes=(None, "_" + t_to))
+            df_source[t_to + "_Nr"] = np.where(
+                df_source[t_to + "_Nr"].isna(), df_source[t_from], df_source[t_to + "_Nr"]
+            )
+
+        df_source["Konto_Nr_SKR51"] = (
+            df_source["MAR_Nr"]
+            + "-"
+            + df_source["STA_Nr"]
+            + "-"
+            + df_source["SKR51_Nr"]
+            + "-"
+            + df_source["KST_Nr"]
+            + "-"
+            + df_source["ABS_Nr"]
+            + "-"
+            + df_source["KTR_Nr"]
+        )
+        df_source["Konto_Nr_Händler"] = (
+            df_source["Marke"]
+            + "-"
+            + df_source["Standort"]
+            + "-"
+            + df_source["Konto_Nr"]
+            + "-"
+            + df_source["Kostenstelle"]
+            + "-"
+            + df_source["Absatzkanal"]
+            + "-"
+            + df_source["Kostenträger"]
+        )
         # df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)
-        df_source['MAR_Nr_MAR'] = np.where(df_source['MAR_Nr_MAR'].isna(), '0000', df_source['MAR_Nr_MAR'])
-        from_label = ['MAR_Nr', 'STA_Nr', 'SKR51_Nr', 'KST_Nr', 'ABS_Nr', 'KTR_Nr', 'KTR_Ebene', 'Konto_Nr_Händler',
-                      'Konto_Nr_SKR51', 'MAR_Nr_MAR', 'BMC_Nr']
-        to_label = ['Marke', 'Standort', 'Konto_Nr', 'Kostenstelle', 'Absatzkanal', 'Kostenträger',
-                    'Kostenträger_Ebene', 'Konto_Nr_Händler', 'Konto_Nr_SKR51', 'Marke_HBV', 'Standort_HBV']
+        df_source["MAR_Nr_MAR"] = np.where(df_source["MAR_Nr_MAR"].isna(), "0000", df_source["MAR_Nr_MAR"])
+        from_label = [
+            "MAR_Nr",
+            "STA_Nr",
+            "SKR51_Nr",
+            "KST_Nr",
+            "ABS_Nr",
+            "KTR_Nr",
+            "KTR_Ebene",
+            "Konto_Nr_Händler",
+            "Konto_Nr_SKR51",
+            "MAR_Nr_MAR",
+            "BMC_Nr",
+        ]
+        to_label = [
+            "Marke",
+            "Standort",
+            "Konto_Nr",
+            "Kostenstelle",
+            "Absatzkanal",
+            "Kostenträger",
+            "Kostenträger_Ebene",
+            "Konto_Nr_Händler",
+            "Konto_Nr_SKR51",
+            "Marke_HBV",
+            "Standort_HBV",
+        ]
         df_combined = df_source[from_label].rename(columns=dict(zip(from_label, to_label)))
-        df_combined.to_csv(f"{self.config['path2']}/Kontenrahmen_uebersetzt.csv", sep=';', encoding='latin-1', index=False)
+        df_combined.to_csv(
+            f"{self.config['path2']}/Kontenrahmen_uebersetzt.csv", sep=";", encoding="latin-1", index=False
+        )
 
     def skr51_translate2(self, accounts_combined_file):
-        df = self.accounts_from_csv(self.config['struct'])
+        df = self.accounts_from_csv(self.config["struct"])
 
         df_list = []
-        for i, s in enumerate(self.config['struct'].keys()):
-            from_label = ['Konto_Nr', 'Ebene' + str(i * 10 + 1), 'Ebene' + str(i * 10 + 2), 'Ebene' + str(i * 10 + 3)]
-            to_label = ['Konto_Nr', 'key', 'value', 'value2']
+        for i, s in enumerate(self.config["struct"].keys()):
+            from_label = ["Konto_Nr", "Ebene" + str(i * 10 + 1), "Ebene" + str(i * 10 + 2), "Ebene" + str(i * 10 + 3)]
+            to_label = ["Konto_Nr", "key", "value", "value2"]
             df_temp = df[from_label].rename(columns=dict(zip(from_label, to_label)))
-            df_temp['key'] = '{' + s + '/' + df_temp['key'] + '}'
-            df_list.append(df_temp[df_temp['value'] != ''])
+            df_temp["key"] = "{" + s + "/" + df_temp["key"] + "}"
+            df_list.append(df_temp[df_temp["value"] != ""])
         df_translate = pd.concat(df_list)
 
         # df_translate.to_csv(f"{self.config['path2']}/SKR51_Variablen.csv", sep=';', encoding='latin-1', index=False)
 
-        df_source = pd.read_csv(accounts_combined_file, decimal=',', sep=';', encoding='latin-1',
-                                converters={i: str for i in range(0, 200)})
-        df_source = df_source[df_source['Konto_Nr'].str.contains('_STK') == False]
-        df_source['Konto_Nr_Gesamt'] = df_source['Konto_Nr']
-        df_source['Konto_Nr'] = np.where(df_source['Konto_Nr'].str.contains(r'^[4578]'), df_source['Konto_Nr'] + '_' +
-                                         df_source['Kostenstelle'].str.slice(stop=1), df_source['Konto_Nr'])
-        df_source['Konto_Nr'] = np.where(df_source['Konto_Nr'].str.contains(r'^5\d+_4'), df_source['Konto_Nr'] +
-                                         df_source['Kostenstelle'].str.slice(start=1, stop=2), df_source['Konto_Nr'])
-
-        df_source = df_source.merge(df, how='left', on=['Konto_Nr'])
+        df_source = pd.read_csv(
+            accounts_combined_file, decimal=",", sep=";", encoding="latin-1", converters={i: str for i in range(0, 200)}
+        )
+        df_source = df_source[df_source["Konto_Nr"].str.contains("_STK") == False]
+        df_source["Konto_Nr_Gesamt"] = df_source["Konto_Nr"]
+        df_source["Konto_Nr"] = np.where(
+            df_source["Konto_Nr"].str.contains(r"^[4578]"),
+            df_source["Konto_Nr"] + "_" + df_source["Kostenstelle"].str.slice(stop=1),
+            df_source["Konto_Nr"],
+        )
+        df_source["Konto_Nr"] = np.where(
+            df_source["Konto_Nr"].str.contains(r"^5\d+_4"),
+            df_source["Konto_Nr"] + df_source["Kostenstelle"].str.slice(start=1, stop=2),
+            df_source["Konto_Nr"],
+        )
+
+        df_source = df_source.merge(df, how="left", on=["Konto_Nr"])
         # rows = df_source.shape[0]
-        df_source['value'] = ''
+        df_source["value"] = ""
 
         cols = get_default_cols(0)
         for t_from, t_to in self.translate.items():
-            if t_from in ['Marke_HBV', 'Standort_HBV']:
+            if t_from in ["Marke_HBV", "Standort_HBV"]:
                 continue
-            if t_from == 'Konto_Nr':
+            if t_from == "Konto_Nr":
                 df_source[t_to] = df_source[t_from]
             else:
-                df_source[t_to] = t_to + '_' + df_source[t_from]
+                df_source[t_to] = t_to + "_" + df_source[t_from]
             for e in cols:
-                df_source = df_source.merge(df_translate, how='left', left_on=[t_to, e], right_on=['Konto_Nr', 'key'],
-                                            suffixes=(None, '_' + t_to + '_' + e))
-                df_source[e] = np.where(df_source['value_' + t_to + '_' + e].notna(), df_source['value_' + t_to + '_' + e], df_source[e])
+                df_source = df_source.merge(
+                    df_translate,
+                    how="left",
+                    left_on=[t_to, e],
+                    right_on=["Konto_Nr", "key"],
+                    suffixes=(None, "_" + t_to + "_" + e),
+                )
+                df_source[e] = np.where(
+                    df_source["value_" + t_to + "_" + e].notna(), df_source["value_" + t_to + "_" + e], df_source[e]
+                )
                 # if df_source.shape[0] > rows:
                 #     print(t_to + '_' + e + ': ' + str(df_source.shape[0]))
         # df_source.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
         # df_source[t_to + '_Nr'] = np.where(df_source[t_to + '_Nr'].isna(), df_source[t_from], df_source[t_to + '_Nr'])
         for e in cols:
-            df_source[e] = np.where(df_source[e].str.startswith('{'), df_source[e].str.extract(r'\/(.*)}', expand=False) +
-                                    ' falsch', df_source[e])    # df_source[e].str.extract(r'/(.*)}') +
-            df_source[e] = np.where(df_source[e] == '[KTR]', df_source['Kostenträger_Ebene'], df_source[e])
+            df_source[e] = np.where(
+                df_source[e].str.startswith("{"),
+                df_source[e].str.extract(r"\/(.*)}", expand=False) + " falsch",
+                df_source[e],
+            )  # df_source[e].str.extract(r'/(.*)}') +
+            df_source[e] = np.where(df_source[e] == "[KTR]", df_source["Kostenträger_Ebene"], df_source[e])
         # df_all[df_all['Ebene1'] == ]
         # print(df_source.head())
-        df_source['Konto_neu'] = df_source['Marke'] + '-' + df_source['Standort'] + '-' + df_source['Konto_Nr'] + '-' + \
-            df_source['Kostenstelle'] + '-' + df_source['Absatzkanal'] + '-' + df_source['Kostenträger'] + ' - ' + \
-            df_source['Konto_Bezeichnung']
-        df_source['Ebene1_empty'] = df_source['Ebene1'].isna()    # , df_source['Ebene1'].map(lambda x: x == ''))
-        df_source['Konto_neu'] = np.where(df_source['Ebene1_empty'], 'keine Zuordnung', df_source['Konto_neu'])
-        df_source['Ebene1'] = np.where(df_source['Ebene1_empty'], 'keine Zuordnung', df_source['Ebene1'])
-        df_source['Konto_Gruppe'] = df_source['Konto_Nr'] + ' - ' + df_source['Konto_Bezeichnung']
-        df_source['Konto_Gruppe'] = np.where(df_source['Ebene1_empty'], 'keine Zuordnung', df_source['Konto_Gruppe'])
-        df_source['Konto_Gesamt'] = df_source['Konto_Nr_Gesamt'] + ' - ' + df_source['Konto_Bezeichnung']
-
-        df_amount = df_source[df_source['Ebene1'] == 'Umsatzerlöse'].reset_index()
-        df_amount['Ebene1'] = 'verkaufte Stückzahlen'
-        df_amount['Ebene72'] = 'verkaufte Stückzahlen'
-        df_amount['Konto_neu'] = 'STK ' + df_amount['Konto_neu']
-        df_amount['Konto_Nr_Händler'] = df_amount['Konto_Nr_Händler'] + '_STK'
-        df_amount['Konto_Gruppe'] = 'STK ' + df_amount['Konto_Gruppe']
-        df_amount['Konto_Gesamt'] = 'STK ' + df_amount['Konto_Gesamt']
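+        # build the combined account key: Marke-Standort-Konto_Nr-Kostenstelle-Absatzkanal-Kostenträger - Bezeichnung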
+        df_source["Konto_neu"] = (
+            df_source["Marke"]
+            + "-"
+            + df_source["Standort"]
+            + "-"
+            + df_source["Konto_Nr"]
+            + "-"
+            + df_source["Kostenstelle"]
+            + "-"
+            + df_source["Absatzkanal"]
+            + "-"
+            + df_source["Kostenträger"]
+            + " - "
+            + df_source["Konto_Bezeichnung"]
+        )
+        df_source["Ebene1_empty"] = df_source["Ebene1"].isna()  # , df_source['Ebene1'].map(lambda x: x == ''))
+        df_source["Konto_neu"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_neu"])
+        df_source["Ebene1"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Ebene1"])
+        df_source["Konto_Gruppe"] = df_source["Konto_Nr"] + " - " + df_source["Konto_Bezeichnung"]
+        df_source["Konto_Gruppe"] = np.where(df_source["Ebene1_empty"], "keine Zuordnung", df_source["Konto_Gruppe"])
+        df_source["Konto_Gesamt"] = df_source["Konto_Nr_Gesamt"] + " - " + df_source["Konto_Bezeichnung"]
+
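+        # duplicate the revenue rows (Umsatzerlöse) as unit-count (STK) rows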
+        df_amount = df_source[df_source["Ebene1"] == "Umsatzerlöse"].reset_index()
+        df_amount["Ebene1"] = "verkaufte Stückzahlen"
+        df_amount["Ebene72"] = "verkaufte Stückzahlen"
+        df_amount["Konto_neu"] = "STK " + df_amount["Konto_neu"]
+        df_amount["Konto_Nr_Händler"] = df_amount["Konto_Nr_Händler"] + "_STK"
+        df_amount["Konto_Gruppe"] = "STK " + df_amount["Konto_Gruppe"]
+        df_amount["Konto_Gesamt"] = "STK " + df_amount["Konto_Gesamt"]
 
         df_source = pd.concat([df_source, df_amount])
 
-        df_source['GuV'] = (df_source['Ebene71'] == 'GuV')
-        df_source['Ebene81'] = np.where(df_source['GuV'], df_source['Ebene72'], 'Bilanz')
-        df_source['Ebene82'] = np.where(df_source['GuV'], df_source['Ebene73'], '')
-        df_source['Ebene83'] = np.where(df_source['GuV'], df_source['Ebene74'], '')
-        df_source['Ebene84'] = np.where(df_source['GuV'], df_source['Ebene75'], '')
-        df_source['Ebene85'] = np.where(df_source['GuV'], df_source['Ebene76'], '')
-        df_source['Ebene86'] = np.where(df_source['GuV'], df_source['Ebene77'], '')
-        df_source['Ebene87'] = np.where(df_source['GuV'], df_source['Ebene78'], '')
-        df_source['Ebene88'] = np.where(df_source['GuV'], df_source['Ebene79'], '')
-        df_source['Ebene89'] = np.where(df_source['GuV'], df_source['Ebene80'], '')
-        df_source['Ebene90'] = ''
-        df_source['Ebene71'] = np.where(df_source['GuV'], 'GuV', df_source['Ebene72'])
-        df_source['Ebene72'] = np.where(df_source['GuV'], '', df_source['Ebene73'])
-        df_source['Ebene73'] = np.where(df_source['GuV'], '', df_source['Ebene74'])
-        df_source['Ebene74'] = np.where(df_source['GuV'], '', df_source['Ebene75'])
-        df_source['Ebene75'] = np.where(df_source['GuV'], '', df_source['Ebene76'])
-        df_source['Ebene76'] = np.where(df_source['GuV'], '', df_source['Ebene77'])
-        df_source['Ebene77'] = np.where(df_source['GuV'], '', df_source['Ebene78'])
-        df_source['Ebene78'] = np.where(df_source['GuV'], '', df_source['Ebene79'])
-        df_source['Ebene79'] = np.where(df_source['GuV'], '', df_source['Ebene80'])
-        df_source['Ebene80'] = ''
-        df_source['Susa'] = df_source['Konto_Gruppe'].str.slice(stop=1)
-        df_source['Konto_KST'] = ''
-        df_source['GuV_Bilanz'] = df_source['Konto_Art']
-
-        from_label = ['Konto_neu', 'Konto_Nr_Händler']
-        to_label = ['Konto', 'Acct_Nr']
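+        # re-level the hierarchy: GuV rows copy Ebene72-80 into Ebene81-89, Bilanz rows shift Ebene72-80 up to Ebene71-79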
+        df_source["GuV"] = df_source["Ebene71"] == "GuV"
+        df_source["Ebene81"] = np.where(df_source["GuV"], df_source["Ebene72"], "Bilanz")
+        df_source["Ebene82"] = np.where(df_source["GuV"], df_source["Ebene73"], "")
+        df_source["Ebene83"] = np.where(df_source["GuV"], df_source["Ebene74"], "")
+        df_source["Ebene84"] = np.where(df_source["GuV"], df_source["Ebene75"], "")
+        df_source["Ebene85"] = np.where(df_source["GuV"], df_source["Ebene76"], "")
+        df_source["Ebene86"] = np.where(df_source["GuV"], df_source["Ebene77"], "")
+        df_source["Ebene87"] = np.where(df_source["GuV"], df_source["Ebene78"], "")
+        df_source["Ebene88"] = np.where(df_source["GuV"], df_source["Ebene79"], "")
+        df_source["Ebene89"] = np.where(df_source["GuV"], df_source["Ebene80"], "")
+        df_source["Ebene90"] = ""
+        df_source["Ebene71"] = np.where(df_source["GuV"], "GuV", df_source["Ebene72"])
+        df_source["Ebene72"] = np.where(df_source["GuV"], "", df_source["Ebene73"])
+        df_source["Ebene73"] = np.where(df_source["GuV"], "", df_source["Ebene74"])
+        df_source["Ebene74"] = np.where(df_source["GuV"], "", df_source["Ebene75"])
+        df_source["Ebene75"] = np.where(df_source["GuV"], "", df_source["Ebene76"])
+        df_source["Ebene76"] = np.where(df_source["GuV"], "", df_source["Ebene77"])
+        df_source["Ebene77"] = np.where(df_source["GuV"], "", df_source["Ebene78"])
+        df_source["Ebene78"] = np.where(df_source["GuV"], "", df_source["Ebene79"])
+        df_source["Ebene79"] = np.where(df_source["GuV"], "", df_source["Ebene80"])
+        df_source["Ebene80"] = ""
+        df_source["Susa"] = df_source["Konto_Gruppe"].str.slice(stop=1)
+        df_source["Konto_KST"] = ""
+        df_source["GuV_Bilanz"] = df_source["Konto_Art"]
+
+        from_label = ["Konto_neu", "Konto_Nr_Händler"]
+        to_label = ["Konto", "Acct_Nr"]
 
         df_source = df_source.rename(columns=dict(zip(from_label, to_label)))
 
-        df_source = df_source[['Konto', 'Acct_Nr', 'Konto_Bezeichnung', 'GuV_Bilanz', 'Konto_KST', 'Konto_STK',
-                               'Konto_1', 'Konto_2', 'Konto_3', 'Konto_4', 'Konto_5'] +
-                              get_default_cols(0) + get_default_cols(7) + get_default_cols(8) +
-                              ['Konto_Gruppe', 'Konto_Nr_Gesamt', 'Konto_Gesamt', 'Susa']]
-        df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=';', encoding='latin-1', index=False)
+        df_source = df_source[
+            [
+                "Konto",
+                "Acct_Nr",
+                "Konto_Bezeichnung",
+                "GuV_Bilanz",
+                "Konto_KST",
+                "Konto_STK",
+                "Konto_1",
+                "Konto_2",
+                "Konto_3",
+                "Konto_4",
+                "Konto_5",
+            ]
+            + get_default_cols(0)
+            + get_default_cols(7)
+            + get_default_cols(8)
+            + ["Konto_Gruppe", "Konto_Nr_Gesamt", "Konto_Gesamt", "Susa"]
+        ]
+        df_source.to_csv(f"{self.config['path2']}/SKR51_Uebersetzung.csv", sep=";", encoding="latin-1", index=False)
 
     def skr51_vars(self):
         self.get_structure_and_tree()
         cols = get_default_cols(0)
 
-        df_temp = pd.read_csv(f"{self.config['path']}/Export/Kostentraeger.csv", decimal=',', sep=';',
-                              encoding='latin-1', converters={i: str for i in range(0, 200)})
-        df_temp['value'] = df_temp['Ebene33']
-        df_temp['key'] = '[KTR]'
-        df_temp = df_temp[df_temp['value'].str.contains(' - ')]
-
-        df_list = [df_temp[['key', 'value']]]
-
-        for (s, entries) in self.json_result['flat'].items():
-            df = pd.DataFrame([x['id'].split(';') for x in entries], columns=cols)
-            df['key'] = df[cols[0]].apply(lambda x: '{' + s + '/' + x + '}')
-            df['value'] = df[cols[1]]
-            df_list.append(df[['key', 'value']])
+        df_temp = pd.read_csv(
+            f"{self.config['path']}/Export/Kostentraeger.csv",
+            decimal=",",
+            sep=";",
+            encoding="latin-1",
+            converters={i: str for i in range(0, 200)},
+        )
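+        # cost objects: Ebene33 provides the values substituted for the [KTR] placeholder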
+        df_temp["value"] = df_temp["Ebene33"]
+        df_temp["key"] = "[KTR]"
+        df_temp = df_temp[df_temp["value"].str.contains(" - ")]
+
+        df_list = [df_temp[["key", "value"]]]
+
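+        # build "{<structure>/<first id column>}" keys, taking the value from the second id column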
+        for s, entries in self.json_result["flat"].items():
+            df = pd.DataFrame([x["id"].split(";") for x in entries], columns=cols)
+            df["key"] = df[cols[0]].apply(lambda x: "{" + s + "/" + x + "}")
+            df["value"] = df[cols[1]]
+            df_list.append(df[["key", "value"]])
         df = pd.concat(df_list)
-        df_vars = df[df['value'] != '']
+        df_vars = df[df["value"] != ""]
         # df_vars.to_csv(f"{self.config['path2']}/SKR51_Variablen2.csv", sep=';', encoding='latin-1', index=False)
 
-        df_main = pd.DataFrame([x['id'].split(';') for x in self.json_result['flat']['SKR51']], columns=cols)
-        df_main['value'] = ''
+        df_main = pd.DataFrame([x["id"].split(";") for x in self.json_result["flat"]["SKR51"]], columns=cols)
+        df_main["value"] = ""
         for c in cols:
-            df_main = df_main.merge(df_vars, how='left', left_on=c, right_on='key', suffixes=(None, '_' + c))
-            df_main[c] = np.where(df_main['value_' + c].isna(), df_main[c], df_main['value_' + c])
+            df_main = df_main.merge(df_vars, how="left", left_on=c, right_on="key", suffixes=(None, "_" + c))
+            df_main[c] = np.where(df_main["value_" + c].isna(), df_main[c], df_main["value_" + c])
 
-        df_amount = df_main[df_main['Ebene1'] == 'Umsatzerlöse'].reset_index()
-        df_amount['Ebene1'] = 'verkaufte Stückzahlen'
+        df_amount = df_main[df_main["Ebene1"] == "Umsatzerlöse"].reset_index()
+        df_amount["Ebene1"] = "verkaufte Stückzahlen"
         df_main = pd.concat([df_main, df_amount])
 
         # from_label = cols
-        to_label = cols   # get_default_cols(9)
+        to_label = cols  # get_default_cols(9)
         # df_main = df_main.rename(columns=dict(zip(from_label, to_label)))
-        df_main[to_label].to_csv(f"{self.config['path2']}/SKR51_Struktur.csv", sep=';', encoding='latin-1', index_label='Sortierung')
+        df_main[to_label].to_csv(
+            f"{self.config['path2']}/SKR51_Struktur.csv", sep=";", encoding="latin-1", index_label="Sortierung"
+        )
 
 
 def gcstruct_uebersetzung():
     # base_dir = 'P:/SKR51_GCStruct/'
-    base_dir = Path('.').absolute()
+    base_dir = Path(".").absolute()
     import_dir = base_dir
-    if base_dir.name == 'scripts':
-        if base_dir.parent.parent.name == 'Portal':
+    if base_dir.name == "scripts":
+        if base_dir.parent.parent.name == "Portal":
             base_dir = base_dir.parent.parent.parent
-            import_dir = base_dir.joinpath('Portal/System/IQD/Belege/Kontenrahmen')
+            import_dir = base_dir.joinpath("Portal/System/IQD/Belege/Kontenrahmen")
         else:
             base_dir = base_dir.parent.parent
-            import_dir = base_dir.joinpath('System/OPTIMA/Export')
-    elif not base_dir.joinpath('GCStruct_Aufbereitung').exists():
-        base_dir = Path('//192.168.2.21/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/')
+            import_dir = base_dir.joinpath("System/OPTIMA/Export")
+    elif not base_dir.joinpath("GCStruct_Aufbereitung").exists():
+        base_dir = Path("//192.168.2.21/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/")
         if not base_dir.exists():
-            base_dir = Path('//media/fileserver1/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/')
+            base_dir = Path("//media/fileserver1/verwaltung/Kunden/Luchtenberg/1 Umstellung SKR51/")
         import_dir = base_dir
 
-    struct = GCStruct(str(base_dir.joinpath('GCStruct_Aufbereitung')))
-    struct.skr51_translate(import_dir.glob('Kontenrahmen_kombiniert*.csv'))
-    print('Kontenrahmen_uebersetzt.csv erstellt.')
-    # copyfile('c:/Projekte/Python/Gcstruct/Kontenrahmen_kombiniert.csv', base_dir + 'GCStruct_Modell/Export/Kontenrahmen_kombiniert.csv')
+    struct = GCStruct(str(base_dir.joinpath("GCStruct_Aufbereitung")))
+    struct.skr51_translate(import_dir.glob("Kontenrahmen_kombiniert*.csv"))
+    print("Kontenrahmen_uebersetzt.csv erstellt.")
+    # copyfile('c:/Projekte/Python/Gcstruct/Kontenrahmen_kombiniert.csv',
+    # base_dir + 'GCStruct_Modell/Export/Kontenrahmen_kombiniert.csv')
 
-    struct2 = GCStruct(str(base_dir.joinpath('GCStruct_Modell')))
-    struct2.skr51_translate2(str(base_dir.joinpath('GCStruct_Aufbereitung/Export/Kontenrahmen_uebersetzt.csv')))
-    print('SKR51_Uebersetzung.csv erstellt.')
+    struct2 = GCStruct(str(base_dir.joinpath("GCStruct_Modell")))
+    struct2.skr51_translate2(str(base_dir.joinpath("GCStruct_Aufbereitung/Export/Kontenrahmen_uebersetzt.csv")))
+    print("SKR51_Uebersetzung.csv erstellt.")
     struct2.skr51_vars()
-    print('SKR51_Struktur.csv erstellt.')
+    print("SKR51_Struktur.csv erstellt.")
 
 
 def dresen():
-    struct = GCStruct('c:/projekte/GCHRStruct_Hyundai_Export')
+    struct = GCStruct("c:/projekte/GCHRStruct_Hyundai_Export")
     struct.get_structure_and_tree()
     struct.export()
 
 
 def reisacher():
-    struct = GCStruct('D:/GAPS_BMW/GCStruct_neue_Struktur_Planung', 'D:/Planung/Planner2022/export')
+    struct = GCStruct("D:/GAPS_BMW/GCStruct_neue_Struktur_Planung", "D:/Planung/Planner2022/export")
     struct.get_structure_and_tree()
     # json.dump(res['flat'], open(f"{self.config['path2']}/{self.config['output']}", 'w'), indent=2)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     # struct = GCStruct('c:/projekte/gcstruct_dresen')
     # struct = GCStruct('c:/projekte/python/gcstruct')
     # struct = GCStruct('c:/projekte/python/gcstruct_reisacher_planung')

+ 3 - 3
webservice/gnupg_encrypt.py

@@ -3,15 +3,15 @@ from pathlib import Path
 
 
 def encrypt(source_file):
-    config_dir = Path().cwd() / 'config/gnupg'
+    config_dir = Path().cwd() / "config/gnupg"
 
     gpg = gnupg.GPG(homedir=config_dir)
     # gpg.import_keys(base_dir + 'export.gpg')
     # public_keys = gpg.list_keys()
     # print(public_keys)
 
-    with open(source_file, 'rb') as frh:
-        result = gpg.encrypt(frh, '942170BF95EA785E8D4A5C94D1839587F0E1C01C', output=str(source_file) + '.gpg')
+    with open(source_file, "rb") as frh:
+        result = gpg.encrypt(frh, "942170BF95EA785E8D4A5C94D1839587F0E1C01C", output=str(source_file) + ".gpg")
         # 'BV_IFC; BMW Group <BV_IFC@softlab.de>'
         print(result.stderr)
         #  , verbose=True

+ 45 - 39
webservice/hbv_export.py

@@ -6,77 +6,83 @@ import os
 from pathlib import Path
 
 
-current_year = '2023'
-current_version = 'V1'
+current_year = "2023"
+current_version = "V1"
 
 base_dir = Path().cwd()
-config_dir = base_dir / 'config/hbv'
-export_dir = base_dir / 'export'
-hb_format = config_dir / 'hb_format.csv'
-hb_department = config_dir / 'hb_department.csv'
-hb_translation = config_dir / 'hb_translation.csv'
-plan_amount = export_dir / f'Planner_{current_year}_{current_version}_Stk.csv'
-plan_values = export_dir / f'Planner_{current_year}_{current_version}_Plan.csv'
+config_dir = base_dir / "config/hbv"
+export_dir = base_dir / "export"
+hb_format = config_dir / "hb_format.csv"
+hb_department = config_dir / "hb_department.csv"
+hb_translation = config_dir / "hb_translation.csv"
+plan_amount = export_dir / f"Planner_{current_year}_{current_version}_Stk.csv"
+plan_values = export_dir / f"Planner_{current_year}_{current_version}_Plan.csv"
 
-hb_ignored = export_dir / 'hbv/ignoriert.csv'
+hb_ignored = export_dir / "hbv/ignoriert.csv"
 
-current_date = datetime.now().strftime('%d%m%Y%H%M%S')
+current_date = datetime.now().strftime("%d%m%Y%H%M%S")
 # current_date = '24032021112656'
 
 
 def main():
     # import the translation table
-    df_translation = pd.read_csv(hb_translation, decimal=',', sep=';',
-                                 encoding='latin-1', converters={i: str for i in range(0, 200)})
+    df_translation = pd.read_csv(
+        hb_translation, decimal=",", sep=";", encoding="latin-1", converters={i: str for i in range(0, 200)}
+    )
     # df_translation['column_no_join'] = np.where(df_translation['column_no']
     # .isin(['1', '3', '4']), df_translation['column_no'], '0')
     # import the department mapping
-    df_department = pd.read_csv(hb_department, decimal=',', sep=';',
-                                encoding='latin-1', converters={i: str for i in range(0, 200)})
+    df_department = pd.read_csv(
+        hb_department, decimal=",", sep=";", encoding="latin-1", converters={i: str for i in range(0, 200)}
+    )
 
     # import the plan values
     values_converter = {i: str for i in range(0, 200)}
-    values_converter[4] = lambda x: np.float64(x.replace(',', '.') if x != '' else 0.0)
+    values_converter[4] = lambda x: np.float64(x.replace(",", ".") if x != "" else 0.0)
     values_converter[5] = values_converter[4]
-    df_values = pd.read_csv(plan_values, decimal=',', sep=';',
-                            encoding='latin-1', converters=values_converter)
-    df_values['Gesamt'] = df_values['Gesamt'] + df_values['Periode13']
-    df_values['type'] = '2'
-    df_values['type'] = np.where(df_values['Vstufe 1'].isin(['Materialaufwand']), '3', df_values['type'])
-    df_amount = pd.read_csv(plan_amount, decimal=',', sep=';', 
-                            encoding='latin-1', converters=values_converter)
-    df_amount['type'] = '1'
+    df_values = pd.read_csv(plan_values, decimal=",", sep=";", encoding="latin-1", converters=values_converter)
+    df_values["Gesamt"] = df_values["Gesamt"] + df_values["Periode13"]
+    df_values["type"] = "2"
+    df_values["type"] = np.where(df_values["Vstufe 1"].isin(["Materialaufwand"]), "3", df_values["type"])
+    df_amount = pd.read_csv(plan_amount, decimal=",", sep=";", encoding="latin-1", converters=values_converter)
+    df_amount["type"] = "1"
     df: pd.DataFrame = df_values.append(df_amount)
 
     # make all plan values positive
-    df['Minus1'] = np.where(df['Vstufe 1'].isin(['Umsatzerlöse', 'Verk. Stückzahlen']) | df['Zeile'].isin(['7410', '7440']), 1, -1)
-    df['Gesamt'] = df['Gesamt'] * df['Minus1']
+    df["Minus1"] = np.where(
+        df["Vstufe 1"].isin(["Umsatzerlöse", "Verk. Stückzahlen"]) | df["Zeile"].isin(["7410", "7440"]), 1, -1
+    )
+    df["Gesamt"] = df["Gesamt"] * df["Minus1"]
 
     # translate the plan values
-    df = df.merge(df_department, how='inner', left_on='Betrieb Nr', right_on='department_id')
-    df = df.merge(df_translation, how='left', left_on=['Zeile', 'type'], right_on=['from', 'type'])
+    df = df.merge(df_department, how="inner", left_on="Betrieb Nr", right_on="department_id")
+    df = df.merge(df_translation, how="left", left_on=["Zeile", "type"], right_on=["from", "type"])
     # missing translations
-    df_ignored = df[(df['to'].isna()) & (df['Gesamt'] != 0)]
-    df_ignored.to_csv(hb_ignored, decimal=',', sep=';', encoding='latin-1', index=False)
+    df_ignored = df[(df["to"].isna()) & (df["Gesamt"] != 0)]
+    df_ignored.to_csv(hb_ignored, decimal=",", sep=";", encoding="latin-1", index=False)
 
     # format and export the plan values
-    rename_from = ['bm_code', 'BV_NUMMER', 'FILIAL_NR', 'to', 'column_no', 'Jahr', 'Gesamt']
-    rename_to = ['BM_CODE', 'BV_NUMMER', 'FILIAL_NR', 'ZEILE', 'SPALTE', 'JAHR', 'WERT']
-    df_valid = df[df['to'].notna()].rename(columns=dict(zip(rename_from, rename_to)))
-    df_valid['SPALTE'] = df_valid['SPALTE'].str.zfill(3)
-    group_by = ['BM_CODE', 'BV_NUMMER', 'FILIAL_NR']
+    rename_from = ["bm_code", "BV_NUMMER", "FILIAL_NR", "to", "column_no", "Jahr", "Gesamt"]
+    rename_to = ["BM_CODE", "BV_NUMMER", "FILIAL_NR", "ZEILE", "SPALTE", "JAHR", "WERT"]
+    df_valid = df[df["to"].notna()].rename(columns=dict(zip(rename_from, rename_to)))
+    df_valid["SPALTE"] = df_valid["SPALTE"].str.zfill(3)
+    group_by = ["BM_CODE", "BV_NUMMER", "FILIAL_NR"]
     df_valid = df_valid[rename_to].groupby(group_by)
 
     for group in df_valid.groups:
         g = dict(zip(group_by, group))
-        filename = export_dir / f"hbv/{current_year}/{g['BV_NUMMER']}_{g['FILIAL_NR']}/HB{g['BM_CODE']}{current_year}00{g['BV_NUMMER']}{g['FILIAL_NR']}0{current_date}.dat"
+        filename = (
+            export_dir
+            / f"hbv/{current_year}/{g['BV_NUMMER']}_{g['FILIAL_NR']}"
+            / f"HB{g['BM_CODE']}{current_year}00{g['BV_NUMMER']}{g['FILIAL_NR']}0{current_date}.dat"
+        )
         os.makedirs(filename.parent, exist_ok=True)
         df_group = df_valid.get_group(group).groupby(rename_to[:-1]).sum().reset_index()
-        with open(filename, 'w') as fwh:
-            for row in df_group.to_dict(orient='records'):
+        with open(filename, "w") as fwh:
+            for row in df_group.to_dict(orient="records"):
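+                # fixed-width HBV record: "I0155" + BV_NUMMER + FILIAL_NR + "0" + ZEILE + SPALTE + "00" + JAHR + WERT (16.2f) + "03"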
                 fwh.write("I0155{BV_NUMMER}{FILIAL_NR}0{ZEILE}{SPALTE}00{JAHR}{WERT:16.2f}03\n".format(**row))
         encrypt(filename)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

+ 45 - 37
webservice/marketing_plan.py

@@ -5,50 +5,58 @@ import json
 
 
 base_dir = Path(__file__).parent.parent
-filename = base_dir / 'data' / 'Marketingplanung_AHR_2023_V1.xls'
-output = base_dir / 'export' / 'marketing_2023.json'
-output_csv = base_dir / 'export' / 'marketing_2023.csv'
-debug_csv = base_dir / 'export' / 'marketing_2023_rest.csv'
-
-department = {
-    ' MM': '10', 
-    ' ULM': '40', 
-    ' LL': '50', 
-    'KRU': '30', 
-    'GZ': '55', 
-    'AAM': '82', 
-    'AAM-MOT': '81'
-}
+filename = base_dir / "data" / "Marketingplanung_AHR_2023_V1.xls"
+output = base_dir / "export" / "marketing_2023.json"
+output_csv = base_dir / "export" / "marketing_2023.csv"
+debug_csv = base_dir / "export" / "marketing_2023_rest.csv"
+
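+# Excel sheet name -> Betrieb Nr (department number)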
+department = {" MM": "10", " ULM": "40", " LL": "50", "KRU": "30", "GZ": "55", "AAM": "82", "AAM-MOT": "81"}
 
 columns = [
-    'Bezeichnung', 'Segment', 'Termin', 
-    'Wert1', 'Wert2', 'Wert3', 'Wert4', 'Wert5', 'Wert6',
-    'Wert7', 'Wert8', 'Wert9', 'Wert10', 'Wert11', 'Wert12', 'Summe'
+    "Bezeichnung",
+    "Segment",
+    "Termin",
+    "Wert1",
+    "Wert2",
+    "Wert3",
+    "Wert4",
+    "Wert5",
+    "Wert6",
+    "Wert7",
+    "Wert8",
+    "Wert9",
+    "Wert10",
+    "Wert11",
+    "Wert12",
+    "Summe",
 ]
 
-defaults = ['', '', '', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+defaults = ["", "", "", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
 types = [str] * 3  # + [float] * 13
 
 xls = pd.read_excel(
-    filename, 
+    filename,
     sheet_name=list(department.keys()),
     skiprows=4,
-    usecols='A:P',
+    usecols="A:P",
     names=columns,
     converters=dict(zip(columns, types)),
 )
 xls_temp = []
 
 for sheet, dept_no in department.items():
-    xls[sheet]['Betrieb Nr'] = dept_no
+    xls[sheet]["Betrieb Nr"] = dept_no
     xls[sheet] = xls[sheet].fillna(value=dict(zip(columns, defaults)))
-    xls[sheet]['Konto Nr'] = np.where(xls[sheet]['Segment'].str.startswith('Kto '), xls[sheet]['Segment'].str.slice(4, 9), 'Rest')
-    # xls[sheet]['Konto Nr'] = np.where(xls[sheet]['Segment'].str.startswith('50% '), xls[sheet]['Segment'].str.slice(-5), xls[sheet]['Konto Nr'])
-    temp = xls[sheet][xls[sheet]['Segment'].str.startswith('50% ')].copy()
-    temp['Konto Nr'] = temp['Segment'].str.slice(8, 13)
+    xls[sheet]["Konto Nr"] = np.where(
+        xls[sheet]["Segment"].str.startswith("Kto "), xls[sheet]["Segment"].str.slice(4, 9), "Rest"
+    )
+    # xls[sheet]['Konto Nr'] = np.where(xls[sheet]['Segment'].str.startswith('50% '),
+    # xls[sheet]['Segment'].str.slice(-5), xls[sheet]['Konto Nr'])
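+    # "50% ..." rows are booked on two accounts: Segment[8:13] and Segment[-5:]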
+    temp = xls[sheet][xls[sheet]["Segment"].str.startswith("50% ")].copy()
+    temp["Konto Nr"] = temp["Segment"].str.slice(8, 13)
     xls_temp.append(temp)
     temp2 = temp.copy()
-    temp2['Konto Nr'] = temp2['Segment'].str.slice(-5)
+    temp2["Konto Nr"] = temp2["Segment"].str.slice(-5)
     xls_temp.append(temp2)
 
 xls_temp_concat = pd.concat(xls_temp)
@@ -57,29 +65,29 @@ for c in columns[3:]:
 
 df = pd.concat(list(xls.values()) + [xls_temp_concat])
 
-df_rest = df[(df['Konto Nr'] == 'Rest') & (df['Summe'] != 0)]
-df_rest.to_csv(debug_csv, sep=';', decimal=',')
-df = df[df['Konto Nr'] != 'Rest']
+df_rest = df[(df["Konto Nr"] == "Rest") & (df["Summe"] != 0)]
+df_rest.to_csv(debug_csv, sep=";", decimal=",")
+df = df[df["Konto Nr"] != "Rest"]
 
-df['Wert11'] = df['Wert11'].astype(float)
-df['Wert12'] = df['Wert12'].astype(float)
+df["Wert11"] = df["Wert11"].astype(float)
+df["Wert12"] = df["Wert12"].astype(float)
 
 for c in columns[3:]:
     df[c] = df[c].round()
-df = df.drop(labels=['Summe'], axis=1)
+df = df.drop(labels=["Summe"], axis=1)
 
-df = df.groupby(['Konto Nr', 'Betrieb Nr']).sum()
+df = df.groupby(["Konto Nr", "Betrieb Nr"]).sum()
 print(df.info())
 # df.reset_index(inplace=True)
 
-df.to_csv(output_csv, sep=';', decimal=',')
-dict_split = df.to_dict(orient='split')
+df.to_csv(output_csv, sep=";", decimal=",")
+dict_split = df.to_dict(orient="split")
 
 res = {}
-for index, data in zip(dict_split['index'], dict_split['data']):
+for index, data in zip(dict_split["index"], dict_split["data"]):
     kto, dept = index
     if kto not in res:
         res[kto] = {}
     res[kto][dept] = data
 
-json.dump(res, open(output, 'w'), indent=2)
+json.dump(res, open(output, "w"), indent=2)

+ 112 - 60
webservice/plan_export.py

@@ -1,21 +1,57 @@
 import pandas as pd
 import numpy as np
 
-base_dir = '/home/robert/projekte/planner/export'
+base_dir = "/home/robert/projekte/planner/export"
 
-id_header = ['Ebene' + str(i) for i in range(1, 11)]
+id_header = ["Ebene" + str(i) for i in range(1, 11)]
 # values2_header = ['VJ', 'AJ', 'FC', 'Plan_ori', 'Plan_Prozent', 'Stk', 'VAK', 'BE_Prozent', 'Plan_VJ', 'Plan_Stk_VJ', 'Plan',
 #                   'Jan', 'Feb', 'Mar', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez', 'Periode13']
-values2_header = ['Plan', 'Jan', 'Feb', 'Mar', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez', 'Periode13',
-                  'Plan_ori', 'Plan_Prozent', 'Stk', 'VAK', 'BE_Prozent', 'frei', 
-                  'VJ', 'VJ_Stk', 'AJ', 'AJ_Stk', 'AJ_Okt', 'AJ_Okt_Stk', 'FC', 'FC_Stk', 'Plan_VJ', 'Plan_VJ_Stk']
-season_header = ['Jan', 'Feb', 'Mar', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez']
-info_header = ['text', 'costcenter', 'department']
+values2_header = [
+    "Plan",
+    "Jan",
+    "Feb",
+    "Mar",
+    "Apr",
+    "Mai",
+    "Jun",
+    "Jul",
+    "Aug",
+    "Sep",
+    "Okt",
+    "Nov",
+    "Dez",
+    "Periode13",
+    "Plan_ori",
+    "Plan_Prozent",
+    "Stk",
+    "VAK",
+    "BE_Prozent",
+    "frei",
+    "VJ",
+    "VJ_Stk",
+    "AJ",
+    "AJ_Stk",
+    "AJ_Okt",
+    "AJ_Okt_Stk",
+    "FC",
+    "FC_Stk",
+    "Plan_VJ",
+    "Plan_VJ_Stk",
+]
+season_header = ["Jan", "Feb", "Mar", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"]
+info_header = ["text", "costcenter", "department"]
 header = info_header + id_header + values2_header
 season_export_header = info_header + season_header
 
-source_header = ['department', 'text', 'costcenter', 'Ebene1', 'Plan', 'Periode13']
-export_header = ['Betrieb Nr', 'Zeile mit Bez', 'Bereich', 'Vstufe 1', 'Gesamt', 'Periode13']    # 'Version', 'Konto', 'Jahr']
+source_header = ["department", "text", "costcenter", "Ebene1", "Plan", "Periode13"]
+export_header = [
+    "Betrieb Nr",
+    "Zeile mit Bez",
+    "Bereich",
+    "Vstufe 1",
+    "Gesamt",
+    "Periode13",
+]  # 'Version', 'Konto', 'Jahr']
 
 
 def expand(df, header, values_label):
@@ -25,73 +61,89 @@ def expand(df, header, values_label):
 
 
 def apply_season(df):
-    df['Saison'] = df['Ebene1'].str.contains('Umsatzerlöse|Materialaufwand|Verkaufsabh. Kosten')
-    df['saison_sum'] = df[season_header].sum(axis=1)
+    df["Saison"] = df["Ebene1"].str.contains("Umsatzerlöse|Materialaufwand|Verkaufsabh. Kosten")
+    df["saison_sum"] = df[season_header].sum(axis=1)
     for i, key in enumerate(season_header):
-        df['temp'] = np.where((df['Saison']) & (df[key + '_2'] != 8.3333), df['Plan'] * df[key + '_2'] / 100, df['Plan'] / 12)
-        df[key] = np.where(df['saison_sum'] == 0, df['temp'], df[key] * df['Minus1'])
-    df['Dez'] = df['Plan'] - df[season_header].sum(axis=1) + df['Dez']
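+        # where no monthly values were entered, spread Plan by the season profile
+        # (or evenly over 12 months if the profile is the flat 8.3333); otherwise keep the entered values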
+        df["temp"] = np.where(
+            (df["Saison"]) & (df[key + "_2"] != 8.3333), df["Plan"] * df[key + "_2"] / 100, df["Plan"] / 12
+        )
+        df[key] = np.where(df["saison_sum"] == 0, df["temp"], df[key] * df["Minus1"])
+    df["Dez"] = df["Plan"] - df[season_header].sum(axis=1) + df["Dez"]
     return df
 
 
 def data_cleansing(filename):
     df = pd.read_json(filename)
-    df['values2'] = df['values2'].apply(lambda v: list(v.items()))
-    df = df.explode('values2')
-    df['department'], df['values2'] = zip(*df['values2'])
+    df["values2"] = df["values2"].apply(lambda v: list(v.items()))
+    df = df.explode("values2")
+    df["department"], df["values2"] = zip(*df["values2"])
 
-    df['id'] = df['id'].str.split(';')
-    df = expand(df, id_header, 'id')
-    df = expand(df, values2_header, 'values2')
+    df["id"] = df["id"].str.split(";")
+    df = expand(df, id_header, "id")
+    df = expand(df, values2_header, "values2")
     return df
 
 
 def export_plan(filename, version, target_year, amount_value):
-    df = data_cleansing(f'{base_dir}/{filename}.json')
-    season = df[(df['level'] == 2) & (df['Ebene1'] == 'Umsatzerlöse')].copy()
-    season['Dez'] = (100 - season[season_header].sum(axis=1) + season['Dez']).round(4)
-    season[season_export_header].to_csv(f'{base_dir}/Planner_{target_year}_{version}_Saison.csv',
-                                        encoding='latin_1', sep=';', decimal=',', index=False)
-
-    df['Minus1'] = np.where(df['Ebene1'] != 'Umsatzerlöse', -1, 1)
-    df['Plan'] = df[amount_value] * df['Minus1']
-    if amount_value == 'Plan':
-        df['Periode13'] = df['Periode13'] * df['Minus1']
+    df = data_cleansing(f"{base_dir}/{filename}.json")
+    season = df[(df["level"] == 2) & (df["Ebene1"] == "Umsatzerlöse")].copy()
+    season["Dez"] = (100 - season[season_header].sum(axis=1) + season["Dez"]).round(4)
+    season[season_export_header].to_csv(
+        f"{base_dir}/Planner_{target_year}_{version}_Saison.csv", encoding="latin_1", sep=";", decimal=",", index=False
+    )
+
+    df["Minus1"] = np.where(df["Ebene1"] != "Umsatzerlöse", -1, 1)
+    df["Plan"] = df[amount_value] * df["Minus1"]
+    if amount_value == "Plan":
+        df["Periode13"] = df["Periode13"] * df["Minus1"]
     else:
-        df['Periode13'] = 0
-    plan = df[df['planlevel'] == True]
+        df["Periode13"] = 0
+    plan = df[df["planlevel"] == True]
 
-    plan = pd.merge(plan, season, how='left', on=['Ebene2', 'department'], suffixes=('', '_2'))
+    plan = pd.merge(plan, season, how="left", on=["Ebene2", "department"], suffixes=("", "_2"))
     plan = apply_season(plan)
     plan = plan[source_header + season_header].rename(columns=dict(zip(source_header, export_header)))
 
     # Reisacher special conditions
-    plan['Zeile'] = plan['Zeile mit Bez'].str.slice(stop=4)
-    plan['Zeile'] = np.where(plan['Zeile mit Bez'].isin(['BMW aus Leasingrücklauf BFS', 'BMW aus Leasingrücklauf Alphabet']),
-                             '3040', plan['Zeile'])
-    plan['Zeile'] = np.where(plan['Zeile mit Bez'].isin(['BMW an Wiederverkäufer BFS', 'BMW an Wiederverkäufer Alphabet']),
-                             '3120', plan['Zeile'])
-
-    desciption = pd.read_csv(f'{base_dir}/../data/Planner_Zeilen_Bez.csv', sep=';', encoding='latin-1', dtype={0: str, 1: str})
-    plan = pd.merge(plan, desciption, how='left', on=['Zeile'], suffixes=['', '_3'])
-    plan['Zeile mit Bez'] = plan['Zeile mit Bez_3']
-    plan.drop(['Zeile mit Bez_3'], axis=1, inplace=True)
-
-    if amount_value == 'Stk':
-        plan = plan[plan['Vstufe 1'] == 'Umsatzerlöse']
-        plan['Vstufe 1'] = 'Verk. Stückzahlen'
-
-    plan['Version'] = version
-    plan['Konto'] = ''
-    plan['Jahr'] = target_year
-
-    plan.to_csv(f'{base_dir}/Planner_{target_year}_{version}_{amount_value}.csv', encoding='latin_1',
-                sep=';', decimal=',', index=False)
-
-
-if __name__ == '__main__':
+    plan["Zeile"] = plan["Zeile mit Bez"].str.slice(stop=4)
+    plan["Zeile"] = np.where(
+        plan["Zeile mit Bez"].isin(["BMW aus Leasingrücklauf BFS", "BMW aus Leasingrücklauf Alphabet"]),
+        "3040",
+        plan["Zeile"],
+    )
+    plan["Zeile"] = np.where(
+        plan["Zeile mit Bez"].isin(["BMW an Wiederverkäufer BFS", "BMW an Wiederverkäufer Alphabet"]),
+        "3120",
+        plan["Zeile"],
+    )
+
+    desciption = pd.read_csv(
+        f"{base_dir}/../data/Planner_Zeilen_Bez.csv", sep=";", encoding="latin-1", dtype={0: str, 1: str}
+    )
+    plan = pd.merge(plan, desciption, how="left", on=["Zeile"], suffixes=["", "_3"])
+    plan["Zeile mit Bez"] = plan["Zeile mit Bez_3"]
+    plan.drop(["Zeile mit Bez_3"], axis=1, inplace=True)
+
+    if amount_value == "Stk":
+        plan = plan[plan["Vstufe 1"] == "Umsatzerlöse"]
+        plan["Vstufe 1"] = "Verk. Stückzahlen"
+
+    plan["Version"] = version
+    plan["Konto"] = ""
+    plan["Jahr"] = target_year
+
+    plan.to_csv(
+        f"{base_dir}/Planner_{target_year}_{version}_{amount_value}.csv",
+        encoding="latin_1",
+        sep=";",
+        decimal=",",
+        index=False,
+    )
+
+
+if __name__ == "__main__":
     # export_plan('V3', '2021', 'Plan')
     # export_plan('V3', '2021', 'Stk')
-    filename = '../save/2023_V1_20230214225753'
-    export_plan(filename, 'V1', '2023', 'Plan')
-    export_plan(filename, 'V1', '2023', 'Stk')
+    filename = "../save/2023_V1_20230214225753"
+    export_plan(filename, "V1", "2023", "Plan")
+    export_plan(filename, "V1", "2023", "Stk")

+ 139 - 111
webservice/planner_load.py

@@ -11,10 +11,39 @@ class PlannerLoad:
     new_structure: list
     structure: list
     config = {
-        'department': [1, 2, 3, 10, 30, 40, 50, 55, 81, 82],
-        'translation': [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
-                        20, 21, 22, 23, 3, 4, 5, 6, 7, 4, 0, 4,
-                        4, 4, 1, 4, 2, 4, 8, 9]
+        "department": [1, 2, 3, 10, 30, 40, 50, 55, 81, 82],
+        "translation": [
+            10,
+            11,
+            12,
+            13,
+            14,
+            15,
+            16,
+            17,
+            18,
+            19,
+            20,
+            21,
+            22,
+            23,
+            3,
+            4,
+            5,
+            6,
+            7,
+            4,
+            0,
+            4,
+            4,
+            4,
+            1,
+            4,
+            2,
+            4,
+            8,
+            9,
+        ],
     }
 
     def __init__(self, base_dir: str):
@@ -25,16 +54,16 @@ class PlannerLoad:
         return self.convert_file(self.new_structure)
 
     def load_values(self, year: str):
-        with open(self.base_dir.joinpath(f'accounts_{year}.json'), 'r') as frh:
-            self.account_values = json.load(frh)['values']
-        with open(self.base_dir.joinpath(f'planning_{year}.json'), 'r') as frh:
-            self.plan_values = json.load(frh)['values']
-        with open(self.base_dir.joinpath(f'marketing_{year}.json'), 'r') as frh:
+        with open(self.base_dir.joinpath(f"accounts_{year}.json"), "r") as frh:
+            self.account_values = json.load(frh)["values"]
+        with open(self.base_dir.joinpath(f"planning_{year}.json"), "r") as frh:
+            self.plan_values = json.load(frh)["values"]
+        with open(self.base_dir.joinpath(f"marketing_{year}.json"), "r") as frh:
             self.marketing_values = json.load(frh)
-        with open(self.base_dir.joinpath('gcstruct.json'), 'r') as frh:
+        with open(self.base_dir.joinpath("gcstruct.json"), "r") as frh:
             gcstruct = json.load(frh)
-        self.account_info = dict([(a['Konto_Nr'], a) for a in gcstruct['accounts']])
-        self.new_structure = gcstruct['flat']['Struktur_FB']
+        self.account_info = dict([(a["Konto_Nr"], a) for a in gcstruct["accounts"]])
+        self.new_structure = gcstruct["flat"]["Struktur_FB"]
 
     def set_structure(self, structure: list):
         self.structure = structure
@@ -42,149 +71,150 @@ class PlannerLoad:
     def convert_file(self, structure_source):
         self.structure = []
         for s in structure_source:
-            accts = s['accounts']
-            s['accounts'] = []
-            s['planlevel'] = (len(accts) > 0)
+            accts = s["accounts"]
+            s["accounts"] = []
+            s["planlevel"] = len(accts) > 0
             self.structure.append(s)
 
             for a in accts:
-                if type(a) == str:
+                if type(a) is str:
                     acct = {
-                        'text': a + ' - ' + self.account_info[a]['Konto_Bezeichnung'],
-                        'id': a,
-                        'costcenter': self.account_info[a]['Konto_KST'],
-                        'values': [],
-                        'values2': None
+                        "text": a + " - " + self.account_info[a]["Konto_Bezeichnung"],
+                        "id": a,
+                        "costcenter": self.account_info[a]["Konto_KST"],
+                        "values": [],
+                        "values2": None,
                     }
                 else:
                     acct = a
-                    a = acct['id']
-                acct['id'] = s['id'].replace(';;', ';' + acct['text'] + ';', 1)
-                s['children'].append(acct['id'])
+                    a = acct["id"]
+                acct["id"] = s["id"].replace(";;", ";" + acct["text"] + ";", 1)
+                s["children"].append(acct["id"])
 
                 new_account = {
-                    "id": acct['id'],
-                    "text": acct['text'],
+                    "id": acct["id"],
+                    "text": acct["text"],
                     "children": [],
                     "children2": [],
-                    "parents": [s['id']] + s['parents'],
+                    "parents": [s["id"]] + s["parents"],
                     "accounts": [a],
-                    "costcenter": acct['costcenter'],
-                    "level": s['level'] + 1,
+                    "costcenter": acct["costcenter"],
+                    "level": s["level"] + 1,
                     "drilldown": False,
-                    "form": s['form'],
+                    "form": s["form"],
                     "accountlevel": False,
                     "absolute": True,
                     "seasonal": True,
                     "status": "0",
-                    "values": acct['values'],
-                    "values2": acct['values2']
+                    "values": acct["values"],
+                    "values2": acct["values2"],
                 }
                 self.structure.append(new_account)
 
         for s in self.structure:
-            s['values2'] = self.get_values2(s)
-            s['options'] = self.get_options(s)
-            del s['accountlevel']
-            del s['absolute']
-            del s['seasonal']
-            del s['status']
-            s['sumvalues'] = [0] * 30
+            s["values2"] = self.get_values2(s)
+            s["options"] = self.get_options(s)
+            del s["accountlevel"]
+            del s["absolute"]
+            del s["seasonal"]
+            del s["status"]
+            s["sumvalues"] = [0] * 30
 
         return self.structure
 
     def get_values2(self, s):
-        if 'values2' not in s or s['values2'] is None or len(s['values2'].keys()) == 0:
-            s['values2'] = dict([(str(d), [0] * 30) for d in self.config['department']])
+        if "values2" not in s or s["values2"] is None or len(s["values2"].keys()) == 0:
+            s["values2"] = dict([(str(d), [0] * 30) for d in self.config["department"]])
         else:
-            for d in s['values2'].keys():
-                if len(s['values2'][d]) < 30:
-                    s['values2'][d] = [s['values2'][d][i] if 0 <= i < len(s['values2'][d]) else 0
-                                       for i in self.config['translation']]            
-        
+            for d in s["values2"].keys():
+                if len(s["values2"][d]) < 30:
+                    s["values2"][d] = [
+                        s["values2"][d][i] if 0 <= i < len(s["values2"][d]) else 0 for i in self.config["translation"]
+                    ]
+
         self.update_account_values(s)
         self.update_plan_values(s)
         self.update_marketing_values(s)
-    
+
     def update_values(self, value_type):
         v_types = {
-            'accounts': self.update_account_values,
-            'plan': self.update_plan_values,
-            'marketing': self.update_marketing_values
+            "accounts": self.update_account_values,
+            "plan": self.update_plan_values,
+            "marketing": self.update_marketing_values,
         }
 
         parents = []
         for s in self.structure:
             if v_types[value_type](s):
-                parents.append(s['parents'][0])
-        if value_type == 'marketing':
+                parents.append(s["parents"][0])
+        if value_type == "marketing":
             for p_id in set(parents):
                 parent = self.get_structure_by_id(p_id)
-                parent['form'] = '9'
-                for d in parent['options'].keys():
-                    parent['options'][d]['status'] = '0'
-                for d in parent['values2'].keys():
+                parent["form"] = "9"
+                for d in parent["options"].keys():
+                    parent["options"][d]["status"] = "0"
+                for d in parent["values2"].keys():
                     for i in range(15):
-                        parent['values2'][d][i] = 0.0
+                        parent["values2"][d][i] = 0.0
         return self.structure
 
     def get_structure_by_id(self, id):
         for s in self.structure:
-            if s['id'] == id:
+            if s["id"] == id:
                 return s
         return None
 
     def update_account_values(self, s):
-        if len(s['accounts']) == 0:
+        if len(s["accounts"]) == 0:
             return False
-        a_values = self.account_values.get(s['accounts'][0], dict())
+        a_values = self.account_values.get(s["accounts"][0], dict())
 
-        for d in s['values2'].keys():
+        for d in s["values2"].keys():
             if d in a_values:
                 for i, v in enumerate(a_values[d], 20):
-                    s['values2'][d][i] = v
-        return s['values2']
+                    s["values2"][d][i] = v
+        return s["values2"]
 
     def update_plan_values(self, s):
         if self.plan_values is None:
             return False
-        p_values = self.plan_values.get(s['id'], dict())
+        p_values = self.plan_values.get(s["id"], dict())
 
-        for d in s['values2'].keys():
+        for d in s["values2"].keys():
             if d in p_values:
                 for i, v in enumerate(p_values[d], 28):
-                    s['values2'][d][i] = v
+                    s["values2"][d][i] = v
         return True
 
     def update_marketing_values(self, s):
-        if len(s['accounts']) == 0:
+        if len(s["accounts"]) == 0:
             return False
-        m_values = self.marketing_values.get(s['accounts'][0], dict())
+        m_values = self.marketing_values.get(s["accounts"][0], dict())
         if not m_values:
             return False
 
-        for d in s['values2'].keys():
+        for d in s["values2"].keys():
             if d in m_values:
                 for i, v in enumerate(m_values[d], 1):
-                    s['values2'][d][i] = v
-                s['values2'][d][0] = sum(m_values[d])
-                s['values2'][d][14] = sum(m_values[d])
-                if 'options' in s:
-                    s['options'][d]['seasonal'] = False
+                    s["values2"][d][i] = v
+                s["values2"][d][0] = sum(m_values[d])
+                s["values2"][d][14] = sum(m_values[d])
+                if "options" in s:
+                    s["options"][d]["seasonal"] = False
         return True
 
     def convert_values2(self, s):
         a_values = {}
-        if s['accounts']:
-            a_values = self.account_values.get(s['accounts'][0], dict())
-        p_values = self.plan_values.get(s['id'], dict())
-        values = dict([(str(d), [0] * 30) for d in self.config['department']])
+        if s["accounts"]:
+            a_values = self.account_values.get(s["accounts"][0], dict())
+        p_values = self.plan_values.get(s["id"], dict())
+        values = dict([(str(d), [0] * 30) for d in self.config["department"]])
         for d in values.keys():
-            if d in s['values2']:
-                if len(s['values2'][d]) == 30:
-                    values[d] = s['values2'][d]
+            if d in s["values2"]:
+                if len(s["values2"][d]) == 30:
+                    values[d] = s["values2"][d]
                 else:
-                    values[d] = [s['values2'][d].get(i, 0) for i in self.config['translation']]
+                    values[d] = [s["values2"][d].get(i, 0) for i in self.config["translation"]]
             if d in p_values:
                 for i, v in enumerate(p_values[d], 28):
                     values[d][i] = v
@@ -198,38 +228,36 @@ class PlannerLoad:
         seasonal = True
         absolute = True
         planlevel = False
-        if isinstance(s['absolute'], bool):
-            absolute = s['absolute']
-            s['absolute'] = {}
-        if isinstance(s['seasonal'], bool):
-            seasonal = s['seasonal']
-            s['seasonal'] = {}
-        if isinstance(s['status'], str):
-            status = s['status']
-            s['status'] = {}
-        if 'planlevel' in s:
-            planlevel = s['planlevel']
-
-        opts = dict([(str(d), {
-            'absolute': absolute,
-            'seasonal': seasonal,
-            'status': status,
-            'planlevel': planlevel
-        }) for d in self.config['department']])
+        if isinstance(s["absolute"], bool):
+            absolute = s["absolute"]
+            s["absolute"] = {}
+        if isinstance(s["seasonal"], bool):
+            seasonal = s["seasonal"]
+            s["seasonal"] = {}
+        if isinstance(s["status"], str):
+            status = s["status"]
+            s["status"] = {}
+        if "planlevel" in s:
+            planlevel = s["planlevel"]
+
+        opts = dict(
+            [
+                (str(d), {"absolute": absolute, "seasonal": seasonal, "status": status, "planlevel": planlevel})
+                for d in self.config["department"]
+            ]
+        )
 
         for d in opts.keys():
-            if d in s['absolute']:
-                opts[d]['absolute'] = s['absolute'][d]
-            if d in s['seasonal']:
-                opts[d]['seasonal'] = s['seasonal'][d]
-            if d in s['status']:
-                opts[d]['status'] = s['status'][d]
+            if d in s["absolute"]:
+                opts[d]["absolute"] = s["absolute"][d]
+            if d in s["seasonal"]:
+                opts[d]["seasonal"] = s["seasonal"][d]
+            if d in s["status"]:
+                opts[d]["status"] = s["status"][d]
         return opts
 
 
-if __name__ == '__main__':
-    planner_dir = Path(__file__).parent.parent.joinpath('export')
+if __name__ == "__main__":
+    planner_dir = Path(__file__).parent.parent.joinpath("export")
     p_load = PlannerLoad(planner_dir)
-    print(p_load.convert_file(
-        json.load(open(planner_dir.joinpath('2022_V1.json'), 'r'))
-    )[12])
+    print(p_load.convert_file(json.load(open(planner_dir.joinpath("2022_V1.json"), "r")))[12])