12 Commits d1bd38e6f0 ... 742f2c976d

Author SHA1 Message Date
  gc-server3 742f2c976d Fixed January error with month-1 2 months ago
  gc-server3 f5762af559 Adjusted imports 2 months ago
  gc-server3 c0dce5636d Moved GCHR 2 months ago
  gc-server3 1c70074f07 Extracted GCHR bookings 2 months ago
  gc-server3 1233b7f602 GCStruct translation with balance sheet assets/liabilities 2 months ago
  gc-server3 60cc306dc5 Extracted translation and convert 3 months ago
  gc-server3 8fa843d748 GCHR translation static as well 3 months ago
  gc-server3 51357335b6 GCHR with tests 3 months ago
  gc-server3 a8d423263d Moved pyinstaller calls 3 months ago
  gc-server3 d27a8af1d1 Adjusted tests 3 months ago
  gc-server3 c6e846f4dc GCHR config further extracted, ready for separation of concerns 3 months ago
  gc-server3 8c5fd9507f GCHR typing 3 months ago

+ 1 - 1
.vscode/settings.json

@@ -13,7 +13,7 @@
     "files.associations": {
         "*.mac": "vbs"
     },
-    "python.testing.autoTestDiscoverOnSaveEnabled": false,
+    "python.testing.autoTestDiscoverOnSaveEnabled": true,
     "sqltools.connections": [
         {
             "mssqlOptions": {

+ 0 - 0
gcstruct/dist/gchr2.exe → dist/gchr2.exe


+ 0 - 0
gcstruct/dist/gchr2_10.exe → dist/gchr2_10.exe


BIN
gcstruct/dist/gcstruct_uebersetzung.exe → dist/gcstruct_uebersetzung.exe


+ 0 - 0
gcstruct/tests/__init__.py → gchr/__init__.py


+ 209 - 0
gchr/gchr.py

@@ -0,0 +1,209 @@
+import logging
+import os
+from dataclasses import dataclass
+from datetime import datetime
+from pathlib import Path
+from typing import Callable
+
+import pandas as pd
+
+from gchr.gchr_bookings import GchrBookings
+from gchr.gchr_export import (
+    ACCOUNT_INFO,
+    GchrExportConfig,
+    GchrExportFormat,
+    get_export_fn,
+)
+from gchr.gchr_translate import load_translation
+
+
+@dataclass
+class GchrConfig:
+    first_month_of_financial_year: str
+    data_dir: str
+    gcstruct_dir: str
+    export_dir: str
+    export_fn: Callable[[GchrExportConfig], None]
+
+
+class GCHR:
+    bookings: GchrBookings
+    _df_translate: pd.DataFrame | None = None
+    df_translate2: pd.DataFrame | None = None
+    makes: dict[str, str] | None = None
+    sites: dict[str, str] | None = None
+    timestamp: str
+
+    def __init__(self, base_dir: str) -> None:
+        self.base_dir = base_dir
+        os.makedirs(self.base_dir + "/data", exist_ok=True)
+        os.makedirs(self.base_dir + "/export/temp", exist_ok=True)
+        os.makedirs(self.base_dir + "/logs", exist_ok=True)
+
+        self.account_translation = f"{self.base_dir}/data/Kontenrahmen_uebersetzt.csv"
+
+        self.first_month_of_financial_year = "10"
+        self.bookings = GchrBookings(self.base_dir, self.first_month_of_financial_year)
+
+        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+        pd.set_option("display.max_rows", 500)
+        pd.set_option("display.float_format", lambda x: "%.2f" % x)
+
+    @property
+    def debug_file(self) -> str:
+        return f"{self.logs_dir}/debug_{self.timestamp}.csv"
+
+    @property
+    def account_ignored(self) -> str:
+        return f"{self.export_info_dir}/ignoriert_{self.period}.csv"
+
+    # self.account_invalid = f"{self.export_info_dir}/ungueltig_{self.period}.csv"
+
+    def export_all_periods(self, overwrite: bool = False, today: str | None = None) -> None:
+        periods = GCHR.get_all_periods(today)
+
+        for year, month in periods:
+            filename = self.export_filename_for_period(year, month)
+            if overwrite or not Path(filename).exists():
+                os.makedirs(Path(filename).parent.joinpath("info"), exist_ok=True)
+                self.export_period(year, month)
+
+    @staticmethod
+    def get_all_periods(today: str | None = None) -> list[tuple[str, str]]:
+        dt = datetime.now()
+        if today is not None:
+            dt = datetime.fromisoformat(today)
+        prev = str(dt.year - 1)
+        periods = [(prev, str(x).zfill(2)) for x in range(dt.month, 13)] + [
+            (str(dt.year), str(x).zfill(2)) for x in range(1, dt.month)
+        ]
+        return periods
+
+    def export_period(self, year: str, month: str) -> str | None:
+        export_fn = get_export_fn(GchrExportFormat.SKR51)
+        # Load account balances
+        df_bookings = self.bookings.filter_bookings(year, month)
+        all_periods = set(df_bookings["Bookkeep Period"].to_list())
+        bookkeep_period_date = datetime(int(year), int(month), 28)
+
+        if df_bookings.shape[0] == 0 or len(all_periods) <= 1 or self.bookings.booking_date < bookkeep_period_date:
+            logging.error("ABBRUCH!!! Keine Daten vorhanden!")
+            return None
+
+        filter_to = year + month
+        period_no = list(self.bookings.bookkeep_filter.keys()).index(filter_to) + 1
+
+        logging.info("df_bookings: " + str(df_bookings.shape))
+        # Join against the translation table
+        df_combined = df_bookings.merge(self.df_translate, how="inner", on="Konto_Nr_Händler")
+        logging.info(f"df_combined: {df_combined.shape}")
+
+        df_pivot = df_combined.pivot_table(
+            index=["Konto_Nr_SKR51"],
+            columns=["period"],
+            values="amount",
+            aggfunc="sum",
+            margins=True,
+            margins_name="CumulatedYear",
+        )
+        df_pivot.drop(index="CumulatedYear", inplace=True)
+
+        logging.info("df_pivot: " + str(df_pivot.shape))
+
+        df = df_pivot.merge(self.df_translate2, how="inner", on="Konto_Nr_SKR51")
+
+        makes_used = {}
+        for m in sorted(list(set(df["Marke"].to_list()))):
+            if m not in self.makes:
+                continue
+            makes_used[m] = self.makes[m]
+        sites_used = {}
+        for s in sorted(list(set((df["Marke"] + "-" + df["Standort"]).to_list()))):
+            if s not in self.sites:
+                continue
+            sites_used[s] = self.sites[s]
+
+        from_label = ["Marke", "Standort", "Konto_Nr", "Kostenstelle", "Absatzkanal", "Kostenträger", "KRM"]
+        to_label = ["Make", "Site", "Account", "Origin", "SalesChannel", "CostCarrier", "CostAccountingString"]
+        col_dict = dict(zip(from_label, to_label))
+        df = df.rename(columns=col_dict)
+
+        export_filename = self.export_filename_for_period(year, month)
+        export_csv = export_filename[:-4] + ".csv"
+        df.to_csv(export_csv, decimal=",", sep=";", encoding="latin-1", index=False)
+        df = df[df["IsNumeric"] != False].groupby(ACCOUNT_INFO, as_index=False).aggregate("sum")
+
+        # Add supplementary info
+        df["Decimals"] = 2
+        # df.sort_values(by=["Konto_Nr_SKR51"], inplace=True)
+        logging.info(df.shape)
+        main_sites = [self.sites[s] for s in sites_used if s in self.sites and self.sites[s] != "0000"]
+
+        for i, main_site in enumerate(main_sites):
+            filename = export_filename
+            if i > 0:
+                filename = f"{filename[:-4]}_{main_site}.xml"
+            export_cfg = GchrExportConfig(
+                main_site,
+                year,
+                month,
+                makes_used,
+                sites_used,
+                self.first_month_of_financial_year,
+                period_no,
+                self.bookings.bookkeep_filter,
+                self.bookings.booking_date,
+                filename,
+                df.to_dict(orient="records"),
+            )
+
+            export_fn(export_cfg)
+
+        # Join against the translation table - unmatched accounts
+        df_ignored = df_bookings.merge(self.df_translate, how="left", on="Konto_Nr_Händler")
+        df_ignored = df_ignored[df_ignored["Konto_Nr_SKR51"].isna()]
+        if not df_ignored.empty:
+            df_ignored = df_ignored.pivot_table(
+                index=["Konto_Nr_Händler"],
+                columns=["period"],
+                values="amount",
+                aggfunc="sum",
+                margins=True,
+                margins_name="CumulatedYear",
+            )
+            df_ignored.to_csv(self.account_ignored, decimal=",", sep=";", encoding="latin-1")
+        return export_filename
+
+    @property
+    def df_translate(self) -> pd.DataFrame:
+        if self._df_translate is None:
+            self.makes, self.sites, self._df_translate, self.df_translate2 = load_translation(
+                self.account_translation, self.debug_file, self.export_invalid_filename
+            )
+        return self._df_translate
+
+    @property
+    def export_info_dir(self) -> str:
+        return f"{self.base_dir}/Export/{self.current_year}/info/"
+
+    @property
+    def logs_dir(self) -> str:
+        return f"{self.base_dir}/Logs/"
+
+    @property
+    def export_invalid_filename(self) -> str:
+        return f"{self.base_dir}/Export/ungueltig.csv"
+
+    def export_filename_for_period(self, year: str, month: str) -> str:
+        return f"{self.base_dir}/Export/{year}/export_{year}-{month}.xml"
+
+
+def gchr_local() -> None:
+    base_dir = os.getcwd() + "/../GCHR2_Testdaten/Kunden"
+    for path in Path(base_dir).glob("*"):
+        if not path.is_dir():
+            continue
+        print(path.name)
+        gchr = GCHR(str(path))
+        gchr.export_all_periods()

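A minimal driver sketch for the new module (the customer path is an assumption in the style of gchr_local above; the directory must provide data/Kontenrahmen_uebersetzt.csv and data/GuV_Bilanz_Salden*.csv):

from gchr.gchr import GCHR

# Export the twelve periods before the given date, skipping files that
# already exist; the Export/{year}/info directories are created as needed.
gchr = GCHR("C:/Projekte/GCHR2_Testdaten/Kunden/Altermann")
gchr.export_all_periods(today="2024-12-23")
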
+ 142 - 0
gchr/gchr_bookings.py

@@ -0,0 +1,142 @@
+import logging
+from datetime import datetime
+from pathlib import Path
+
+import pandas as pd
+
+
+class GchrBookings:
+    base_dir: str
+    account_bookings: list[Path]
+    _df_bookings: pd.DataFrame
+    booking_date: datetime
+
+    def __init__(self, base_dir: str, first_month: str | None = None):
+        self.base_dir = base_dir
+        self.account_bookings = list(Path(self.base_dir).joinpath("data").glob("GuV_Bilanz_Salden*.csv"))
+        self.first_month_of_financial_year = first_month or "01"
+
+        self._df_bookings = self.load_bookings_from_file()
+
+    def load_bookings_from_file(self) -> pd.DataFrame:
+        df_list: list[pd.DataFrame] = []
+        timestamps: list[float] = []
+
+        for csv_file in self.account_bookings:
+            df_list.append(
+                pd.read_csv(
+                    csv_file,
+                    decimal=",",
+                    sep=";",
+                    encoding="latin-1",
+                    converters={0: str, 1: str},
+                )
+            )
+            timestamps.append(Path(csv_file).stat().st_mtime)
+        self.booking_date = datetime.fromtimestamp(max(timestamps))
+        df = pd.concat(df_list)
+        df["amount"] = (df["Debit Amount"] + df["Credit Amount"]).round(2)
+        return df
+
+    def set_bookkeep_period(self, year: str, month: str) -> None:
+        self.current_year = year
+        self.current_month = month
+        self.period = f"{year}-{month}"
+        prot_file = f"{self.export_info_dir}/protokoll_{self.period}.log"
+        logging.basicConfig(
+            filename=prot_file,
+            filemode="w",
+            encoding="utf-8",
+            level=logging.DEBUG,
+            force=True,
+        )
+
+    @property
+    def last_year(self) -> str:
+        return str(int(self.current_year) - 1)
+
+    @property
+    def last_year2(self) -> str:
+        return str(int(self.current_year) - 2)
+
+    @property
+    def next_year(self) -> str:
+        return str(int(self.current_year) + 1)
+
+    def filter_bookings(self, year: str, month: str) -> pd.DataFrame:
+        self.set_bookkeep_period(year, month)
+
+        # Filter account balances to the given month
+        filter_from = self.current_year + self.first_month_of_financial_year
+        filter_prev = self.last_year + self.first_month_of_financial_year
+
+        if self.first_month_of_financial_year > self.current_month:
+            filter_from = self.last_year + self.first_month_of_financial_year
+            filter_prev = self.last_year2 + self.first_month_of_financial_year
+        filter_to = self.current_year + self.current_month
+        filter_opening = self.current_year + "00"
+        filter_prev_opening = self.last_year + "00"
+        prev_year_closed = True
+
+        df_opening_balance = self._df_bookings[(self._df_bookings["Bookkeep Period"] == filter_opening)]
+        if df_opening_balance.shape[0] == 0:
+            df_opening_balance = self._df_bookings[
+                (self._df_bookings["Bookkeep Period"] == filter_prev_opening)
+                | (
+                    (self._df_bookings["Bookkeep Period"] >= filter_prev)
+                    & (self._df_bookings["Bookkeep Period"] < filter_from)
+                )
+            ].copy()
+            df_opening_balance["Bookkeep Period"] = filter_opening
+            prev_year_closed = False
+
+        df_opening_balance = df_opening_balance[(df_opening_balance["Konto_Nr_Händler"].str.contains(r"-[013]\d\d+-"))]
+        opening_balance = df_opening_balance["amount"].aggregate("sum").round(2)
+        logging.info("Gewinn/Verlustvortrag")
+        logging.info(opening_balance)
+
+        if not prev_year_closed:
+            row = {
+                "Konto_Nr_Händler": "01-01-0861-00-00-00",
+                "Bookkeep Period": filter_opening,
+                "Debit Amount": opening_balance * -1,
+                "Credit Amount": 0,
+                "Debit Quantity": 0,
+                "Credit Quantity": 0,
+                "amount": opening_balance * -1,
+            }
+            df_opening_balance = pd.concat([df_opening_balance, pd.DataFrame.from_records([row])])
+
+        df_filtered = self._df_bookings[
+            (self._df_bookings["Bookkeep Period"] >= filter_from) & (self._df_bookings["Bookkeep Period"] <= filter_to)
+        ]
+
+        # Copy bookings and append them as statistics accounts
+        df_stats = df_filtered.copy()
+        # df_stats = df_stats[df_stats['Konto_Nr_Händler'].str.match(r'-[24578]\d\d\d-')]
+        df_stats["Konto_Nr_Händler"] = df_stats["Konto_Nr_Händler"].str.replace(r"-(\d\d\d+)-", r"-\1_STK-", regex=True)
+        df_stats["amount"] = (df_filtered["Debit Quantity"] + df_filtered["Credit Quantity"]).round(2)
+
+        df_combined = pd.concat([df_opening_balance, df_filtered, df_stats])
+
+        # Convert columns
+        df_combined["period"] = df_combined["Bookkeep Period"].apply(lambda x: self.bookkeep_filter[x])
+        return df_combined[df_combined["amount"] != 0.00]
+
+    @property
+    def bookkeep_filter(self) -> dict[str, str]:
+        period = [self.current_year + str(i).zfill(2) for i in range(1, 13)]
+        if self.first_month_of_financial_year != "01":
+            if self.first_month_of_financial_year > self.current_month:
+                period = [self.last_year + str(i).zfill(2) for i in range(1, 13)] + period
+            else:
+                period = period + [self.next_year + str(i).zfill(2) for i in range(1, 13)]
+            fm = int(self.first_month_of_financial_year)
+            period = period[fm - 1 : fm + 12]
+        period = [self.current_year + "00"] + period
+        rename_to = ["OpeningBalance"] + ["Period" + str(i).zfill(2) for i in range(1, 13)]
+        return dict(zip(period, rename_to))
+
+    @property
+    def export_info_dir(self) -> str:
+        return f"{self.base_dir}/Export/{self.current_year}/info/"

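The bookkeep_filter mapping is easiest to check in isolation. A sketch that bypasses __init__ so no CSV files are needed (a test-only trick, not part of the API):

from gchr.gchr_bookings import GchrBookings

# Fiscal year starting in October ("10"), current period 2024-02.
bookings = GchrBookings.__new__(GchrBookings)  # skip CSV loading
bookings.first_month_of_financial_year = "10"
bookings.current_year = "2024"
bookings.current_month = "02"
bf = bookings.bookkeep_filter
assert bf["202400"] == "OpeningBalance"
assert bf["202310"] == "Period01"  # the fiscal year opens in October 2023
assert bf["202402"] == "Period05"
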
+ 28 - 0
gchr/gchr_convert.py

@@ -0,0 +1,28 @@
+import csv
+import xml.etree.ElementTree as ET
+
+from gchr.gchr_export import GchrExportConfig, export_skr51_xml
+
+
+def convert_to_row(node: ET.Element) -> list[str]:
+    return [child.text for child in node]
+
+
+def convert_xml_to_csv(xmlfile: str, csvfile: str) -> bool:
+    with open(xmlfile) as frh:
+        record_list = ET.parse(frh).getroot().find("RecordList")
+    header = [child.tag for child in record_list.find("Record")]
+    bookings = [convert_to_row(node) for node in record_list.findall("Record")]
+    with open(csvfile, "w") as fwh:
+        cwh = csv.writer(fwh, delimiter=";")
+        cwh.writerow(header)
+        cwh.writerows(bookings)
+    return True
+
+
+def convert_csv_to_xml(export_cfg: GchrExportConfig, csvfile: str, xmlfile: str) -> None:
+    # makes {"01": "1844"} / sites {"01-01": "1844"} now travel in export_cfg
+    with open(csvfile, "r", encoding="latin-1") as frh:
+        export_cfg.bookkeep_records = list(csv.DictReader(frh, delimiter=";"))
+    export_cfg.export_file = xmlfile
+    export_skr51_xml(export_cfg)

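Round-trip usage of the converter, assuming an export produced by GCHR.export_period (paths are illustrative):

from gchr.gchr_convert import convert_xml_to_csv

# Flatten the RecordList of an exported HbvData XML into a semicolon CSV.
convert_xml_to_csv("Export/2024/export_2024-11.xml", "Export/2024/export_2024-11.csv")
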
+ 0 - 0
gcstruct/gchr_error.py → gchr/gchr_error.py


+ 110 - 0
gchr/gchr_export.py

@@ -0,0 +1,110 @@
+import xml.etree.ElementTree as ET
+from collections.abc import Callable
+from dataclasses import dataclass
+from datetime import datetime
+from enum import StrEnum, auto
+from xml.dom import minidom
+
+ACCOUNT_INFO = [
+    "Account",
+    "Make",
+    "Site",
+    "Origin",
+    "SalesChannel",
+    "CostCarrier",
+    "CostAccountingString",
+]
+
+
+class GchrExportFormat(StrEnum):
+    SKR51 = auto()
+    Volkswagen = auto()
+    Opel = auto()
+
+
+@dataclass
+class GchrExportConfig:
+    main_site: str
+    current_year: str
+    current_month: str
+    makes_used: dict[str, str]
+    sites_used: dict[str, str]
+    first_month: str
+    period_no: int
+    bookkeep_filter: dict[str, str]
+    extraction_date: datetime
+    export_file: str
+    bookkeep_records: list[dict[str, str | float | int]]
+
+
+def export_skr51_xml(export_cfg: GchrExportConfig) -> None:
+    record_elements = (
+        ACCOUNT_INFO
+        + ["Decimals"]
+        + list(export_cfg.bookkeep_filter.values())[: export_cfg.period_no]
+        + ["CumulatedYear"]
+    )
+    root = ET.Element("HbvData")
+    h = ET.SubElement(root, "Header")
+    for k, v in export_skr51_header(export_cfg).items():
+        ET.SubElement(h, k).text = str(v)
+
+    make_list = ET.SubElement(root, "MakeList")
+    for make, make_code in export_cfg.makes_used.items():
+        e = ET.SubElement(make_list, "MakeListEntry")
+        ET.SubElement(e, "Make").text = make
+        ET.SubElement(e, "MakeCode").text = make_code
+
+    bm_code_list = ET.SubElement(root, "BmCodeList")
+    for s, bmcode in export_cfg.sites_used.items():
+        make, site = s.split("-")
+        e = ET.SubElement(bm_code_list, "BmCodeEntry")
+        ET.SubElement(e, "Make").text = make
+        ET.SubElement(e, "Site").text = site
+        ET.SubElement(e, "BmCode").text = bmcode
+
+    record_list = ET.SubElement(root, "RecordList")
+    for row in export_cfg.bookkeep_records:
+        record = ET.SubElement(record_list, "Record")
+        for e in record_elements:
+            child = ET.SubElement(record, e)
+            field = row.get(e, 0.0)
+            if str(field) == "nan":
+                field = "0"
+            elif type(field) is float:
+                field = "{:.0f}".format(field * 100)
+            child.text = str(field)
+
+    with open(export_cfg.export_file, "w", encoding="utf-8") as fwh:
+        fwh.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent="  "))
+
+
+def export_skr51_header(export_cfg: GchrExportConfig) -> dict[str, str]:
+    return {
+        "Country": "DE",
+        "MainBmCode": export_cfg.main_site,
+        "Month": export_cfg.current_month,
+        "Year": export_cfg.current_year,
+        "Currency": "EUR",
+        "NumberOfMakes": len(export_cfg.makes_used),
+        "NumberOfSites": len(export_cfg.sites_used),
+        "ExtractionDate": export_cfg.extraction_date.strftime("%d.%m.%Y"),
+        "ExtractionTime": export_cfg.extraction_date.strftime("%H:%M:%S"),
+        "BeginFiscalYear": export_cfg.first_month,
+    }
+
+
+GchrExportFn = Callable[[GchrExportConfig], None]
+
+
+EXPORT_FN: dict[GchrExportFormat, GchrExportFn] = {
+    GchrExportFormat.SKR51: export_skr51_xml,
+}
+
+
+def export_dummy(export_cfg: GchrExportConfig) -> None:
+    pass
+
+
+def get_export_fn(export_format: GchrExportFormat) -> GchrExportFn:
+    return EXPORT_FN.get(export_format, export_dummy)

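The EXPORT_FN registry makes the formats above pluggable; only SKR51 is implemented in this commit. A sketch with a hypothetical stand-in for the Volkswagen writer:

from gchr.gchr_export import EXPORT_FN, GchrExportConfig, GchrExportFormat, get_export_fn

def export_volkswagen(export_cfg: GchrExportConfig) -> None:
    # hypothetical backend, not part of this commit
    print(f"would write {export_cfg.export_file} for site {export_cfg.main_site}")

EXPORT_FN[GchrExportFormat.Volkswagen] = export_volkswagen
assert get_export_fn(GchrExportFormat.Volkswagen) is export_volkswagen
assert get_export_fn(GchrExportFormat.Opel).__name__ == "export_dummy"  # fallback
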
+ 216 - 0
gchr/gchr_translate.py

@@ -0,0 +1,216 @@
+import logging
+
+import numpy as np
+import pandas as pd
+
+TRANSLATE = [
+    "Konto_Nr_Händler",
+    "Konto_Nr_SKR51",
+    "Marke",
+    "Standort",
+    "Konto_Nr",
+    "Kostenstelle",
+    "Absatzkanal",
+    "Kostenträger",
+    "Kontoart",
+    "Konto_1",
+    "KRM",
+    "IsNumeric",
+]
+
+
+def load_translation(
+    account_translation: str, debug_file: str, export_invalid_filename: str
+) -> tuple[dict[str, str], dict[str, str], pd.DataFrame, pd.DataFrame]:
+    df_translate_import = pd.read_csv(
+        account_translation,
+        decimal=",",
+        sep=";",
+        encoding="latin-1",
+        converters={i: str for i in range(0, 200)},
+    ).reset_index()
+
+    makes = get_makes_from_translation(df_translate_import)
+    sites = get_sites_from_translation(df_translate_import)
+
+    df_prepared = prepare_translation(df_translate_import)
+    df_translate = special_translation(df_prepared, makes, sites, debug_file, export_invalid_filename)
+    df_translate2 = df_translate.copy().drop(columns=["Konto_Nr_Händler"]).drop_duplicates().set_index("Konto_Nr_SKR51")
+
+    df_translate3 = (
+        df_translate[["Kontoart", "Konto_Nr_SKR51", "Konto_Nr_Händler"]]
+        .copy()
+        .sort_values(by=["Kontoart", "Konto_Nr_SKR51"])
+    )
+    df_translate3.to_csv(account_translation[:-4] + "_GCHR.csv", decimal=",", sep=";", encoding="latin-1", index=False)
+    return (makes, sites, df_translate, df_translate2)
+
+
+def get_makes_from_translation(df_translate_import: pd.DataFrame) -> dict[str, str]:
+    df_makes = df_translate_import[["Marke", "Marke_HBV"]].copy().drop_duplicates()
+    df_makes = df_makes[df_makes["Marke_HBV"] != "0000"]
+    makes = dict([(e["Marke"], e["Marke_HBV"]) for e in df_makes.to_dict(orient="records")])
+    makes["99"] = "0000"
+    return makes
+
+
+def get_sites_from_translation(df_translate_import: pd.DataFrame) -> dict[str, str]:
+    df_sites = df_translate_import[["Marke", "Standort", "Standort_HBV"]].copy().drop_duplicates()
+    df_sites["Standort_HBV"] = np.where(df_sites["Standort_HBV"].str.len() != 6, "0000", df_sites["Standort_HBV"])
+    sites = dict([(e["Marke"] + "-" + e["Standort"], e["Standort_HBV"]) for e in df_sites.to_dict(orient="records")])
+    return sites
+
+
+def prepare_translation(df_translate_import: pd.DataFrame) -> pd.DataFrame:
+    df = df_translate_import[
+        [
+            "Konto_Nr_Händler",
+            "Konto_Nr_SKR51",
+        ]
+    ].drop_duplicates()
+    logging.info(df.shape)
+
+    row = {
+        "Konto_Nr_Händler": "01-01-0861-00-00-00",
+        "Konto_Nr_SKR51": "01-01-0861-00-00-00",
+    }
+    df = pd.concat([df, pd.DataFrame.from_records([row])])
+    # no index set here: downstream merges join on the Konto_Nr_Händler column
+    return df
+
+
+def special_translation(
+    df: pd.DataFrame, makes: dict[str, str], sites: dict[str, str], debug_file: str, export_invalid_filename: str
+) -> pd.DataFrame:
+    df["Konto_Nr_Händler"] = df["Konto_Nr_Händler"].str.upper()
+    df["Konto_Nr_SKR51"] = df["Konto_Nr_SKR51"].str.upper()
+    df = extract_acct_info(df)
+    df["Konto_Nr"] = df["Konto_Nr"].str.upper()
+    logging.info(df.shape)
+    logging.info(df.columns)
+    logging.info(df.head())
+
+    logging.info("df: " + str(df.shape))
+    df["Bilanz"] = df["Konto_Nr"].str.match(r"^[013]")
+    df["Kontoart"] = np.where(df["Bilanz"], "1", "2")
+    df["Kontoart"] = np.where(df["Konto_Nr"].str.contains("_STK"), "3", df["Kontoart"])
+    df["Kontoart"] = np.where(df["Konto_Nr"].str.match(r"^[9]"), "3", df["Kontoart"])
+    df["Konto_1"] = df["Konto_Nr"].str.slice(0, 1)
+
+    # fill in missing make and site assignments
+    df["Marke"] = np.where(df["Marke"].isin(makes.keys()), df["Marke"], "99")
+    df["Marke_Standort"] = df["Marke"] + "-" + df["Standort"]
+    df["Standort"] = np.where(df["Marke_Standort"].isin(sites.keys()), df["Standort"], "01")
+
+    df_debug = df.drop(columns=["Bilanz"])
+    logging.info(df_debug.groupby(["Kontoart"]).aggregate("sum"))
+    logging.info(df_debug.groupby(["Kontoart", "Konto_1"]).aggregate("sum"))
+    logging.info(df_debug.groupby(["Konto_Nr"]).aggregate("sum"))
+    df_debug.groupby(["Konto_Nr"]).aggregate("sum").to_csv(debug_file, decimal=",", sep=";", encoding="latin-1")
+
+    # Clean up GW cost carriers
+    df["NW_Verkauf_1"] = (df["Konto_Nr"].str.match(r"^[78]0")) & (df["Kostenstelle"].str.match(r"^[^1]\d"))
+    df["Kostenstelle"] = np.where(df["NW_Verkauf_1"] == True, "11", df["Kostenstelle"])
+
+    df["Konto_7010"] = df["Konto_Nr"].str.match(r"^[78]01[01]")
+    df["Kostenstelle"] = np.where(df["Konto_7010"] == True, "14", df["Kostenstelle"])
+
+    df["GW_Verkauf_2"] = (df["Konto_Nr"].str.match(r"^[78]1")) & (df["Kostenstelle"].str.match(r"^[^2]\d"))
+    df["Kostenstelle"] = np.where(df["GW_Verkauf_2"] == True, "21", df["Kostenstelle"])
+
+    df["GW_Verkauf_3"] = (df["Konto_Nr"].str.match(r"^[78]3")) & (df["Kostenstelle"].str.match(r"^[^3]\d"))
+    df["Kostenstelle"] = np.where(df["GW_Verkauf_3"] == True, "31", df["Kostenstelle"])
+
+    df["GW_Verkauf_4"] = (df["Konto_Nr"].str.match(r"^[78]4")) & (df["Kostenstelle"].str.match(r"^[^4]\d"))
+    df["Kostenstelle"] = np.where(df["GW_Verkauf_4"] == True, "41", df["Kostenstelle"])
+
+    df["GW_Verkauf_x420"] = df["Konto_Nr"].str.match(r"^[78]420")
+    df["Kostenstelle"] = np.where(df["GW_Verkauf_x420"] == True, "42", df["Kostenstelle"])
+
+    df["GW_Verkauf_5"] = (df["Konto_Nr"].str.match(r"^[78]5")) & (df["Kostenstelle"].str.match(r"^[^5]\d"))
+    df["Kostenstelle"] = np.where(df["GW_Verkauf_5"] == True, "51", df["Kostenstelle"])
+
+    df["GW_Verkauf_50"] = (df["Konto_Nr"].str.match(r"^[78]")) & (df["Kostenstelle"].str.match(r"^2"))
+    df["Kostenträger"] = np.where(df["GW_Verkauf_50"] == True, "52", df["Kostenträger"])
+    df["Kostenträger"] = np.where(
+        (df["GW_Verkauf_50"] == True) & (df["Marke"] == "01"),
+        "50",
+        df["Kostenträger"],
+    )
+
+    df["NW_Verkauf_00"] = (
+        (df["Konto_Nr"].str.match(r"^[78]2"))
+        & (df["Kostenstelle"].str.match(r"^1"))
+        & (df["Kostenträger"].str.match(r"^[^01234]"))
+    )
+    df["Kostenträger"] = np.where(df["NW_Verkauf_00"] == True, "00", df["Kostenträger"])
+
+    df["GW_Stk_50"] = (df["Konto_Nr"].str.match(r"^9130")) & (df["Kostenstelle"].str.match(r"^2"))
+    df["Kostenträger"] = np.where(df["GW_Stk_50"] == True, "52", df["Kostenträger"])
+    df["Kostenträger"] = np.where((df["GW_Stk_50"] == True) & (df["Marke"] == "01"), "50", df["Kostenträger"])
+
+    df["Kostenträger"] = np.where(df["Bilanz"] == True, "00", df["Kostenträger"])
+
+    df["Konto_5er"] = (df["Konto_Nr"].str.match("^5")) | (df["Konto_Nr"].str.match("^9143"))
+    df["Absatzkanal"] = np.where(df["Konto_5er"] == True, "99", df["Absatzkanal"])
+
+    df["Konto_5005"] = (df["Konto_Nr"].str.match("^5005")) & (df["Kostenstelle"].str.match(r"^[^12]"))
+    df["Kostenstelle"] = np.where(df["Konto_5005"] == True, "20", df["Kostenstelle"])
+    df["Kostenträger"] = np.where(df["Konto_5005"] == True, "50", df["Kostenträger"])
+
+    df["Konto_5007"] = (df["Konto_Nr"].str.match("^5007")) & (df["Kostenstelle"].str.match(r"^([^4]|42)"))
+    df["Kostenstelle"] = np.where(df["Konto_5007"] == True, "41", df["Kostenstelle"])
+    df["Kostenträger"] = np.where(df["Konto_5007"] == True, "70", df["Kostenträger"])
+
+    df["Konto_914er"] = (df["Konto_Nr"].str.match("^914[34]")) & (df["Kostenträger"].str.match(r"^[^7]"))
+    df["Kostenträger"] = np.where(df["Konto_914er"] == True, "70", df["Kostenträger"])
+
+    df["Teile_30_60"] = (
+        (df["Konto_Nr"].str.match(r"^[578]"))
+        & (df["Kostenstelle"].str.match(r"^[3]"))
+        & (df["Kostenträger"].str.match(r"^[^6]"))
+    )
+    df["Kostenträger"] = np.where(df["Teile_30_60"] == True, "60", df["Kostenträger"])
+
+    df["Service_40_70"] = (
+        (df["Konto_Nr"].str.match(r"^[578]"))
+        & (df["Kostenstelle"].str.match(r"^[4]"))
+        & (df["Kostenträger"].str.match(r"^[^7]"))
+    )
+    df["Kostenträger"] = np.where(df["Service_40_70"] == True, "70", df["Kostenträger"])
+
+    df["KRM"] = df["Marke"] + df["Standort"] + df["Kostenstelle"] + df["Absatzkanal"] + df["Kostenträger"]
+    df["Konto_Nr_SKR51"] = (
+        (df["Marke"] + "-" + df["Standort"] + "-" + df["Konto_Nr"])
+        + "-"
+        + (df["Kostenstelle"] + "-" + df["Absatzkanal"] + "-" + df["Kostenträger"])
+    )
+    df["IsNumeric"] = (
+        (df["KRM"].str.isdigit())
+        & (df["Konto_Nr"].str.isdigit())
+        & (df["Konto_Nr"].str.len() == 4)
+        # & (df["Konto_Nr_SKR51"].str.len() == 19)
+    )
+    df_invalid = df[df["IsNumeric"] == False]
+    df_invalid.to_csv(export_invalid_filename, decimal=",", sep=";", encoding="latin-1", index=False)
+    return df[df["IsNumeric"] == True][TRANSLATE]
+
+
+def extract_acct_info(df: pd.DataFrame) -> pd.DataFrame:
+    acct_info = [
+        "Marke",
+        "Standort",
+        "Konto_Nr",
+        "Kostenstelle",
+        "Absatzkanal",
+        "Kostenträger",
+    ]
+    df["HasFiveDashes"] = df["Konto_Nr_SKR51"].str.count("-") == 5
+    df["Invalid"] = "XX-XX-XXXX-XX-XX-XX"
+    df["Konto_Nr_SKR51"] = np.where(
+        df["HasFiveDashes"],
+        df["Konto_Nr_SKR51"],
+        df["Invalid"],
+    )
+    df[acct_info] = df["Konto_Nr_SKR51"].str.split(pat="-", n=6, expand=True)
+    return df

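How extract_acct_info behaves on well-formed and malformed account numbers (a standalone sketch):

import pandas as pd

from gchr.gchr_translate import extract_acct_info

df = pd.DataFrame({"Konto_Nr_SKR51": ["01-01-0861-00-00-00", "broken"]})
df = extract_acct_info(df)
# Six components for a valid number; anything without exactly five dashes
# is replaced by the XX-XX-XXXX-XX-XX-XX placeholder before splitting.
print(df[["Marke", "Standort", "Konto_Nr", "Kostenstelle", "Absatzkanal", "Kostenträger"]])
#   0: 01 / 01 / 0861 / 00 / 00 / 00
#   1: XX / XX / XXXX / XX / XX / XX
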
+ 0 - 0
gchr/tests/__init__.py


+ 39 - 0
gchr/tests/test_gchr.py

@@ -0,0 +1,39 @@
+import unittest
+
+import pandas as pd
+
+from gchr.gchr import GCHR
+from gchr.gchr_bookings import GchrBookings
+from gchr.gchr_translate import TRANSLATE
+
+
+class TestGchr(unittest.TestCase):
+    base_dir_1: str = "C:\\Projekte\\GCHR2_Testdaten\\Kunden\\Altermann"
+    base_dir_2: str = "C:\\Projekte\\GCHR2_Testdaten\\Kunden\\Koenig-und-Partner"
+
+    def test_single_booking_files(self):
+        gchr = GCHR(self.base_dir_1)
+        self.assertIsInstance(gchr.bookings, GchrBookings)
+        self.assertEqual(len(gchr.bookings.account_bookings), 1)
+        self.assertEqual(gchr.bookings.account_bookings[0].name, "GuV_Bilanz_Salden.csv")
+
+    def test_multiple_booking_files(self):
+        gchr = GCHR(self.base_dir_2)
+        self.assertIsInstance(gchr.bookings, GchrBookings)
+        self.assertEqual(len(gchr.bookings.account_bookings), 2)
+        self.assertEqual(gchr.bookings.account_bookings[0].name, "GuV_Bilanz_Salden.csv")
+        self.assertEqual(gchr.bookings.account_bookings[1].name, "GuV_Bilanz_Salden_deop03.csv")
+
+    def test_translation_existing(self):
+        gchr = GCHR(self.base_dir_1)
+        df = gchr.df_translate
+        self.assertIsInstance(df, pd.DataFrame)
+        self.assertEqual(df.shape[1], 12)
+        self.assertListEqual(list(df.columns), TRANSLATE)
+        self.assertGreater(df.shape[0], 0, "Translation not empty")
+
+    def test_all_periods(self):
+        periods = GCHR.get_all_periods("2024-12-23")
+        self.assertEqual(len(periods), 12)
+        self.assertEqual(periods[0], ("2023", "12"))
+        self.assertEqual(periods[-1], ("2024", "11"))

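The tests are plain unittest and bind to the local fixture paths above; assuming those directories exist, they run with:

python -m unittest gchr.tests.test_gchr
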
+ 1 - 1
gcstruct/gchr2.bat → gchr2.bat

@@ -1,4 +1,4 @@
 cd /d %~dp0
-call ..\.venv\Scripts\activate.bat
+call .venv\Scripts\activate.bat
 pyinstaller -F --path %~dp0 gchr2.py
 pause

+ 7 - 0
gchr2.py

@@ -0,0 +1,7 @@
+import os
+
+from gchr.gchr import GCHR
+
+if __name__ == "__main__":
+    gchr = GCHR(os.getcwd())
+    gchr.export_all_periods()

+ 1 - 1
gcstruct/gchr2.spec → gchr2.spec

@@ -6,7 +6,7 @@ block_cipher = None
 
 a = Analysis(
     ['gchr2.py'],
-    pathex=['C:\\Projekte\\Python\\gcstruct\\'],
+    pathex=['C:\\Projekte\\Python\\'],
     binaries=[],
     datas=[],
     hiddenimports=[],

+ 0 - 566
gcstruct/gchr.py

@@ -1,566 +0,0 @@
-import csv
-import logging
-import os
-import xml.etree.ElementTree as ET
-from dataclasses import dataclass
-from datetime import datetime
-from pathlib import Path
-from xml.dom import minidom
-
-import numpy as np
-import pandas as pd
-
-ACCOUNT_INFO = [
-    "Account",
-    "Make",
-    "Site",
-    "Origin",
-    "SalesChannel",
-    "CostCarrier",
-    "CostAccountingString",
-]
-
-TRANSLATE = [
-    "Konto_Nr_Händler",
-    "Konto_Nr_SKR51",
-    "Marke",
-    "Standort",
-    "Konto_Nr",
-    "Kostenstelle",
-    "Absatzkanal",
-    "Kostenträger",
-    "Kontoart",
-    "Konto_1",
-    "KRM",
-    "IsNumeric",
-]
-
-
-@dataclass
-class GchrConfig:
-    first_month_of_financial_year: str
-
-
-class GCHR:
-    booking_date: datetime
-    df_bookings: pd.DataFrame = None
-    df_translate: pd.DataFrame = None
-    df_translate2: pd.DataFrame = None
-    makes: dict[str, str] = None
-    sites: dict[str, str] = None
-
-    def __init__(self, base_dir) -> None:
-        self.base_dir = base_dir
-
-        self.account_translation = f"{self.base_dir}/data/Kontenrahmen_uebersetzt.csv"
-        self.account_bookings = list(Path(self.base_dir).joinpath("data").glob("GuV_Bilanz_Salden*.csv"))
-        self.first_month_of_financial_year = "10"
-
-        pd.set_option("display.max_rows", 500)
-        pd.set_option("display.float_format", lambda x: "%.2f" % x)
-
-    def set_bookkeep_period(self, year, month):
-        self.current_year = year
-        self.current_month = month
-        period = f"{year}-{month}"
-        prot_file = f"{self.export_info_dir}/protokoll_{period}.log"
-        logging.basicConfig(
-            filename=prot_file,
-            filemode="w",
-            encoding="utf-8",
-            level=logging.DEBUG,
-            force=True,
-        )
-        self.debug_file = f"{self.export_info_dir}/debug_{period}.csv"
-        self.account_ignored = f"{self.export_info_dir}/ignoriert_{period}.csv"
-        # self.account_invalid = f"{self.export_info_dir}/ungueltig_{period}.csv"
-
-        self.last_year = str(int(self.current_year) - 1)
-        self.last_year2 = str(int(self.current_year) - 2)
-        self.next_year = str(int(self.current_year) + 1)
-
-    def header(self, makes_used, sites_used, main_site):
-        return {
-            "Country": "DE",
-            "MainBmCode": main_site,
-            "Month": self.current_month,
-            "Year": self.current_year,
-            "Currency": "EUR",
-            "NumberOfMakes": len(makes_used),
-            "NumberOfSites": len(sites_used),
-            "ExtractionDate": self.booking_date.strftime("%d.%m.%Y"),
-            "ExtractionTime": self.booking_date.strftime("%H:%M:%S"),
-            "BeginFiscalYear": self.first_month_of_financial_year,
-        }
-
-    @property
-    def bookkeep_filter(self):
-        period = [self.current_year + str(i).zfill(2) for i in range(1, 13)]
-        if self.first_month_of_financial_year != "01":
-            if self.first_month_of_financial_year > self.current_month:
-                period = [self.last_year + str(i).zfill(2) for i in range(1, 13)] + period
-            else:
-                period = period + [self.next_year + str(i).zfill(2) for i in range(1, 13)]
-            fm = int(self.first_month_of_financial_year)
-            period = period[fm - 1 : fm + 12]
-        period = [self.current_year + "00"] + period
-        rename_to = ["OpeningBalance"] + ["Period" + str(i).zfill(2) for i in range(1, 13)]
-        return dict(zip(period, rename_to))
-
-    def extract_acct_info(self, df: pd.DataFrame):
-        acct_info = [
-            "Marke",
-            "Standort",
-            "Konto_Nr",
-            "Kostenstelle",
-            "Absatzkanal",
-            "Kostenträger",
-        ]
-        df["HasFiveDashes"] = df["Konto_Nr_SKR51"].str.count("-") == 5
-        df["Invalid"] = "XX-XX-XXXX-XX-XX-XX"
-        df["Konto_Nr_SKR51"] = np.where(
-            df["HasFiveDashes"],
-            df["Konto_Nr_SKR51"],
-            df["Invalid"],
-        )
-        df[acct_info] = df["Konto_Nr_SKR51"].str.split(pat="-", n=6, expand=True)
-        return df
-
-    def export_all_periods(self, overwrite=False, today=None):
-        dt = datetime.now()
-        if today is not None:
-            dt = datetime.fromisoformat(today)
-        prev = str(dt.year - 1)
-        periods = [(prev, str(x).zfill(2)) for x in range(dt.month, 13)] + [
-            (str(dt.year), str(x).zfill(2)) for x in range(1, dt.month)
-        ]
-        for year, month in periods:
-            filename = self.export_filename_for_period(year, month)
-            if overwrite or not Path(filename).exists():
-                os.makedirs(Path(filename).parent.joinpath("info"), exist_ok=True)
-                self.export_period(year, month)
-
-    def export_period(self, year, month):
-        self.set_bookkeep_period(year, month)
-        # Übersetzungstabelle laden
-        self.get_translation()
-
-        # Kontensalden laden
-        df_bookings = self.filter_bookings()
-        all_periods = set(df_bookings["Bookkeep Period"].to_list())
-        bookkeep_period_date = datetime(int(year), int(month), 28)
-
-        if df_bookings.shape[0] == 0 or len(all_periods) <= 1 or self.booking_date < bookkeep_period_date:
-            logging.error("ABBRUCH!!! Keine Daten vorhanden!")
-            return False
-
-        filter_to = self.current_year + self.current_month
-        period_no = list(self.bookkeep_filter.keys()).index(filter_to) + 1
-
-        logging.info("df_bookings: " + str(df_bookings.shape))
-        # Join auf Übersetzung
-        df_combined = df_bookings.merge(self.df_translate, how="inner", on="Konto_Nr_Händler")
-        logging.info(f"df_combined: {df_combined.shape}")
-
-        df_pivot = df_combined.pivot_table(
-            index=["Konto_Nr_SKR51"],
-            columns=["period"],
-            values="amount",
-            aggfunc="sum",
-            margins=True,
-            margins_name="CumulatedYear",
-        )
-        df_pivot.drop(index="CumulatedYear", inplace=True)
-
-        logging.info("df_pivot: " + str(df_pivot.shape))
-
-        df = df_pivot.merge(self.df_translate2, how="inner", on="Konto_Nr_SKR51")
-
-        makes_used = sorted(list(set(df["Marke"].to_list())))
-        sites_used = sorted(list(set((df["Marke"] + "-" + df["Standort"]).to_list())))
-
-        from_label = ["Marke", "Standort", "Konto_Nr", "Kostenstelle", "Absatzkanal", "Kostenträger", "KRM"]
-        to_label = ["Make", "Site", "Account", "Origin", "SalesChannel", "CostCarrier", "CostAccountingString"]
-        col_dict = dict(zip(from_label, to_label))
-        df = df.rename(columns=col_dict)
-
-        export_csv = self.export_filename[:-4] + ".csv"
-        df.to_csv(export_csv, decimal=",", sep=";", encoding="latin-1", index=False)
-        df = df[df["IsNumeric"] != False].groupby(ACCOUNT_INFO, as_index=False).aggregate("sum")
-
-        # Infos ergänzen
-        df["Decimals"] = 2
-        # df.sort_values(by=["Konto_Nr_SKR51"], inplace=True)
-        logging.info(df.shape)
-        main_sites = [self.sites[s] for s in sites_used if s in self.sites and self.sites[s] != "0000"]
-
-        for i, main_site in enumerate(main_sites):
-            filename = self.export_filename
-            if i > 0:
-                filename = f"{filename[:-4]}_{main_site}.xml"
-            self.export_skr51_xml(
-                df.to_dict(orient="records"),
-                self.bookkeep_filter,
-                period_no,
-                makes_used,
-                sites_used,
-                main_site,
-                filename,
-            )
-
-        # Join auf Übersetzung - nicht zugeordnet
-        df_ignored = df_bookings.merge(self.df_translate, how="left", on="Konto_Nr_Händler")
-        df_ignored = df_ignored[df_ignored["Konto_Nr_SKR51"].isna()]
-        if not df_ignored.empty:
-            df_ignored = df_ignored.pivot_table(
-                index=["Konto_Nr_Händler"],
-                columns=["period"],
-                values="amount",
-                aggfunc="sum",
-                margins=True,
-                margins_name="CumulatedYear",
-            )
-            df_ignored.to_csv(self.account_ignored, decimal=",", sep=";", encoding="latin-1")
-        return self.export_filename
-
-    def get_translation(self):
-        if self.df_translate is None:
-            df_translate_import = pd.read_csv(
-                self.account_translation,
-                decimal=",",
-                sep=";",
-                encoding="latin-1",
-                converters={i: str for i in range(0, 200)},
-            ).reset_index()
-
-            df_makes = df_translate_import[["Marke", "Marke_HBV"]].copy().drop_duplicates()
-            df_makes = df_makes[df_makes["Marke_HBV"] != "0000"]
-            self.makes = dict([(e["Marke"], e["Marke_HBV"]) for e in df_makes.to_dict(orient="records")])
-            self.makes["99"] = "0000"
-            df_sites = df_translate_import[["Marke", "Standort", "Standort_HBV"]].copy().drop_duplicates()
-            df_sites["Standort_HBV"] = np.where(
-                df_sites["Standort_HBV"].str.len() != 6, "0000", df_sites["Standort_HBV"]
-            )
-            self.sites = dict(
-                [(e["Marke"] + "-" + e["Standort"], e["Standort_HBV"]) for e in df_sites.to_dict(orient="records")]
-            )
-
-            df_prepared = self.prepare_translation(df_translate_import)
-            self.df_translate = self.special_translation(df_prepared)
-            self.df_translate2 = (
-                self.df_translate.drop(columns=["Konto_Nr_Händler"])
-                .copy()
-                .drop_duplicates()
-                .set_index("Konto_Nr_SKR51")
-            )
-        return self.df_translate
-
-    def prepare_translation(self, df_translate_import: pd.DataFrame):
-        df_translate = df_translate_import[
-            [
-                "Konto_Nr_Händler",
-                "Konto_Nr_SKR51",
-            ]
-        ].drop_duplicates()
-        logging.info(df_translate.shape)
-
-        row = {
-            "Konto_Nr_Händler": "01-01-0861-00-00-00",
-            "Konto_Nr_SKR51": "01-01-0861-00-00-00",
-        }
-        df_translate = pd.concat([df_translate, pd.DataFrame.from_records([row])])
-        df_translate.set_index("Konto_Nr_Händler")
-        return df_translate
-
-    def special_translation(self, df: pd.DataFrame):
-        df["Konto_Nr_Händler"] = df["Konto_Nr_Händler"].str.upper()
-        df["Konto_Nr_SKR51"] = df["Konto_Nr_SKR51"].str.upper()
-        df = self.extract_acct_info(df)
-        df["Konto_Nr"] = df["Konto_Nr"].str.upper()
-        logging.info(df.shape)
-        logging.info(df.columns)
-        logging.info(df.head())
-
-        logging.info("df: " + str(df.shape))
-        df["Bilanz"] = df["Konto_Nr"].str.match(r"^[013]")
-        df["Kontoart"] = np.where(df["Bilanz"], "1", "2")
-        df["Kontoart"] = np.where(df["Konto_Nr"].str.contains("_STK"), "3", df["Kontoart"])
-        df["Kontoart"] = np.where(df["Konto_Nr"].str.match(r"^[9]"), "3", df["Kontoart"])
-        df["Konto_1"] = df["Konto_Nr"].str.slice(0, 1)
-
-        # fehlende Marken- und Standortzuordnung
-        df["Marke"] = np.where(df["Marke"].isin(self.makes.keys()), df["Marke"], "99")
-        df["Marke_Standort"] = df["Marke"] + "-" + df["Standort"]
-        df["Standort"] = np.where(df["Marke_Standort"].isin(self.sites.keys()), df["Standort"], "01")
-
-        df_debug = df.drop(columns=["Bilanz"])
-        logging.info(df_debug.groupby(["Kontoart"]).aggregate("sum"))
-        logging.info(df_debug.groupby(["Kontoart", "Konto_1"]).aggregate("sum"))
-        logging.info(df_debug.groupby(["Konto_Nr"]).aggregate("sum"))
-        df_debug.groupby(["Konto_Nr"]).aggregate("sum").to_csv(
-            self.debug_file, decimal=",", sep=";", encoding="latin-1"
-        )
-
-        # Bereinigung GW-Kostenträger
-        df["NW_Verkauf_1"] = (df["Konto_Nr"].str.match(r"^[78]0")) & (df["Kostenstelle"].str.match(r"^[^1]\d"))
-        df["Kostenstelle"] = np.where(df["NW_Verkauf_1"] == True, "11", df["Kostenstelle"])
-
-        df["Konto_7010"] = df["Konto_Nr"].str.match(r"^[78]01[01]")
-        df["Kostenstelle"] = np.where(df["Konto_7010"] == True, "14", df["Kostenstelle"])
-
-        df["GW_Verkauf_2"] = (df["Konto_Nr"].str.match(r"^[78]1")) & (df["Kostenstelle"].str.match(r"^[^2]\d"))
-        df["Kostenstelle"] = np.where(df["GW_Verkauf_2"] == True, "21", df["Kostenstelle"])
-
-        df["GW_Verkauf_3"] = (df["Konto_Nr"].str.match(r"^[78]3")) & (df["Kostenstelle"].str.match(r"^[^3]\d"))
-        df["Kostenstelle"] = np.where(df["GW_Verkauf_3"] == True, "31", df["Kostenstelle"])
-
-        df["GW_Verkauf_4"] = (df["Konto_Nr"].str.match(r"^[78]4")) & (df["Kostenstelle"].str.match(r"^[^4]\d"))
-        df["Kostenstelle"] = np.where(df["GW_Verkauf_4"] == True, "41", df["Kostenstelle"])
-
-        df["GW_Verkauf_x420"] = df["Konto_Nr"].str.match(r"^[78]420")
-        df["Kostenstelle"] = np.where(df["GW_Verkauf_x420"] == True, "42", df["Kostenstelle"])
-
-        df["GW_Verkauf_5"] = (df["Konto_Nr"].str.match(r"^[78]5")) & (df["Kostenstelle"].str.match(r"^[^5]\d"))
-        df["Kostenstelle"] = np.where(df["GW_Verkauf_5"] == True, "51", df["Kostenstelle"])
-
-        df["GW_Verkauf_50"] = (df["Konto_Nr"].str.match(r"^[78]")) & (df["Kostenstelle"].str.match(r"^2"))
-        df["Kostenträger"] = np.where(df["GW_Verkauf_50"] == True, "52", df["Kostenträger"])
-        df["Kostenträger"] = np.where(
-            (df["GW_Verkauf_50"] == True) & (df["Marke"] == "01"),
-            "50",
-            df["Kostenträger"],
-        )
-
-        df["NW_Verkauf_00"] = (
-            (df["Konto_Nr"].str.match(r"^[78]2"))
-            & (df["Kostenstelle"].str.match(r"^1"))
-            & (df["Kostenträger"].str.match(r"^[^01234]"))
-        )
-        df["Kostenträger"] = np.where(df["NW_Verkauf_00"] == True, "00", df["Kostenträger"])
-
-        df["GW_Stk_50"] = (df["Konto_Nr"].str.match(r"^9130")) & (df["Kostenstelle"].str.match(r"^2"))
-        df["Kostenträger"] = np.where(df["GW_Stk_50"] == True, "52", df["Kostenträger"])
-        df["Kostenträger"] = np.where((df["GW_Stk_50"] == True) & (df["Marke"] == "01"), "50", df["Kostenträger"])
-
-        df["Kostenträger"] = np.where(df["Bilanz"] == True, "00", df["Kostenträger"])
-
-        df["Konto_5er"] = (df["Konto_Nr"].str.match("^5")) | (df["Konto_Nr"].str.match("^9143"))
-        df["Absatzkanal"] = np.where(df["Konto_5er"] == True, "99", df["Absatzkanal"])
-
-        df["Konto_5005"] = (df["Konto_Nr"].str.match("^5005")) & (df["Kostenstelle"].str.match(r"^[^12]"))
-        df["Kostenstelle"] = np.where(df["Konto_5005"] == True, "20", df["Kostenstelle"])
-        df["Kostenträger"] = np.where(df["Konto_5005"] == True, "50", df["Kostenträger"])
-
-        df["Konto_5007"] = (df["Konto_Nr"].str.match("^5007")) & (df["Kostenstelle"].str.match(r"^([^4]|42)"))
-        df["Kostenstelle"] = np.where(df["Konto_5007"] == True, "41", df["Kostenstelle"])
-        df["Kostenträger"] = np.where(df["Konto_5007"] == True, "70", df["Kostenträger"])
-
-        df["Konto_914er"] = (df["Konto_Nr"].str.match("^914[34]")) & (df["Kostenträger"].str.match(r"^[^7]"))
-        df["Kostenträger"] = np.where(df["Konto_914er"] == True, "70", df["Kostenträger"])
-
-        df["Teile_30_60"] = (
-            (df["Konto_Nr"].str.match(r"^[578]"))
-            & (df["Kostenstelle"].str.match(r"^[3]"))
-            & (df["Kostenträger"].str.match(r"^[^6]"))
-        )
-        df["Kostenträger"] = np.where(df["Teile_30_60"] == True, "60", df["Kostenträger"])
-
-        df["Service_40_70"] = (
-            (df["Konto_Nr"].str.match(r"^[578]"))
-            & (df["Kostenstelle"].str.match(r"^[4]"))
-            & (df["Kostenträger"].str.match(r"^[^7]"))
-        )
-        df["Kostenträger"] = np.where(df["Service_40_70"] == True, "70", df["Kostenträger"])
-
-        df["KRM"] = df["Marke"] + df["Standort"] + df["Kostenstelle"] + df["Absatzkanal"] + df["Kostenträger"]
-        df["Konto_Nr_SKR51"] = (
-            (df["Marke"] + "-" + df["Standort"] + "-" + df["Konto_Nr"])
-            + "-"
-            + (df["Kostenstelle"] + "-" + df["Absatzkanal"] + "-" + df["Kostenträger"])
-        )
-        df["IsNumeric"] = (
-            (df["KRM"].str.isdigit())
-            & (df["Konto_Nr"].str.isdigit())
-            & (df["Konto_Nr"].str.len() == 4)
-            # & (df["Konto_Nr_SKR51"].str.len() == 19)
-        )
-        df_invalid = df[df["IsNumeric"] == False]
-        df_invalid.to_csv(self.export_invalid_filename, decimal=",", sep=";", encoding="latin-1", index=False)
-        return df[df["IsNumeric"] == True][TRANSLATE]
-
-    def load_bookings_from_file(self):
-        df2 = []
-        timestamps = []
-
-        for csv_file in self.account_bookings:
-            df2.append(
-                pd.read_csv(
-                    csv_file,
-                    decimal=",",
-                    sep=";",
-                    encoding="latin-1",
-                    converters={0: str, 1: str},
-                )
-            )
-            timestamps.append(Path(csv_file).stat().st_mtime)
-        self.booking_date = datetime.fromtimestamp(max(timestamps))
-        self.df_bookings = pd.concat(df2)
-        self.df_bookings["amount"] = (self.df_bookings["Debit Amount"] + self.df_bookings["Credit Amount"]).round(2)
-
-    def filter_bookings(self):
-        if self.df_bookings is None:
-            self.load_bookings_from_file()
-        # Kontensalden auf gegebenen Monat filtern
-        filter_from = self.current_year + self.first_month_of_financial_year
-        filter_prev = self.last_year + self.first_month_of_financial_year
-
-        if self.first_month_of_financial_year > self.current_month:
-            filter_from = self.last_year + self.first_month_of_financial_year
-            filter_prev = self.last_year2 + self.first_month_of_financial_year
-        filter_to = self.current_year + self.current_month
-        filter_opening = self.current_year + "00"
-        filter_prev_opening = self.last_year + "00"
-        prev_year_closed = True
-
-        df_opening_balance = self.df_bookings[(self.df_bookings["Bookkeep Period"] == filter_opening)]
-        if df_opening_balance.shape[0] == 0:
-            df_opening_balance = self.df_bookings[
-                (self.df_bookings["Bookkeep Period"] == filter_prev_opening)
-                | (
-                    (self.df_bookings["Bookkeep Period"] >= filter_prev)
-                    & (self.df_bookings["Bookkeep Period"] < filter_from)
-                )
-            ].copy()
-            df_opening_balance["Bookkeep Period"] = filter_opening
-            prev_year_closed = False
-
-        df_opening_balance = df_opening_balance[(df_opening_balance["Konto_Nr_Händler"].str.contains(r"-[013]\d\d+-"))]
-        opening_balance = df_opening_balance["amount"].aggregate("sum").round(2)
-        logging.info("Gewinn/Verlustvortrag")
-        logging.info(opening_balance)
-
-        if not prev_year_closed:
-            row = {
-                "Konto_Nr_Händler": "01-01-0861-00-00-00",
-                "Bookkeep Period": filter_opening,
-                "Debit Amount": opening_balance * -1,
-                "Credit Amount": 0,
-                "Debit Quantity": 0,
-                "Credit Quantity": 0,
-                "amount": opening_balance * -1,
-            }
-            df_opening_balance = pd.concat([df_opening_balance, pd.DataFrame.from_records([row])])
-
-        df_filtered = self.df_bookings[
-            (self.df_bookings["Bookkeep Period"] >= filter_from) & (self.df_bookings["Bookkeep Period"] <= filter_to)
-        ]
-
-        # Buchungen kopieren und als Statistikkonten anhängen
-        df_stats = df_filtered.copy()
-        # df_stats = df_stats[df_stats['Konto_Nr_Händler'].str.match(r'-[24578]\d\d\d-')]
-        df_stats["Konto_Nr_Händler"] = df_stats["Konto_Nr_Händler"].str.replace(r"-(\d\d\d+)-", r"-\1_STK-", regex=True)
-        df_stats["amount"] = (df_filtered["Debit Quantity"] + df_filtered["Credit Quantity"]).round(2)
-
-        df_combined = pd.concat([df_opening_balance, df_filtered, df_stats])
-
-        # Spalten konvertieren
-        df_combined["period"] = df_combined["Bookkeep Period"].apply(lambda x: self.bookkeep_filter[x])
-        return df_combined[df_combined["amount"] != 0.00]
-
-    @property
-    def export_filename(self):
-        return self.export_filename_for_period(self.current_year, self.current_month)
-
-    @property
-    def export_info_dir(self):
-        return f"{self.base_dir}/Export/{self.current_year}/info/"
-
-    @property
-    def export_invalid_filename(self):
-        return f"{self.base_dir}/Export/ungueltig.csv"
-
-    def export_filename_for_period(self, year, month):
-        return f"{self.base_dir}/Export/{year}/export_{year}-{month}.xml"
-
-    def export_skr51_xml(self, records, bk_filter, period_no, makes_used, sites_used, main_site, filename):
-        record_elements = ACCOUNT_INFO + ["Decimals"] + list(bk_filter.values())[:period_no] + ["CumulatedYear"]
-        root = ET.Element("HbvData")
-        h = ET.SubElement(root, "Header")
-        for k, v in self.header(makes_used, sites_used, main_site).items():
-            ET.SubElement(h, k).text = str(v)
-
-        make_list = ET.SubElement(root, "MakeList")
-        for make in makes_used:
-            if make not in self.makes:
-                continue
-            e = ET.SubElement(make_list, "MakeListEntry")
-            ET.SubElement(e, "Make").text = make
-            ET.SubElement(e, "MakeCode").text = self.makes[make]
-
-        bm_code_list = ET.SubElement(root, "BmCodeList")
-        for s in sites_used:
-            make, site = s.split("-")
-            if s not in self.sites:
-                continue
-            e = ET.SubElement(bm_code_list, "BmCodeEntry")
-            ET.SubElement(e, "Make").text = make
-            ET.SubElement(e, "Site").text = site
-            ET.SubElement(e, "BmCode").text = self.sites[s]
-
-        record_list = ET.SubElement(root, "RecordList")
-        for row in records:
-            record = ET.SubElement(record_list, "Record")
-            for e in record_elements:
-                child = ET.SubElement(record, e)
-                field = row.get(e, 0.0)
-                if str(field) == "nan":
-                    field = "0"
-                elif type(field) is float:
-                    field = "{:.0f}".format(field * 100)
-                child.text = str(field)
-
-        with open(filename, "w", encoding="utf-8") as fwh:
-            fwh.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent="  "))
-
-    def convert_to_row(self, node):
-        return [child.text for child in node]
-
-    def convert_xml_to_csv(self, xmlfile, csvfile):
-        record_list = ET.parse(xmlfile).getroot().find("RecordList")
-        header = [child.tag for child in record_list.find("Record")]
-        bookings = [self.convert_to_row(node) for node in record_list.findall("Record")]
-        with open(csvfile, "w") as fwh:
-            cwh = csv.writer(fwh, delimiter=";")
-            cwh.writerow(header)
-            cwh.writerows(bookings)
-        return True
-
-    def convert_csv_to_xml(self, csvfile, xmlfile):
-        self.makes = {"01": "1844"}
-        self.sites = {"01-01": "1844"}
-        with open(csvfile, "r", encoding="latin-1") as frh:
-            csv_reader = csv.DictReader(frh, delimiter=";")
-            self.export_skr51_xml(csv_reader, self.bookkeep_filter(), 1, list(self.sites.values())[0], xmlfile)
-
-
-def gchr_local():
-    base_dir = os.getcwd() + "/../GCHR2_Testdaten/Kunden"
-    for path in Path(base_dir).glob("*"):
-        if path.is_dir():
-            print(path.name)
-            gchr_export(str(path))
-
-
-def gchr_export(base_dir):
-    gchr = GCHR(base_dir)
-    # gchr.export_all_periods(overwrite=True, today="2022-08-01")
-    gchr.export_all_periods()
-
-
-if __name__ == "__main__":
-    gchr_local()
-    # import cProfile
-    # cProfile.run(
-    #     "gchr_local()",
-    #     "gchr_local.prof",
-    # )

+ 0 - 6
gcstruct/gchr2.py

@@ -1,6 +0,0 @@
-import os
-
-import gchr
-
-if __name__ == "__main__":
-    gchr.gchr_export(os.getcwd())

+ 16 - 2
gcstruct/gcstruct.py

@@ -400,7 +400,16 @@ class GCStruct:
             df_translate[t_from] = df[df[last + "_Nr"] != ""][from_label].rename(
                 columns=dict(zip(from_label, to_label))
             )
-            # print(df_translate[t_to].head())
+
+        df_t4 = df_translate["Konto_Nr"].copy()
+        df_t4 = df_t4[df_t4["SKR51_Nr"].str.match(r"^[01]")]
+        df_t5 = df_t4.copy()
+        df_t4["SKR51_Nr"] = df_t4["SKR51_Nr"] + "_A"
+        df_t4["SKR51"] = df_t4["SKR51"] + "_A"
+        df_t5["SKR51_Nr"] = df_t5["SKR51_Nr"] + "_P"
+        df_t5["SKR51"] = df_t5["SKR51"] + "_P"
+        df_translate["Konto_Nr"] = pd.concat([df_translate["Konto_Nr"], df_t4, df_t5])
+        # print(df_translate[t_to].head())
 
         df2 = []
         for ac_file in accounts_combined_files:
@@ -416,7 +425,12 @@ class GCStruct:
         df_source = pd.concat(df2)
         df3 = df_source.copy()
         df3["Konto_Nr"] = df3["Konto_Nr"] + "_STK"
-        df_source = pd.concat([df_source, df3])
+        df4 = df_source.copy()
+        df4 = df4[df4["Konto_Nr"].str.match(r"^[01]")]
+        df5 = df4.copy()
+        df4["Konto_Nr"] = df4["Konto_Nr"] + "_A"
+        df5["Konto_Nr"] = df5["Konto_Nr"] + "_P"
+        df_source = pd.concat([df_source, df3, df4, df5])
 
         for t_from, t_to in self.translate.items():
             if t_to == "SKR51":
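
The hunk above duplicates every account whose number starts with 0 or 1 into an "_A" and a "_P" variant, so the same account can appear on both the assets (Aktiva) and liabilities (Passiva) side of the balance sheet. A minimal pandas sketch of the same idea, with column names taken from the hunk and made-up account numbers:

# Balance-sheet accounts (leading 0/1) are copied twice with suffixed numbers.
import pandas as pd

df = pd.DataFrame({"Konto_Nr": ["0100", "1200", "4100"]})
balance = df[df["Konto_Nr"].str.match(r"^[01]")]
aktiva = balance.assign(Konto_Nr=balance["Konto_Nr"] + "_A")
passiva = balance.assign(Konto_Nr=balance["Konto_Nr"] + "_P")
print(pd.concat([df, aktiva, passiva], ignore_index=True))
# 0100 and 1200 now exist three times (plain, _A, _P); 4100 stays unique.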

+ 1 - 1
gcstruct/gcstruct_uebersetzung.bat → gcstruct_uebersetzung.bat

@@ -1,4 +1,4 @@
 cd /d %~dp0
-call ..\.venv\Scripts\activate.bat
+call .venv\Scripts\activate.bat
 pyinstaller -F --path %~dp0 gcstruct_uebersetzung.py
 pause

+ 1 - 3
gcstruct/gcstruct_uebersetzung.py → gcstruct_uebersetzung.py

@@ -1,8 +1,6 @@
-import sys
 from pathlib import Path
 
-sys.path.insert(0, "C:\\Projekte\\Python\\gcstruct")
-from gcstruct import GCStruct  # noqa:E402
+from gcstruct.gcstruct import GCStruct
 
 
 def get_base_and_import_dir(base_dir):

+ 38 - 44
gcstruct/gcstruct_uebersetzung.spec → gcstruct_uebersetzung.spec

@@ -1,44 +1,38 @@
-# -*- mode: python ; coding: utf-8 -*-
-
-
-block_cipher = None
-
-
-a = Analysis(
-    ['gcstruct_uebersetzung.py'],
-    pathex=['C:\\Projekte\\Python\\gcstruct\\'],
-    binaries=[],
-    datas=[],
-    hiddenimports=[],
-    hookspath=[],
-    hooksconfig={},
-    runtime_hooks=[],
-    excludes=[],
-    win_no_prefer_redirects=False,
-    win_private_assemblies=False,
-    cipher=block_cipher,
-    noarchive=False,
-)
-pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
-
-exe = EXE(
-    pyz,
-    a.scripts,
-    a.binaries,
-    a.zipfiles,
-    a.datas,
-    [],
-    name='gcstruct_uebersetzung',
-    debug=False,
-    bootloader_ignore_signals=False,
-    strip=False,
-    upx=True,
-    upx_exclude=[],
-    runtime_tmpdir=None,
-    console=True,
-    disable_windowed_traceback=False,
-    argv_emulation=False,
-    target_arch=None,
-    codesign_identity=None,
-    entitlements_file=None,
-)
+# -*- mode: python ; coding: utf-8 -*-
+
+
+a = Analysis(
+    ['gcstruct_uebersetzung.py'],
+    pathex=['C:\\Projekte\\Python\\'],
+    binaries=[],
+    datas=[],
+    hiddenimports=[],
+    hookspath=[],
+    hooksconfig={},
+    runtime_hooks=[],
+    excludes=[],
+    noarchive=False,
+    optimize=0,
+)
+pyz = PYZ(a.pure)
+
+exe = EXE(
+    pyz,
+    a.scripts,
+    a.binaries,
+    a.datas,
+    [],
+    name='gcstruct_uebersetzung',
+    debug=False,
+    bootloader_ignore_signals=False,
+    strip=False,
+    upx=True,
+    upx_exclude=[],
+    runtime_tmpdir=None,
+    console=True,
+    disable_windowed_traceback=False,
+    argv_emulation=False,
+    target_arch=None,
+    codesign_identity=None,
+    entitlements_file=None,
+)
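
The rewritten spec drops block_cipher, the cipher= arguments and the win_* options, and adds optimize=0 — this appears to match the spec format generated by PyInstaller 6.x, where bytecode encryption was removed. A hedged guard in case an older PyInstaller is still on the path (the version gate is an assumption, not from the repo):

# Assumed compatibility check for the 6.x-style spec above
# (no block_cipher, PYZ without zipped_data).
import PyInstaller

major = int(PyInstaller.__version__.split(".")[0])
if major < 6:
    raise SystemExit("this .spec targets PyInstaller >= 6; found " + PyInstaller.__version__)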

+ 6 - 5
mailserver/imap.py

@@ -1,10 +1,11 @@
-from imap_tools import MailBox, AND, MailMessage
-import re
 import json
 import os
-import plac
-from datetime import datetime, date
+import re
 from dataclasses import dataclass
+from datetime import datetime, timedelta
+
+import plac
+from imap_tools import AND, MailBox, MailMessage
 
 
 @dataclass
@@ -35,7 +36,7 @@ class Imap:
 
     def cleanup(self):
         date_now = datetime.now()
-        date_criteria = date(date_now.year, date_now.month - 1, 1)
+        date_criteria = (date_now - timedelta(days=60)).date()
         msg_limit = 100
 
         with self.connect("archiv") as mb:
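
The old cutoff date(date_now.year, date_now.month - 1, 1) raises ValueError every January, because month - 1 becomes 0; the timedelta(days=60) rewrite sidesteps month arithmetic entirely, at the cost of an approximate rather than calendar-aligned cutoff. A quick repro of the failure mode:

# Repro of the January bug the timedelta rewrite fixes.
from datetime import date, datetime, timedelta

now = datetime(2025, 1, 15)
try:
    date(now.year, now.month - 1, 1)      # month becomes 0 in January
except ValueError as e:
    print(e)                              # "month must be in 1..12"
print((now - timedelta(days=60)).date())  # 2024-11-16: safe, if approximate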

+ 4 - 6
sandbox/test_ohno.py

@@ -1,17 +1,15 @@
 import unittest
-import ohno
+
+from sandbox import ohno
 
 
 class test_ohno(unittest.TestCase):
     def test_simple(self):
         board = ohno.Board(4)
-        board.set_initial([[-1, -1, -1, -1],
-                           [-1, -1, -1, -1],
-                           [-1, -1, -1, -1],
-                           [-1, -1, -1, -1]])
+        board.set_initial([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, -1, -1, -1], [-1, -1, -1, -1]])
         solved = board.solve()
         self.assertEqual(len(solved), 1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
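
Rewriting import ohno as from sandbox import ohno (and likewise for relationship and sudoku_solver below) makes the tests importable from the repository root instead of relying on sys.path hacks or the working directory. Presumably they are now run from the root, e.g.:

# Assumed invocation after the package-style imports:
#   python -m unittest discover -s sandbox -t .
# (-t . keeps the repo root on sys.path so `from sandbox import ...` resolves)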

+ 124 - 128
sandbox/test_relationship.py

@@ -1,164 +1,160 @@
-import relationship
 import unittest
 
+from sandbox import relationship
 
 relations_first_grade_female = [
-    (('1.2.3.4.5', '1.2.3.4', 'w'), 'Mutter'),
-    (('1.2.3.4.5', '1.2.3', 'w'), 'Großmutter'),
-    (('1.2.3.4.5', '1.2', 'w'), 'Ur-Großmutter'),
-    (('1.2.3.4.5', '1', 'w'), 'Ur-Ur-Großmutter'),
-    (('1.2.3.4.5', '1.2.3.4.5.6', 'w'), 'Tochter'),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7', 'w'), 'Enkelin'),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7.8', 'w'), 'Ur-Enkelin'),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7.8.9', 'w'), 'Ur-Ur-Enkelin'),
+    (("1.2.3.4.5", "1.2.3.4", "w"), "Mutter"),
+    (("1.2.3.4.5", "1.2.3", "w"), "Großmutter"),
+    (("1.2.3.4.5", "1.2", "w"), "Ur-Großmutter"),
+    (("1.2.3.4.5", "1", "w"), "Ur-Ur-Großmutter"),
+    (("1.2.3.4.5", "1.2.3.4.5.6", "w"), "Tochter"),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7", "w"), "Enkelin"),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7.8", "w"), "Ur-Enkelin"),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7.8.9", "w"), "Ur-Ur-Enkelin"),
 ]
 
 relations_second_grade_female = [
-    (('1.2.3.4.5', '1.2.3.x', 'w'), 'Tante'),
-    (('1.2.3.4.5', '1.2.x', 'w'), 'Großtante'),
-    (('1.2.3.4.5', '1.x', 'w'), 'Ur-Großtante'),
-    (('1.2.3.4.5', 'x', 'w'), 'Ur-Ur-Großtante'),
-    (('1.2.3.4.5', '1.2.3.4.x', 'w'), 'Schwester'),
-    (('1.2.3.4.5', '1.2.3.4.x.6', 'w'), 'Nichte'),
-    (('1.2.3.4.5', '1.2.3.4.x.6.7', 'w'), 'Großnichte'),
-    (('1.2.3.4.5', '1.2.3.4.x.6.7.8', 'w'), 'Ur-Großnichte'),
-    (('1.2.3.4.5', '1.2.3.4.x.6.7.8.9', 'w'), 'Ur-Ur-Großnichte'),
+    (("1.2.3.4.5", "1.2.3.x", "w"), "Tante"),
+    (("1.2.3.4.5", "1.2.x", "w"), "Großtante"),
+    (("1.2.3.4.5", "1.x", "w"), "Ur-Großtante"),
+    (("1.2.3.4.5", "x", "w"), "Ur-Ur-Großtante"),
+    (("1.2.3.4.5", "1.2.3.4.x", "w"), "Schwester"),
+    (("1.2.3.4.5", "1.2.3.4.x.6", "w"), "Nichte"),
+    (("1.2.3.4.5", "1.2.3.4.x.6.7", "w"), "Großnichte"),
+    (("1.2.3.4.5", "1.2.3.4.x.6.7.8", "w"), "Ur-Großnichte"),
+    (("1.2.3.4.5", "1.2.3.4.x.6.7.8.9", "w"), "Ur-Ur-Großnichte"),
 ]
 
 relations_higher_grade_female = [
-    (('1.2.3.4.5', '1.2.3.x', 'w'), 'Tante'),
-    (('1.2.3.4.5', '1.2.x.x', 'w'), 'Tante 2. Grades'),
-    (('1.2.3.4.5', '1.x.x.x', 'w'), 'Tante 3. Grades'),
-    (('1.2.3.4.5', 'x.x.x.x', 'w'), 'Tante 4. Grades'),
-    (('1.2.3.4.5', '1.2.x', 'w'), 'Großtante'),
-    (('1.2.3.4.5', '1.x.x', 'w'), 'Großtante 2. Grades'),
-    (('1.2.3.4.5', 'x.x.x', 'w'), 'Großtante 3. Grades'),
-    (('1.2.3.4.5', '1.x', 'w'), 'Ur-Großtante'),
-    (('1.2.3.4.5', 'x.x', 'w'), 'Ur-Großtante 2. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x', 'w'), 'Nichte'),
-    (('1.2.3.4.5', '1.2.3.x.x.x', 'w'), 'Nichte 2. Grades'),
-    (('1.2.3.4.5', '1.2.x.x.x.x', 'w'), 'Nichte 3. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x.x', 'w'), 'Nichte 4. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x', 'w'), 'Großnichte'),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x', 'w'), 'Großnichte 2. Grades'),
-    (('1.2.3.4.5', '1.2.x.x.x.x.x', 'w'), 'Großnichte 3. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x.x.x', 'w'), 'Großnichte 4. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x.x', 'w'), 'Ur-Großnichte'),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x.x', 'w'), 'Ur-Großnichte 2. Grades'),
-    (('1.2.3.4.5', '1.2.x.x.x.x.x.x', 'w'), 'Ur-Großnichte 3. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x.x.x.x', 'w'), 'Ur-Großnichte 4. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x.x.x', 'w'), 'Ur-Ur-Großnichte'),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x.x.x', 'w'), 'Ur-Ur-Großnichte 2. Grades'),
+    (("1.2.3.4.5", "1.2.3.x", "w"), "Tante"),
+    (("1.2.3.4.5", "1.2.x.x", "w"), "Tante 2. Grades"),
+    (("1.2.3.4.5", "1.x.x.x", "w"), "Tante 3. Grades"),
+    (("1.2.3.4.5", "x.x.x.x", "w"), "Tante 4. Grades"),
+    (("1.2.3.4.5", "1.2.x", "w"), "Großtante"),
+    (("1.2.3.4.5", "1.x.x", "w"), "Großtante 2. Grades"),
+    (("1.2.3.4.5", "x.x.x", "w"), "Großtante 3. Grades"),
+    (("1.2.3.4.5", "1.x", "w"), "Ur-Großtante"),
+    (("1.2.3.4.5", "x.x", "w"), "Ur-Großtante 2. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x", "w"), "Nichte"),
+    (("1.2.3.4.5", "1.2.3.x.x.x", "w"), "Nichte 2. Grades"),
+    (("1.2.3.4.5", "1.2.x.x.x.x", "w"), "Nichte 3. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x.x", "w"), "Nichte 4. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x", "w"), "Großnichte"),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x", "w"), "Großnichte 2. Grades"),
+    (("1.2.3.4.5", "1.2.x.x.x.x.x", "w"), "Großnichte 3. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x.x.x", "w"), "Großnichte 4. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x.x", "w"), "Ur-Großnichte"),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x.x", "w"), "Ur-Großnichte 2. Grades"),
+    (("1.2.3.4.5", "1.2.x.x.x.x.x.x", "w"), "Ur-Großnichte 3. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x.x.x.x", "w"), "Ur-Großnichte 4. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x.x.x", "w"), "Ur-Ur-Großnichte"),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x.x.x", "w"), "Ur-Ur-Großnichte 2. Grades"),
 ]
 
 relations_special_grade_female = [
-    (('1.2.3.4.5', '1.2.3.4.x', 'w'), 'Schwester'),
-    (('1.2.3.4.5', '1.2.3.x.x', 'w'), 'Cousine'),
-    (('1.2.3.4.5', '1.2.x.x.x', 'w'), 'Cousine 2. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x', 'w'), 'Cousine 3. Grades')
+    (("1.2.3.4.5", "1.2.3.4.x", "w"), "Schwester"),
+    (("1.2.3.4.5", "1.2.3.x.x", "w"), "Cousine"),
+    (("1.2.3.4.5", "1.2.x.x.x", "w"), "Cousine 2. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x", "w"), "Cousine 3. Grades"),
 ]
 
 relations_first_grade_male = [
-    (('1.2.3.4.5', '1.2.3.4', 'm'), 'Vater'),
-    (('1.2.3.4.5', '1.2.3', 'm'), 'Großvater'),
-    (('1.2.3.4.5', '1.2', 'm'), 'Ur-Großvater'),
-    (('1.2.3.4.5', '1', 'm'), 'Ur-Ur-Großvater'),
-    (('1.2.3.4.5', '1.2.3.4.5.6', 'm'), 'Sohn'),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7', 'm'), 'Enkel'),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7.8', 'm'), 'Ur-Enkel'),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7.8.9', 'm'), 'Ur-Ur-Enkel'),
+    (("1.2.3.4.5", "1.2.3.4", "m"), "Vater"),
+    (("1.2.3.4.5", "1.2.3", "m"), "Großvater"),
+    (("1.2.3.4.5", "1.2", "m"), "Ur-Großvater"),
+    (("1.2.3.4.5", "1", "m"), "Ur-Ur-Großvater"),
+    (("1.2.3.4.5", "1.2.3.4.5.6", "m"), "Sohn"),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7", "m"), "Enkel"),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7.8", "m"), "Ur-Enkel"),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7.8.9", "m"), "Ur-Ur-Enkel"),
 ]
 
 relations_second_grade_male = [
-    (('1.2.3.4.5', '1.2.3.x', 'm'), 'Onkel'),
-    (('1.2.3.4.5', '1.2.x', 'm'), 'Großonkel'),
-    (('1.2.3.4.5', '1.x', 'm'), 'Ur-Großonkel'),
-    (('1.2.3.4.5', 'x', 'm'), 'Ur-Ur-Großonkel'),
-    (('1.2.3.4.5', '1.2.3.4.x', 'm'), 'Bruder'),
-    (('1.2.3.4.5', '1.2.3.4.x.6', 'm'), 'Neffe'),
-    (('1.2.3.4.5', '1.2.3.4.x.6.7', 'm'), 'Großneffe'),
-    (('1.2.3.4.5', '1.2.3.4.x.6.7.8', 'm'), 'Ur-Großneffe'),
-    (('1.2.3.4.5', '1.2.3.4.x.6.7.8.9', 'm'), 'Ur-Ur-Großneffe'),
+    (("1.2.3.4.5", "1.2.3.x", "m"), "Onkel"),
+    (("1.2.3.4.5", "1.2.x", "m"), "Großonkel"),
+    (("1.2.3.4.5", "1.x", "m"), "Ur-Großonkel"),
+    (("1.2.3.4.5", "x", "m"), "Ur-Ur-Großonkel"),
+    (("1.2.3.4.5", "1.2.3.4.x", "m"), "Bruder"),
+    (("1.2.3.4.5", "1.2.3.4.x.6", "m"), "Neffe"),
+    (("1.2.3.4.5", "1.2.3.4.x.6.7", "m"), "Großneffe"),
+    (("1.2.3.4.5", "1.2.3.4.x.6.7.8", "m"), "Ur-Großneffe"),
+    (("1.2.3.4.5", "1.2.3.4.x.6.7.8.9", "m"), "Ur-Ur-Großneffe"),
 ]
 
 relations_higher_grade_male = [
-    (('1.2.3.4.5', '1.2.3.x', 'm'), 'Onkel'),
-    (('1.2.3.4.5', '1.2.x.x', 'm'), 'Onkel 2. Grades'),
-    (('1.2.3.4.5', '1.x.x.x', 'm'), 'Onkel 3. Grades'),
-    (('1.2.3.4.5', 'x.x.x.x', 'm'), 'Onkel 4. Grades'),
-    (('1.2.3.4.5', '1.2.x', 'm'), 'Großonkel'),
-    (('1.2.3.4.5', '1.x.x', 'm'), 'Großonkel 2. Grades'),
-    (('1.2.3.4.5', 'x.x.x', 'm'), 'Großonkel 3. Grades'),
-    (('1.2.3.4.5', '1.x', 'm'), 'Ur-Großonkel'),
-    (('1.2.3.4.5', 'x.x', 'm'), 'Ur-Großonkel 2. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x', 'm'), 'Neffe'),
-    (('1.2.3.4.5', '1.2.3.x.x.x', 'm'), 'Neffe 2. Grades'),
-    (('1.2.3.4.5', '1.2.x.x.x.x', 'm'), 'Neffe 3. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x.x', 'm'), 'Neffe 4. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x', 'm'), 'Großneffe'),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x', 'm'), 'Großneffe 2. Grades'),
-    (('1.2.3.4.5', '1.2.x.x.x.x.x', 'm'), 'Großneffe 3. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x.x.x', 'm'), 'Großneffe 4. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x.x', 'm'), 'Ur-Großneffe'),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x.x', 'm'), 'Ur-Großneffe 2. Grades'),
-    (('1.2.3.4.5', '1.2.x.x.x.x.x.x', 'm'), 'Ur-Großneffe 3. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x.x.x.x', 'm'), 'Ur-Großneffe 4. Grades'),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x.x.x', 'm'), 'Ur-Ur-Großneffe'),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x.x.x', 'm'), 'Ur-Ur-Großneffe 2. Grades'),
+    (("1.2.3.4.5", "1.2.3.x", "m"), "Onkel"),
+    (("1.2.3.4.5", "1.2.x.x", "m"), "Onkel 2. Grades"),
+    (("1.2.3.4.5", "1.x.x.x", "m"), "Onkel 3. Grades"),
+    (("1.2.3.4.5", "x.x.x.x", "m"), "Onkel 4. Grades"),
+    (("1.2.3.4.5", "1.2.x", "m"), "Großonkel"),
+    (("1.2.3.4.5", "1.x.x", "m"), "Großonkel 2. Grades"),
+    (("1.2.3.4.5", "x.x.x", "m"), "Großonkel 3. Grades"),
+    (("1.2.3.4.5", "1.x", "m"), "Ur-Großonkel"),
+    (("1.2.3.4.5", "x.x", "m"), "Ur-Großonkel 2. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x", "m"), "Neffe"),
+    (("1.2.3.4.5", "1.2.3.x.x.x", "m"), "Neffe 2. Grades"),
+    (("1.2.3.4.5", "1.2.x.x.x.x", "m"), "Neffe 3. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x.x", "m"), "Neffe 4. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x", "m"), "Großneffe"),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x", "m"), "Großneffe 2. Grades"),
+    (("1.2.3.4.5", "1.2.x.x.x.x.x", "m"), "Großneffe 3. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x.x.x", "m"), "Großneffe 4. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x.x", "m"), "Ur-Großneffe"),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x.x", "m"), "Ur-Großneffe 2. Grades"),
+    (("1.2.3.4.5", "1.2.x.x.x.x.x.x", "m"), "Ur-Großneffe 3. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x.x.x.x", "m"), "Ur-Großneffe 4. Grades"),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x.x.x", "m"), "Ur-Ur-Großneffe"),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x.x.x", "m"), "Ur-Ur-Großneffe 2. Grades"),
 ]
 
 relations_special_grade_male = [
-    (('1.2.3.4.5', '1.2.3.4.x', 'm'), 'Bruder'),
-    (('1.2.3.4.5', '1.2.3.x.x', 'm'), 'Cousin'),
-    (('1.2.3.4.5', '1.2.x.x.x', 'm'), 'Cousin 2. Grades'),
-    (('1.2.3.4.5', '1.x.x.x.x', 'm'), 'Cousin 3. Grades')
+    (("1.2.3.4.5", "1.2.3.4.x", "m"), "Bruder"),
+    (("1.2.3.4.5", "1.2.3.x.x", "m"), "Cousin"),
+    (("1.2.3.4.5", "1.2.x.x.x", "m"), "Cousin 2. Grades"),
+    (("1.2.3.4.5", "1.x.x.x.x", "m"), "Cousin 3. Grades"),
 ]
 
 relations_higher_grade_distance = [
-    (('1.2.3.4.5', '1.2.3.4.5.6', 'w'), 'Tochter', 1),
-    (('1.2.3.4.5', '1.2.3.4.5.6.7', 'w'), 'Enkelin', 2),
-    (('1.2.3.4.5', '1', 'w'), 'Ur-Ur-Großmutter', 4),
-    (('1.2.3.4.5', '1.2.3.x', 'w'), 'Tante', 3),
-    (('1.2.3.4.5', '1.2.x.x', 'w'), 'Tante 2. Grades', 5),
-    (('1.2.3.4.5', '1.x.x.x', 'w'), 'Tante 3. Grades', 7),
-    (('1.2.3.4.5', 'x.x.x.x', 'w'), 'Tante 4. Grades', 9),
-    (('1.2.3.4.5', '1.2.x', 'w'), 'Großtante', 4),
-    (('1.2.3.4.5', '1.x.x', 'w'), 'Großtante 2. Grades', 6),
-    (('1.2.3.4.5', 'x.x.x', 'w'), 'Großtante 3. Grades', 8),
-    (('1.2.3.4.5', '1.x', 'w'), 'Ur-Großtante', 5),
-    (('1.2.3.4.5', 'x.x', 'w'), 'Ur-Großtante 2. Grades', 7),
-    (('1.2.3.4.5', '1.2.3.4.x.x', 'w'), 'Nichte', 3),
-    (('1.2.3.4.5', '1.2.3.x.x.x', 'w'), 'Nichte 2. Grades', 5),
-    (('1.2.3.4.5', '1.2.x.x.x.x', 'w'), 'Nichte 3. Grades', 7),
-    (('1.2.3.4.5', '1.x.x.x.x.x', 'w'), 'Nichte 4. Grades', 9),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x', 'w'), 'Großnichte', 4),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x', 'w'), 'Großnichte 2. Grades', 6),
-    (('1.2.3.4.5', '1.2.x.x.x.x.x', 'w'), 'Großnichte 3. Grades', 8),
-    (('1.2.3.4.5', '1.x.x.x.x.x.x', 'w'), 'Großnichte 4. Grades', 10),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x.x', 'w'), 'Ur-Großnichte', 5),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x.x', 'w'), 'Ur-Großnichte 2. Grades', 7),
-    (('1.2.3.4.5', '1.2.x.x.x.x.x.x', 'w'), 'Ur-Großnichte 3. Grades', 9),
-    (('1.2.3.4.5', '1.x.x.x.x.x.x.x', 'w'), 'Ur-Großnichte 4. Grades', 11),
-    (('1.2.3.4.5', '1.2.3.4.x.x.x.x.x', 'w'), 'Ur-Ur-Großnichte', 6),
-    (('1.2.3.4.5', '1.2.3.x.x.x.x.x.x', 'w'), 'Ur-Ur-Großnichte 2. Grades', 8),
+    (("1.2.3.4.5", "1.2.3.4.5.6", "w"), "Tochter", 1),
+    (("1.2.3.4.5", "1.2.3.4.5.6.7", "w"), "Enkelin", 2),
+    (("1.2.3.4.5", "1", "w"), "Ur-Ur-Großmutter", 4),
+    (("1.2.3.4.5", "1.2.3.x", "w"), "Tante", 3),
+    (("1.2.3.4.5", "1.2.x.x", "w"), "Tante 2. Grades", 5),
+    (("1.2.3.4.5", "1.x.x.x", "w"), "Tante 3. Grades", 7),
+    (("1.2.3.4.5", "x.x.x.x", "w"), "Tante 4. Grades", 9),
+    (("1.2.3.4.5", "1.2.x", "w"), "Großtante", 4),
+    (("1.2.3.4.5", "1.x.x", "w"), "Großtante 2. Grades", 6),
+    (("1.2.3.4.5", "x.x.x", "w"), "Großtante 3. Grades", 8),
+    (("1.2.3.4.5", "1.x", "w"), "Ur-Großtante", 5),
+    (("1.2.3.4.5", "x.x", "w"), "Ur-Großtante 2. Grades", 7),
+    (("1.2.3.4.5", "1.2.3.4.x.x", "w"), "Nichte", 3),
+    (("1.2.3.4.5", "1.2.3.x.x.x", "w"), "Nichte 2. Grades", 5),
+    (("1.2.3.4.5", "1.2.x.x.x.x", "w"), "Nichte 3. Grades", 7),
+    (("1.2.3.4.5", "1.x.x.x.x.x", "w"), "Nichte 4. Grades", 9),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x", "w"), "Großnichte", 4),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x", "w"), "Großnichte 2. Grades", 6),
+    (("1.2.3.4.5", "1.2.x.x.x.x.x", "w"), "Großnichte 3. Grades", 8),
+    (("1.2.3.4.5", "1.x.x.x.x.x.x", "w"), "Großnichte 4. Grades", 10),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x.x", "w"), "Ur-Großnichte", 5),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x.x", "w"), "Ur-Großnichte 2. Grades", 7),
+    (("1.2.3.4.5", "1.2.x.x.x.x.x.x", "w"), "Ur-Großnichte 3. Grades", 9),
+    (("1.2.3.4.5", "1.x.x.x.x.x.x.x", "w"), "Ur-Großnichte 4. Grades", 11),
+    (("1.2.3.4.5", "1.2.3.4.x.x.x.x.x", "w"), "Ur-Ur-Großnichte", 6),
+    (("1.2.3.4.5", "1.2.3.x.x.x.x.x.x", "w"), "Ur-Ur-Großnichte 2. Grades", 8),
 ]
 
 burghard = [
-    ('Harald', '1.4.6.3.4', 'm'),
-    ('Alice', '1.4.6.3.4.1', 'w'),
-    ('Janina', '1.4.6.3.4.2', 'w'),
-    ('Theresa', '1.4.6.3.4.2.1', 'w'),
-    ('Valentin', '1.4.6.3.4.2.2', 'm'),
-    ('Freya', '1.4.6.3.4.3', 'w')
+    ("Harald", "1.4.6.3.4", "m"),
+    ("Alice", "1.4.6.3.4.1", "w"),
+    ("Janina", "1.4.6.3.4.2", "w"),
+    ("Theresa", "1.4.6.3.4.2.1", "w"),
+    ("Valentin", "1.4.6.3.4.2.2", "m"),
+    ("Freya", "1.4.6.3.4.3", "w"),
 ]
 
-burghard_freya = [
-    ('Freya', '1.4.6.3.4.3', 'w', 'Tochter', 'Harald', '1.4.6.3.4', 'm', 'Vater', 1)
-]
+burghard_freya = [("Freya", "1.4.6.3.4.3", "w", "Tochter", "Harald", "1.4.6.3.4", "m", "Vater", 1)]
 
-burghard_all = [
-    ('Harald', '1.4.6.3.4', 'm', 'ich', 'Harald', '1.4.6.3.4', 'm', 'ich', 0)
-]
+burghard_all = [("Harald", "1.4.6.3.4", "m", "ich", "Harald", "1.4.6.3.4", "m", "ich", 0)]
 
 
 class test_relatives(unittest.TestCase):
@@ -205,5 +201,5 @@ class test_relatives(unittest.TestCase):
         self.assertEqual(result[0], burghard_all[0])
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()

+ 54 - 54
sandbox/test_sudoku_solver.py

@@ -1,6 +1,6 @@
 import unittest
-import sudoku_solver
 
+from sandbox import sudoku_solver
 
 board_1 = [
     ".1.82....",
@@ -11,19 +11,19 @@ board_1 = [
     ".89...2..",
     "....8..79",
     "...5.3...",
-    "...792.3."
+    "...792.3.",
 ]
 
 solution_1 = [
-    '317825496',
-    '298346715',
-    '546971328',
-    '432618957',
-    '765239184',
-    '189457263',
-    '623184579',
-    '971563842',
-    '854792631'
+    "317825496",
+    "298346715",
+    "546971328",
+    "432618957",
+    "765239184",
+    "189457263",
+    "623184579",
+    "971563842",
+    "854792631",
 ]
 
 board_2 = [
@@ -35,46 +35,46 @@ board_2 = [
     "5....3...",
     "..75.....",
     ".869.173.",
-    ".4.3...86"
+    ".4.3...86",
 ]
 
 solution_2 = [
-    '163427598',
-    '428159673',
-    '759638241',
-    '632795814',
-    '891264357',
-    '574813962',
-    '317586429',
-    '286941735',
-    '945372186'
+    "163427598",
+    "428159673",
+    "759638241",
+    "632795814",
+    "891264357",
+    "574813962",
+    "317586429",
+    "286941735",
+    "945372186",
 ]
 
 board_3 = [
-    '..1..8763',
-    '8..9.....',
-    '....1..5.',
-    '.8....4..',
-    '1.54.3...',
-    '.6....2..',
-    '....6..2.',
-    '4..8.....',
-    '..6..1378'
+    "..1..8763",
+    "8..9.....",
+    "....1..5.",
+    ".8....4..",
+    "1.54.3...",
+    ".6....2..",
+    "....6..2.",
+    "4..8.....",
+    "..6..1378",
 ]
 
 solution_3 = [
-    '941258763',
-    '857936142',
-    '632714859',
-    '789625431',
-    '125483697',
-    '364179285',
-    '518367924',
-    '473892516',
-    '296541378'
+    "941258763",
+    "857936142",
+    "632714859",
+    "789625431",
+    "125483697",
+    "364179285",
+    "518367924",
+    "473892516",
+    "296541378",
 ]
 
-empty = ['.' * 9 for i in range(9)]
+empty = ["." * 9 for i in range(9)]
 
 
 class test_sudoku_solver(unittest.TestCase):
@@ -93,7 +93,7 @@ class test_sudoku_solver(unittest.TestCase):
     def test_simple_set(self):
         board = sudoku_solver.Board()
         board.set_initial(board_1)
-        board.set_cell(5, 7, '6')
+        board.set_cell(5, 7, "6")
         row = board.get_row(5)
         self.assertEqual(row, ".89...26.")
 
@@ -101,27 +101,27 @@ class test_sudoku_solver(unittest.TestCase):
         board = sudoku_solver.Board()
         board.set_initial(board_1)
         p = board.get_possibilities(1, 4)
-        self.assertEqual(p, {'1', '4', '5'})
+        self.assertEqual(p, {"1", "4", "5"})
 
     def test_all_possibilities(self):
         board = sudoku_solver.Board()
         board.set_initial(board_1)
         p = board.get_all_possibilities()
-        self.assertEqual(p[1][4], {'1', '4', '5'})
+        self.assertEqual(p[1][4], {"1", "4", "5"})
 
     def test_unique_cell(self):
         board = sudoku_solver.Board()
         board.set_initial(board_1)
         solving_steps = [
-            (5, 7, '6'),
-            (4, 7, '8'),
-            (4, 5, '9'),
-            (2, 5, '1'),
-            (2, 3, '9'),
-            (2, 7, '2'),
-            (6, 5, '4'),
-            (0, 5, '5'),
-            (1, 4, '4')
+            (5, 7, "6"),
+            (4, 7, "8"),
+            (4, 5, "9"),
+            (2, 5, "1"),
+            (2, 3, "9"),
+            (2, 7, "2"),
+            (6, 5, "4"),
+            (0, 5, "5"),
+            (1, 4, "4"),
         ]
         for x, y, v in solving_steps:
             cell = board.get_unique_cell()
@@ -142,8 +142,8 @@ class test_sudoku_solver(unittest.TestCase):
         board = sudoku_solver.Board()
         board.set_initial(board_3)
         b = board.solve()
-        self.assertEqual(b, empty)
+        self.assertEqual(b, solution_3)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
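
Note that the last hunk is a behavioral fix, not formatting: the test previously asserted that solve() on board_3 returned the empty board (i.e., accepted a solver that found nothing), whereas it now asserts the actual solution_3, so the suite fails if the solver regresses on that board.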