- import codecs
- import sys
- import pandas as pd
- from pyodbc import ProgrammingError
- sys.path.insert(0, "C:\\Projekte\\tools")
- from database.db_create import get_table_config # noqa:E402
- from database.model import DbCreateConfig, DestTable, SourceTable2 # noqa:E402
def decode_ts(ts_binary):
    """Render a binary SQL Server rowversion/timestamp as a hex literal, e.g. '0x00000000000007d1'."""
    # bytes.hex() produces the same lowercase hex digits as the 'hex_codec' codec.
    return "0x" + ts_binary.hex()
def compare(config_file: str = "database/CARLO.json"):
    """Compare per-client row counts between source and destination tables.

    For every configured destination table, count rows per [Client_DB] on both
    sides. The source side is additionally counted up to the destination's
    newest rowversion ("bis ts"), so rows inserted in the source after the last
    sync are not reported as a mismatch cause. On any count mismatch the
    mismatching table/client pair is printed and :func:`compare_details` dumps
    row-level diff files.

    :param config_file: path to the JSON DbCreateConfig to load.
    """
    cfg = DbCreateConfig.load_config(config_file)
    table_config = get_table_config(cfg)

    for dest_table in table_config:
        if dest_table.dest not in cfg.dest_inspect.tables_list:
            print(f"Ziel-Tabelle '{dest_table.dest}' existiert nicht!")
            continue

        # Per-client row counts in the destination table.
        query_count_dest = (
            f"SELECT [Client_DB], COUNT(*) as [Rows] FROM {dest_table.full_table_name} GROUP BY [Client_DB]"
        )
        q = cfg.dest_inspect.cursor.execute(query_count_dest)
        dest_row_count = {row[0]: row[1] for row in q.fetchall()}

        # Newest rowversion per client in the destination (as hex literal).
        query_timestamp_dest = (
            f"SELECT [Client_DB], max(timestamp) as [TS] FROM {dest_table.full_table_name} GROUP BY [Client_DB]"
        )
        q = cfg.dest_inspect.cursor.execute(query_timestamp_dest)
        dest_timestamp = {row[0]: decode_ts(row[1]) for row in q.fetchall()}

        source_row_count = {}
        source_row_count_ts = {}

        for source_table in dest_table.source_tables:
            source_table2 = cfg.source_inspect.convert_table(source_table.table_name)
            client_db = source_table.client_db

            # Guard clause: skip source tables that exist under neither name.
            if (
                source_table.table_name not in cfg.source_inspect.tables_list
                and source_table2 not in cfg.source_inspect.tables_list
            ):
                continue

            query_count_source = source_table.select_query.replace("T1.*", "COUNT(*) as [Rows]")
            q = cfg.source_inspect.cursor.execute(query_count_source)
            source_row_count[client_db] = q.fetchone()[0]

            # Count only the source rows already covered by the destination's
            # newest rowversion for this client.
            ts = dest_timestamp.get(client_db, "0x0000000000000000")
            query_ts = query_count_source
            if "WHERE" in query_ts:
                query_ts = query_ts.replace("WHERE", f"WHERE T1.[timestamp] <= convert(binary(8), '{ts}', 1) AND")
            else:
                query_ts += f" WHERE T1.[timestamp] <= convert(binary(8), '{ts}', 1)"
            try:
                q = cfg.source_inspect.cursor.execute(query_ts)
                source_row_count_ts[client_db] = q.fetchone()[0]
            except ProgrammingError:
                # Best-effort: some source tables have no [timestamp] column;
                # the plain count above is still reported.
                pass

            if dest_row_count.get(client_db, 0) != source_row_count.get(client_db, 0):
                print(f"Tabelle {dest_table.dest} mit Client {client_db} stimmt nicht ueberein.")
                print(f" Quelle: {source_row_count.get(client_db, 0):>8}")
                print(f" Quelle (bis ts): {source_row_count_ts.get(client_db, 0):>8}")
                print(f" dest: {dest_row_count.get(client_db, 0):>8}")
                compare_details(source_table, dest_table, query_count_source, cfg)
def compare_details(source_table: SourceTable2, dest_table: DestTable, query_count_source: str, cfg: DbCreateConfig):
    """Dump row-level diffs for one mismatching source/dest table pair.

    Writes CSV snapshots of primary key + timestamp for both sides, then three
    SQL files into ``{stage_dir}\\diff``: rows only in the source, rows only in
    the destination, and rows whose timestamp differs (not yet updated).

    :param source_table: the source-side table (provides client_db/table_client).
    :param dest_table: the destination table (provides primary_key/full_table_name).
    :param query_count_source: the COUNT(*) query built by :func:`compare`;
        reused here as a template for the row-level queries.
    :param cfg: loaded DbCreateConfig with both inspect engines.
    """
    # Key columns for the diff: primary key plus the rowversion column.
    # Client_DB is fixed per call, so it is dropped from the key (either casing).
    cols = dest_table.primary_key + ["timestamp"]
    for client_col in ("Client_DB", "CLIENT_DB"):
        if client_col in cols:
            cols.remove(client_col)
    query_cols = ", ".join(f"T1.[{c}]" for c in cols)

    query_source = query_count_source.replace("COUNT(*) as [Rows]", query_cols)
    query_source += f" ORDER BY {query_cols}"
    query_dest = (
        f"SELECT {query_cols} FROM {dest_table.full_table_name} T1 "
        f"WHERE T1.[Client_DB] = '{source_table.client_db}' ORDER BY {query_cols}"
    )

    source_data = _snapshot_to_csv(
        query_source, cfg.source_inspect.sqlalchemy_engine,
        f"{cfg.stage_dir}\\source\\{source_table.table_client}.csv",
    )
    dest_data = _snapshot_to_csv(
        query_dest, cfg.dest_inspect.sqlalchemy_engine,
        f"{cfg.stage_dir}\\dest\\{source_table.table_client}.csv",
    )

    cols_without_ts = cols[:-1]

    # Rows present in the source but missing in the destination.
    only_in_source = pd.merge(source_data, dest_data, how="left", on=cols_without_ts)
    only_in_source = only_in_source[pd.isna(only_in_source["timestamp_y"])]
    if only_in_source.shape[0] > 0:
        # NOTE(review): appending " AND ..." assumes the source select already
        # contains a WHERE clause (compare() handles the WHERE-less case) — confirm.
        query = query_count_source.replace("COUNT(*) as [Rows]", "T1.*")
        query += f" AND T1.[timestamp] IN ({_ts_list(only_in_source)})"
        _write_query_file(f"{cfg.stage_dir}\\diff\\{source_table.table_client}_only_in_source.sql", query)

    # Rows present in the destination but missing in the source.
    only_in_dest = pd.merge(dest_data, source_data, how="left", on=cols_without_ts)
    only_in_dest = only_in_dest[pd.isna(only_in_dest["timestamp_y"])]
    if only_in_dest.shape[0] > 0:
        query = (
            f"SELECT T1.* FROM {dest_table.full_table_name} T1 "
            f"WHERE T1.[Client_DB] = '{source_table.client_db}' AND T1.[timestamp] IN ({_ts_list(only_in_dest)})"
        )
        _write_query_file(f"{cfg.stage_dir}\\diff\\{source_table.table_client}_only_in_dest.sql", query)

    # Rows present on both sides whose timestamps disagree (pending update).
    not_updated = pd.merge(source_data, dest_data, how="inner", on=cols_without_ts)
    not_updated = not_updated[not_updated["timestamp_x"] != not_updated["timestamp_y"]]
    if not_updated.shape[0] > 0:
        query = query_count_source.replace("COUNT(*) as [Rows]", "T1.*")
        query += f" AND T1.[timestamp] IN ({_ts_list(not_updated)})"
        _write_query_file(f"{cfg.stage_dir}\\diff\\{source_table.table_client}_not_updated.sql", query)


def _snapshot_to_csv(query: str, engine, csv_file: str) -> pd.DataFrame:
    """Run *query* on *engine*, hex-decode the timestamp column, write CSV, return the frame."""
    data = pd.read_sql(query, engine)
    data["timestamp"] = data["timestamp"].apply(decode_ts)
    data.to_csv(csv_file, index=False)
    return data


def _ts_list(diff: pd.DataFrame) -> str:
    """Comma-separated hex timestamps of the left-hand side of a merge result."""
    return ", ".join(diff["timestamp_x"])


def _write_query_file(path: str, query: str) -> None:
    """Write a generated diff query to *path*."""
    with open(path, "w") as fwh:
        fwh.write(query)
- if __name__ == "__main__":
- compare()