# import.py
import sqlite3
import pandas as pd
from sqlalchemy import create_engine, inspect
from threading import Thread

# --- Configuration ----------------------------------------------------------
# Mapping file: one row per target table (columns used below: source, target,
# query, filter).
csv_file = "CARLO.csv"
# Client databases to export; key becomes the Client_DB column value, the
# value is used as the table-name prefix in the source database.
clients = {'1': "M und S Fahrzeughandel GmbH" }
client_db = "1"
# Date filters are pre-quoted so they can be interpolated directly into SQL.
date_filter = "'2018-01-01'"
date_filter2 = "'2019-01-01'"
# NOTE(review): credentials are hard-coded in source — consider moving them to
# environment variables or a config file outside version control.
source_dsn = { 'user': "sa", 'pass': "Mffu3011#", 'server': "GC-SERVER1\\GLOBALCUBE", 'database': "DE0017" }
source_schema = "dbo"
target_dsn = { 'user': "sa", 'pass': "Mffu3011#", 'server': "GC-SERVER1\\GLOBALCUBE", 'database': "CARLO2" }
target_schema = "import"
# stage_dir = "\\\\gc-server1\Austausch\\stage"
# Local directory that receives the generated .bat files and the bcp CSV dumps.
stage_dir = "C:\\GlobalCube\\System\\CARLO\\Export\\stage"
  16. def db_import (select_query, source_db, current_table, target_db, target_schema):
  17. pd.read_sql(select_query, source_db).to_sql(current_table['target'], target_db, schema=target_schema, index=False, if_exists='append')
  18. def conn_string (dsn):
  19. return f"mssql+pyodbc://{dsn['user']}:{dsn['pass']}@{dsn['server']}/{dsn['database']}?driver=SQL+Server+Native+Client+11.0"
  20. def conn_params (dsn):
  21. return f"-S {dsn['server']} -d {dsn['database']} -U {dsn['user']} -P {dsn['pass']}"
  22. df = pd.read_csv(csv_file, sep=";", encoding="ansi")
  23. config = df[df['target'].notnull()]
  24. print(config.head())
  25. source_db = create_engine(conn_string(source_dsn))
  26. source_insp = inspect(source_db)
  27. source_tables = source_insp.get_table_names(schema=source_schema)
  28. source_tables_prefix = set([t.split("$")[0] for t in source_tables if "$" in t])
  29. print(source_tables_prefix)
  30. target_db = create_engine(conn_string(target_dsn))
  31. target_insp = inspect(target_db)
  32. target_tables = target_insp.get_table_names(schema=target_schema)
# For every configured target table, generate a per-table .bat file that
# exports the source rows via bcp and bulk-loads them into the target table.
for index, current_table in config.iterrows():
    with open(stage_dir + "\\batch\\" + current_table['target'] + ".bat", "w", encoding="cp850") as f:
        f.write("@echo off \n")
        f.write("rem ==" + current_table['target'] + "==\n")
        if not current_table['target'] in target_tables:
            # Target table missing: leave a message in the batch file and skip.
            f.write(f"echo Ziel-Tabelle '{current_table['target']}' existiert nicht!\n")
            continue
        # Clean old stage files and truncate the target table before loading.
        f.write(f"del {stage_dir}\\{current_table['target']}*.* /Q /F >nul 2>nul \n")
        f.write(f"sqlcmd.exe {conn_params(target_dsn)} -p -Q \"TRUNCATE TABLE [{target_schema}].[{current_table['target']}]\" \n")
        target_insp_cols = target_insp.get_columns(current_table['target'], schema=target_schema)
        target_columns_list = [col['name'] for col in target_insp_cols]
        target_columns = set(target_columns_list)
        # One export per client database; the dict key is written into the
        # Client_DB column, the value is the source table-name prefix.
        for client_db, prefix in clients.items():
            source_table = current_table['source'].format(prefix)
            if not source_table in source_tables:
                f.write(f"echo Quell-Tabelle '{source_table}' existiert nicht!\n")
                continue
            stage_csv = f"{stage_dir}\\{current_table['target']}_{client_db}.csv"
            # Use the custom query from the config when present, otherwise a
            # plain SELECT with an optional WHERE filter.
            if not pd.isnull(current_table['query']):
                select_query = current_table['query'].format(prefix, date_filter, date_filter2)
            else:
                select_query = f"SELECT T1.* FROM [{source_schema}].[{source_table}] T1 "
                if not pd.isnull(current_table['filter']):
                    select_query += " WHERE " + current_table['filter'].format("", date_filter, date_filter2)
            # Compare source vs. target column sets and report mismatches as
            # rem/echo lines inside the generated batch file.
            source_insp_cols = source_insp.get_columns(source_table)
            source_columns = set([col['name'] for col in source_insp_cols])
            intersect = source_columns.intersection(target_columns)
            #print("Auf beiden Seiten: " + ";".join(intersect))
            diff1 = source_columns.difference(target_columns)
            if len(diff1) > 0:
                f.write("rem Nur in Quelle: " + ";".join(diff1) + "\n")
            diff2 = target_columns.difference(source_columns)
            # The target must provide a Client_DB column that the source lacks.
            if not "Client_DB" in diff2:
                f.write("echo Spalte 'Client_DB' fehlt!\n")
                continue
            diff2.remove("Client_DB")
            if len(diff2) > 0:
                f.write("rem Nur in Ziel: " + ";".join(diff2) + "\n")
            #select_columns = "T1.[" + "], T1.[".join(intersect) + "],"
            # Build the SELECT list in target-column order: shared columns come
            # from the source, Client_DB is injected as a constant, and any
            # remaining target-only column is filled with ''.
            select_columns = ""
            for col in target_columns_list:
                if col in intersect:
                    select_columns += "T1.[" + col + "], "
                elif col == "Client_DB":
                    select_columns += "'" + client_db + "' as \\\"Client_DB\\\", "
                else:
                    select_columns += "'' as \\\"" + col + "\\\", "
            # [:-2] strips the trailing ", "; '%' must be doubled for batch files.
            select_query = select_query.replace("T1.*", select_columns[:-2])
            select_query = select_query.replace("%", "%%")  # batch-file escaping
            #print(select_query)
            # Export to CSV via bcp queryout, then bulk-load into the target;
            # findstr -v "1000" hides the per-1000-rows progress lines.
            f.write(f"bcp \"{select_query}\" queryout \"{stage_csv}\" {conn_params(source_dsn)} -c -C 65001 -e \"{stage_csv[:-4]}.queryout.log\" > \"{stage_csv[:-4]}.bcp1.log\" \n")
            f.write(f"type \"{stage_csv[:-4]}.bcp1.log\" | findstr -v \"1000\" \n")
            f.write(f"bcp [{target_schema}].[{current_table['target']}] in \"{stage_csv}\" {conn_params(target_dsn)} -c -C 65001 -e \"{stage_csv[:-4]}.in.log\" > \"{stage_csv[:-4]}.bcp2.log\" \n")
            f.write(f"type \"{stage_csv[:-4]}.bcp2.log\" | findstr -v \"1000\" \n")
  87. with open(stage_dir + "\\batch\\_all.bat", "w", encoding="cp850") as f:
  88. f.write("@echo off & cd /d %~dp0 \n")
  89. f.write(f"del {stage_dir}\\*.* /Q /F >nul 2>nul \n\n")
  90. for index, current_table in config.iterrows():
  91. f.write("echo ==" + current_table['target'] + "==\n")
  92. f.write("echo " + current_table['target'] + " >CON \n")
  93. f.write("call " + current_table['target'] + ".bat\n\n")