# csv_import.py
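"""Import AUTOLINE CSV exports into an MSSQL database.

In "batch" mode the script prints a Windows batch script to stdout that, per CSV
file, truncates the staging table, converts the CSV via this script's own
"transform" mode, bulk-loads it with bcp.exe and merges it into the target
schema with sqlcmd.exe. In "transform" mode it rewrites a single CSV into a
tab-separated staging file according to a column/type template.
"""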

import plac
from pathlib import Path
import csv
import re
import pandas as pd
from sqlalchemy import create_engine, inspect
import json

csv_dir = Path('C:\\GlobalCube\\System\\AUTOLINE\\Datenbank\\Full_zusammengesetllt')
target_dsn = {'user': 'sa', 'pass': 'Mffu3011#', 'server': 'GC-SERVER1\\GLOBALCUBE', 'database': 'AUTOLINE'}
temp_schema = 'temp'
target_schema = 'import'
transform = []
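

# Map the SQL Server column type reported by SQLAlchemy to the type tag used in
# the transform template ("datetime64", "float64" or "object").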
def get_dtype(db_type):
    if db_type == "DATETIME":
        return "datetime64"
    if db_type in ("DECIMAL(28, 8)", "DECIMAL(18, 0)", "NUMERIC(18, 0)"):
        return "float64"
    return "object"
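

# SQLAlchemy connection string for the target MSSQL instance (pyodbc driver).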
def conn_string(dsn):
    return f"mssql+pyodbc://{dsn['user']}:{dsn['pass']}@{dsn['server']}/{dsn['database']}?driver=SQL+Server+Native+Client+11.0"
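

# Shared command-line parameters for sqlcmd.exe and bcp.exe.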
def conn_params(dsn):
    return f"-S {dsn['server']} -d {dsn['database']} -U {dsn['user']} -P {dsn['pass']}"
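

# Read only the header row of the CSV and normalise the column names so they
# can be matched against the target table: duplicate columns (suffixed ".1" by
# pandas) are renamed to "...2", and remaining dots become spaces.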
def columns_from_csv(source_csv):
    df = pd.read_csv(source_csv, sep=",", encoding="utf-8", decimal=".", nrows=1)
    source_columns_list = list(df.columns)
    # rename duplicate columns in place (index into the full list, not a filtered copy)
    for i, col in enumerate(source_columns_list):
        if col[-2:] == ".1":
            source_columns_list[i] = col[:-2] + "2"
    source_columns_list = [col.replace(".", " ") for col in source_columns_list]
    return source_columns_list
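

# Build the transform template for one table: for every target column a pair of
# (position of that column in the source CSV, or -1 if missing; type tag from
# get_dtype), e.g. [(0, "object"), (5, "datetime64"), (-1, "float64")].
# Columns present on only one side are reported as batch "rem" comments.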
def transform_template(target_insp, source_csv, table, target_schema):
    target_insp_cols = target_insp.get_columns(table, schema=target_schema)
    target_columns_list = [col['name'] for col in target_insp_cols]
    source_columns_list = columns_from_csv(source_csv)
    target_columns = set(target_columns_list)
    source_columns = set(source_columns_list)
    # intersect = source_columns.intersection(target_columns)
    # print("rem In both: " + ";".join(intersect))
    diff1 = source_columns.difference(target_columns)
    if len(diff1) > 0:
        print("rem Only in source: " + ";".join(diff1))
    diff2 = target_columns.difference(source_columns)
    if len(diff2) > 0:
        print("rem Only in target: " + ";".join(diff2))
    template = []
    for i, col in enumerate(target_columns_list):
        pos = source_columns_list.index(col) if col in source_columns_list else -1
        template.append((pos, get_dtype(str(target_insp_cols[i]['type']))))
    return template
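

# Reorder and convert one raw CSV row according to the current transform
# template: missing columns become empty strings, decimal commas become points
# for float columns, and DD.MM.YYYY / DD/MM/YYYY dates are rewritten as ISO
# YYYY-MM-DD (datetime values that match neither format are blanked).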
def transform_line(line):
    pattern = re.compile(r"\d{4}-\d\d-\d\d \d\d:\d\d")
    pattern2 = re.compile(r"(\d{2})[/\.](\d{2})[/\.](\d{4})")
    result = []
    for pos, f in transform:
        e = ""
        if -1 < pos < len(line):
            e = line[pos]
        if f == "float64":
            e = e.replace(",", ".")
        if f == "datetime64":
            m = pattern2.match(e)
            if m:
                e = f"{m[3]}-{m[2]}-{m[1]}"
            elif not pattern.match(e):
                e = ""
            # e += ":00.000"
        result.append(e)
    return result
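

# Strip NUL bytes from the input stream; csv.reader refuses lines containing them.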
def fix_nulls(s):
    for line in s:
        yield line.replace('\0', ' ')
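

# Convert one source CSV into a tab-separated staging file (<dir>\stage\<name>)
# using the template passed on the command line; skipped when the staging file
# is already newer than the source file.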
def transform_file(source_csv, template):
    global transform
    transform = json.loads(template)
    stage_csv = Path(f"{source_csv.parent}\\stage\\{source_csv.name}")
    if stage_csv.exists() and stage_csv.stat().st_ctime > source_csv.stat().st_ctime:
        print(f"Stage CSV '{stage_csv.name}' is already up to date.")
        return False
    print(f"Importing {source_csv.name}...")
    with open(source_csv, "r", encoding="utf-8", errors="ignore", newline="") as source_file, \
            open(stage_csv, "w", encoding="utf-8", newline="") as target_file:
        csv_read = csv.reader(fix_nulls(source_file), delimiter=",")
        csv_write = csv.writer(target_file, delimiter="\t")
        next(csv_read)  # skip the header row
        i = 0
        for cols in csv_read:
            csv_write.writerow(transform_line(cols))
            i += 1
        print(f"...{i} rows converted.")
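

# Pair every CSV file in csv_dir with its target table: the file name (without
# extension) is matched case-insensitively, optionally after stripping a
# trailing "_<number>" suffix.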
def csv_tables(csv_dir, target_tables_ci):
    p = re.compile(r"_\d+$")
    result = []
    if not csv_dir.is_dir():
        print(f"Directory {csv_dir} does not exist!")
        return result
    for source_csv in csv_dir.glob("*.csv"):
        if source_csv.is_dir():
            continue
        table = source_csv.name[:-4].lower()
        if table not in target_tables_ci:
            table = p.sub("", table)
        if table not in target_tables_ci:
            print(f"rem Target table '{table}' does not exist!")
            continue
        result.append((table, source_csv))
    return result
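

# Connect to the target database and return the SQLAlchemy inspector together
# with the lower-cased table names of the target schema.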
def target_tables(target_dsn, target_schema):
    engine = create_engine(conn_string(target_dsn))
    target_insp = inspect(engine)
    target_tables = target_insp.get_table_names(schema=target_schema)
    return (target_insp, list(map(str.lower, target_tables)))
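

# Print a Windows batch script to stdout that, for every matching CSV, truncates
# the staging table, converts the CSV via "csv_import.py transform", bulk-loads
# it with bcp.exe and merges it into the target schema with sqlcmd.exe (delete
# rows with matching primary keys, then insert from the temp schema). With
# action "overwrite" the staging table is the target table itself.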
def batch(csv_dir, action):
    target_insp, target_tables_ci = target_tables(target_dsn, target_schema)
    stage_schema = target_schema if action == "overwrite" else temp_schema
    print("@echo off")
    print("cd /d %~dp0")
    print("set PYTHON=\"C:\\dev\\Python\\Python38-32\"")
    for (table, source_csv) in csv_tables(csv_dir, target_tables_ci):
        print(f"echo =={table}==")
        stage_csv = Path(f"{source_csv.parent}\\stage\\{source_csv.name}")
        try:
            tf_template = transform_template(target_insp, source_csv, table, target_schema)
            template_json = json.dumps(tf_template).replace("\"", "\\\"")
            print(f"sqlcmd.exe {conn_params(target_dsn)} -p -Q \"TRUNCATE TABLE [{stage_schema}].[{table}]\" ")
            print(f"%PYTHON%\\python.exe csv_import.py transform \"{source_csv}\" -t \"{template_json}\" ")
            print(f"bcp.exe [{stage_schema}].[{table}] in \"{stage_csv}\" {conn_params(target_dsn)} -c -C 65001 -e \"{stage_csv}.log\" ")
            pkeys = target_insp.get_pk_constraint(table, schema=target_schema)
            if len(pkeys['constrained_columns']) > 0:
                delete_sql = f"DELETE T1 FROM [{target_schema}].[{table}] T1 INNER JOIN [{temp_schema}].[{table}] T2 ON " + \
                             " AND ".join([f"T1.[{col}] = T2.[{col}]" for col in pkeys['constrained_columns']])
                print(f"sqlcmd.exe {conn_params(target_dsn)} -p -Q \"{delete_sql}\" ")
            insert_sql = f"INSERT INTO [{target_schema}].[{table}] SELECT * FROM [{temp_schema}].[{table}]"
            print(f"sqlcmd.exe {conn_params(target_dsn)} -p -Q \"{insert_sql}\" ")
            print("")
        except Exception:
            print(f"rem {source_csv} failed!")
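

# Command-line entry point (plac): "batch <csv_dir>" writes the import batch
# script to stdout, "transform <csv_file> -t <template>" converts a single CSV
# into its staging file. Note that csv_dir holds a file path in transform mode.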
@plac.pos('action', "", choices=['batch', 'transform'])
@plac.pos('csv_dir', "", type=Path)
@plac.opt('mode', "", choices=['overwrite', 'append', 'update'])
@plac.opt('template', "")
def main(action, csv_dir, mode="overwrite", template="[]"):
    if action == "transform":
        transform_file(csv_dir, template)
    else:
        batch(csv_dir, mode)


if __name__ == '__main__':
    plac.call(main)
    # main("batch", csv_dir, "append")
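
# Typical invocation (assumed; the generated script is meant to be redirected
# into a batch file and then executed):
#   python csv_import.py batch <csv_dir> -m append > csv_import.bat
#   csv_import.bat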