mdl_convert.py

"""Convert Cognos 7 MDL model files to JSON and build SQL views for their data sources."""

import json
import os
import re
from pathlib import Path

# Top-level MDL keywords that open a new block; prepare_mdl_str() inserts a blank
# line before each of them so the model text can be split into blocks.
CONVERSION = [
    "Name",
    "CognosSource",
    "CognosPackageDatasourceConnection",
    "DataSource",
    "OrgName",
    "Dimension",
    "Root",
    "Drill",
    "Levels",
    "Associations",
    "Category",
    "SpecialCategory",
    "MapDrills",
    "ViewName",
    "Measure",
    "Signon",
    "Cube",
    "CustomView",
    "CustomViewChildList",
    "SecurityNameSpace",
    "SecurityObject",
    "AllocationAdd",
]

# Maps block IDs to their converted dicts; filled by group_mdl_blocks() and used
# by remove_ids() to replace ID references with names.
id_lookup = {}

# Matches either a quoted string or a bare word; each token must be followed by a space.
find_words = re.compile(r'("[^"]+"|\w+) ')

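# Shape of a block as convert_block() expects it (illustrative only, inferred from
# the parsing code below -- real Cognos MDL files may use further keywords):
#
#   DataSource 42 "Sales" SourceType CognosSourceQuery Database 7 "DWH" ...
#
# i.e. a type keyword, a numeric ID, a quoted name, then alternating key/value pairs.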
def convert_block(block):
    """Parse one MDL block into a dict of its key/value pairs."""
    block = block.replace("\n", "")
    block_type = block.split(" ")[0]
    words = find_words.findall(block)
    if len(words) < 3:
        return {"Type": block_type}
    result = {"Type": words[0], "ID": words[1], "Name": words[2].strip('"')}
    # offset skips the extra words consumed by multi-word values below.
    offset = 0
    for i in range(3, len(words), 2):
        if len(words) < i + offset + 2:
            break
        key = words[i + offset]
        if key in ["PackageReportSource", "Database"]:
            result[key] = {
                "ID": words[i + offset + 1],
                "Name": words[i + offset + 2].strip('"'),
            }
            offset += 1
        elif key in ["DimensionView"]:
            if key + "s" not in result:
                result[key + "s"] = []
            result[key + "s"].append({"ID": words[i + offset + 1], "Name": words[i + offset + 2].strip('"')})
            offset += 1
        elif key in ["MeasureInclude"]:
            if key + "s" not in result:
                result[key + "s"] = []
            result[key + "s"].append({"ID": words[i + offset + 1], "Include": words[i + offset + 2]})
            offset += 1
        elif key == "Calc":
            # A Calc expression runs until the next Sign/Filtered/Format keyword.
            match = re.search(r"Calc (.*)", block)
            if match:
                calc_split = re.split(r"(Sign|Filtered|Format)", match[1])
                result["Calc"] = calc_split[0].strip()
            for j in range(i + offset + 1, len(words)):
                if words[j] in ["Sign", "Format", "Filtered"] or j == len(words) - 1:
                    # result["Calc"] = " ".join(words[i + offset + 1 : j])
                    offset = j
                    break
        elif key == "EncryptedPW":
            result["EncryptedPW"] = words[i + offset + 1].strip('"')
            result["Salt"] = words[i + offset + 2].strip('"')
            offset += 1
        elif key == "AllocationAdd":
            if key + "s" not in result:
                result[key + "s"] = []
            result[key + "s"].append({"Measure": words[i + offset + 2], "Type": words[i + offset + 4]})
            offset += 3
        elif key in [
            "CustomViewList",
            "DrillThrough",
            "DeployLocations",
            "PowerCubeCustomViewList",
            "StartList",
            "TransientLevelList",
        ]:
            # List-valued keys run until the matching EndList keyword.
            for j in range(i + offset + 1, len(words)):
                if words[j] in ["EndList"]:
                    result[key] = " ".join(words[i + offset + 1 : j])
                    offset = j - i - 1
                    break  # for
        # elif words[i + offset].isnumeric() or words[i + offset].startswith('"'):
        #     offset += 1
        else:
            result[key] = words[i + offset + 1].strip('"')
    # Pre-create the containers that group_mdl_blocks() fills in later.
    if block_type == "DataSource":
        result["Columns"] = []
    if block_type in ["OrgName", "Levels", "Measure"]:
        result["Associations"] = []
    if block_type == "Dimension":
        result["Root"] = {}
        result["Levels"] = []
        result["Categories"] = []
        result["SpecialCategories"] = []
    if block_type == "Root":
        result["Drill"] = {}
    if block_type == "Associations":
        result["Parent"] = 0
    if block_type == "CustomView":
        result["ChildList"] = {}
    if block_type == "SecurityNameSpace":
        result["Objects"] = []
    return result

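# For the hypothetical DataSource block sketched above, convert_block() would
# return roughly (illustrative only):
#
#   {"Type": "DataSource", "ID": "42", "Name": "Sales",
#    "SourceType": "CognosSourceQuery",
#    "Database": {"ID": "7", "Name": "DWH"},
#    "Columns": []}
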
def remove_ids(nested):
    """Recursively strip internal IDs and resolve ID references to names."""
    nested.pop("ID", "")
    nested.pop("DateDrill", "")
    nested.pop("Primary", "")
    nested.pop("Lastuse", "")
    nested.pop("AssociationContext", "")
    # Drop special-category labels that look auto-generated (contain "20", e.g. a year).
    if nested.get("Type", "") == "SpecialCategory" and "Label" in nested and "20" in nested["Label"]:
        nested.pop("Label", "")
    for col in ["Parent", "Levels", "CustomViewList"]:
        if col not in nested:
            continue
        if col == "Levels" and (isinstance(nested["Levels"], list) or nested["Levels"] == "0"):
            continue
        nested[col] = id_lookup.get(nested[col], {}).get("Name", "undefined")
    for child in nested.values():
        if isinstance(child, dict):
            remove_ids(child)
        if isinstance(child, list):
            for entry in child:
                remove_ids(entry)
    return nested

def prepare_mdl_str(mdl_str):
    """Normalize the raw MDL text so every block starts on its own paragraph."""
    mdl_str = re.sub(r"\n+", "\n", mdl_str)
    mdl_str = re.sub(r"^\n?Name ", "ModelName 1 ", mdl_str)
    # Keep "Levels <id>" references (no quoted name) on the previous line so they
    # are not split into blocks of their own.
    mdl_str = re.sub(r'\nLevels (\d+ [^"])', r"Levels \1", mdl_str)
    mdl_str = re.sub(r" Associations ", " \nAssociations ", mdl_str)
    # Turn doubled quotes inside quoted strings into apostrophes so find_words keeps matching.
    mdl_str = re.sub(r'([^ ])""', r"\1'", mdl_str)
    mdl_str = re.sub(r'""([^ ])', r"'\1", mdl_str)
    # Insert a blank line before every known top-level keyword; convert_file()
    # later splits the text on these blank lines.
    tags = "|".join(CONVERSION)
    mdl_str = re.sub(r"\n(" + tags + r") ", r"\n\n\1 ", mdl_str)
    return mdl_str

def group_mdl_blocks(converted):
    """Group the flat list of converted blocks into one nested model dict."""
    result = {
        "Model": {},
        "Connections": [],
        "DataSources": [],
        "Dimensions": [],
        "Measures": [],
        "Signons": [],
        "CustomViews": [],
        "Security": [],
        "Cubes": [],
    }
    types = [c["Type"] for c in converted]
    ids = [c.get("ID", "0") for c in converted]
    id_lookup.update(dict(zip(ids, converted)))
    current = None
    level_ids = []
    for c, t in zip(converted, types):
        if t in [""]:
            continue
        # Skip the bulk of the generated date categories of the "Zeit" (time) dimension.
        if t in ["Category", "SpecialCategory"] and result["Dimensions"][-1]["Name"] == "Zeit":
            if t == "Category" or c["Name"][0].isnumeric():
                continue
        if t in ["ModelName"]:
            result["Model"] = c
        elif t in ["CognosSource", "CognosPackageDatasourceConnection"]:
            result["Connections"].append(c)
        elif t in ["DataSource"]:
            result["DataSources"].append(c)
        elif t in ["OrgName"]:
            result["DataSources"][-1]["Columns"].append(c)
        elif t in ["Dimension"]:
            level_ids = []
            result["Dimensions"].append(c)
        elif t in ["Root"]:
            result["Dimensions"][-1]["Root"] = c
        elif t in ["Drill"]:
            result["Dimensions"][-1]["Root"]["Drill"] = c
        elif t in ["Levels"]:
            current = c
            level_ids.append(c["ID"])
            result["Dimensions"][-1]["Levels"].append(c)
        elif t in ["Category"]:
            # Keep only categories belonging to the first two levels of the dimension.
            if c.get("Levels", "") in level_ids[0:2]:
                result["Dimensions"][-1]["Categories"].append(c)
        elif t in ["SpecialCategory"]:
            result["Dimensions"][-1]["SpecialCategories"].append(c)
        elif t in ["Measure"]:
            current = c
            result["Measures"].append(c)
        elif t in ["Associations"]:
            # Attach the association to the level/measure seen last and to the
            # referenced data source column.
            c["Parent"] = current["ID"]
            current["Associations"].append(c)
            for ds in result["DataSources"]:
                for col in ds["Columns"]:
                    if col["Column"] == c["AssociationReferenced"]:
                        col["Associations"].append(c)
        elif t in ["Signon"]:
            result["Signons"].append(c)
        elif t in ["Cube"]:
            result["Cubes"].append(c)
        elif t in ["CustomView"]:
            result["CustomViews"].append(c)
        elif t in ["CustomViewChildList"]:
            for cv in result["CustomViews"]:
                if cv["ID"] == c["ID"]:
                    cv["ChildList"] = c
        elif t in ["SecurityNameSpace"]:
            result["Security"].append(c)
        elif t in ["SecurityObject"]:
            result["Security"][-1]["Objects"].append(c)
        # else:
        #     print(t, c)
    return result

def build_query(datasource):
    """Build a CREATE OR ALTER VIEW statement for one MDL data source."""
    table = datasource["Name"]
    # suffix = "_fm" if datasource["SourceType"] == "CognosSourceQuery" else "_imr"
    # table_name = f"[staging].[{table}{suffix}]"
    table_name = f"[export_csv].[{table}]"
    view_name = f"[load].[{table}]"
    columns = ",\n\t".join([extract_column(c) for c in datasource["Columns"]])
    return f"CREATE\n\tOR\n\nALTER VIEW {view_name}\nAS\nSELECT {columns} \nFROM {table_name}\nGO\n\n"

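# Sketch of the generated T-SQL for a data source named "Sales" with two columns
# (column names are made up; columns without associations are emitted commented
# out by extract_column() below, and the CREATE/OR/ALTER line breaks come from
# the f-string above):
#
#   CREATE
#       OR
#
#   ALTER VIEW [load].[Sales]
#   AS
#   SELECT [ARTNR] AS [ArtikelNr],
#       --[MENGE] AS [Menge]
#   FROM [export_csv].[Sales]
#   GO
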
def extract_column(col):
    """Render one data source column as an entry of the view's SELECT list."""
    name = col["Name"]
    if "]." in name:
        # Qualified names like [x].[y]: keep only the last bracketed part.
        name = name.split("].")[-1]
    else:
        name = f"[{name}]"
    alias = col["Column"]
    # Columns that are not associated with any level or measure are commented out.
    is_used = "" if len(col["Associations"]) > 0 else "--"
    if col.get("Origin") == "Calculated" and "Calc" in col:
        name = col["Calc"]
        # Rewrite quoted MDL references such as "Some Column@123" to [Some Column].
        name = re.sub(r"\"([^\"]+)@\d+\"", r"[\1]", name)
    return f"{is_used}{name} AS [{alias}]"

def convert_file(filename: str) -> None:
    """Convert one .mdl file to JSON (with and without IDs) and SQL view scripts."""
    with open(filename, "r", encoding="latin-1") as frh:
        mdl_str = frh.read()
    mdl_str = prepare_mdl_str(mdl_str)
    mdl_blocks = mdl_str.split("\n\n")
    converted = [convert_block(b) for b in mdl_blocks]
    grouped = group_mdl_blocks(converted)
    with open(filename[:-4] + "_ori.json", "w") as fwh:
        json.dump(grouped, fwh, indent=2)
    # yaml.safe_dump(result, open(filename[:-4] + ".yaml", "w"))
    without_ids = remove_ids(grouped)
    with open(filename[:-4] + ".json", "w") as fwh:
        json.dump(without_ids, fwh, indent=2)
    # One combined SQL script plus one file per data source under ../SQL.
    queries = {ds["Name"]: build_query(ds) for ds in grouped["DataSources"]}
    with open(filename[:-4] + "_queries.sql", "w", encoding="latin-1") as fwh:
        fwh.writelines(queries.values())
    base_dir = Path(filename).parent.parent / "SQL"
    os.makedirs(base_dir, exist_ok=True)
    model = Path(filename).stem
    for ds, query in queries.items():
        with open(base_dir / f"{ds}_{model}.sql", "w", encoding="latin-1") as fwh:
            fwh.write(query)
    # Map "<dimension>//<category>" to its label for later lookups by name.
    cat_name_to_label = {
        d["Name"] + "//" + c["Name"]: c.get("Label", c.get("SourceValue", ""))
        for d in grouped["Dimensions"]
        for c in d["Categories"]
    }
    filename_ids = filename[:-4] + "_ids.json"
    if len(grouped["Cubes"]):
        # Name the ID map after the cube's .mdc file if the model deploys a cube.
        cube_name = Path(grouped["Cubes"][0]["MdcFile"]).name
        filename_ids = str(Path(filename).parent / cube_name[:-4]) + "_ids.json"
    with open(filename_ids, "w") as fwh:
        json.dump(cat_name_to_label, fwh, indent=2)

def convert_folder(base_dir: str) -> None:
    """Convert every .mdl file under base_dir, oldest first (by modification time)."""
    files = sorted([(f.stat().st_mtime, f) for f in Path(base_dir).rglob("*.mdl")])
    for _, filename in files:
        convert_file(str(filename))

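# Usage sketch (paths are placeholders): to convert a single model instead of a
# whole folder, call convert_file() directly, e.g.
#
#   convert_file("cognos7/data/mdl/MyModel.mdl")
#
# This writes MyModel_ori.json, MyModel.json, MyModel_queries.sql and the
# category-label map (*_ids.json) next to the .mdl file, plus one view script
# per data source under cognos7/data/SQL/.
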
if __name__ == "__main__":
    # convert_file("data/S_Offene_Auftraege.mdl")
    # convert_file("data/F_Belege_SKR_SKR_Boettche.mdl")
    convert_folder("cognos7/data/mdl/")