Browse Source

Transformer (not functional)
Crewmeister

Robert Bedner 4 years ago
parent
commit
c9d6eabad4
13 changed files with 521 additions and 32 deletions
  1. .vscode/.ropeproject/config.py (+114 −0)
  2. .vscode/.ropeproject/objectdb (BIN)
  3. .vscode/settings.json (+11 −1)
  4. __pycache__/ca_webscraper.cpython-38.pyc (BIN)
  5. crewmeister.py (+100 −24)
  6. data.py (+34 −0)
  7. db_create.py (+115 −0)
  8. notebook.ipynb (+47 −3)
  9. powerplay.vbs (+22 −0)
  10. sql_excel.py (+17 −0)
  11. transformer7.py (+5 −4)
  12. transformer_automation.py (+0 −0)
  13. zip_backup.py (+56 −0)

+ 114 - 0
.vscode/.ropeproject/config.py

@@ -0,0 +1,114 @@
+# The default ``config.py``
+# flake8: noqa
+
+
+def set_prefs(prefs):
+    """This function is called before opening the project"""
+
+    # Specify which files and folders to ignore in the project.
+    # Changes to ignored resources are not added to the history and
+    # VCSs.  Also they are not returned in `Project.get_files()`.
+    # Note that ``?`` and ``*`` match all characters but slashes.
+    # '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
+    # 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
+    # '.svn': matches 'pkg/.svn' and all of its children
+    # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
+    # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
+    prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
+                                  '.hg', '.svn', '_svn', '.git', '.tox']
+
+    # Specifies which files should be considered python files.  It is
+    # useful when you have scripts inside your project.  Only files
+    # ending with ``.py`` are considered to be python files by
+    # default.
+    # prefs['python_files'] = ['*.py']
+
+    # Custom source folders:  By default rope searches the project
+    # for finding source folders (folders that should be searched
+    # for finding modules).  You can add paths to that list.  Note
+    # that rope guesses project source folders correctly most of the
+    # time; use this if you have any problems.
+    # The folders should be relative to project root and use '/' for
+    # separating folders regardless of the platform rope is running on.
+    # 'src/my_source_folder' for instance.
+    # prefs.add('source_folders', 'src')
+
+    # You can extend python path for looking up modules
+    # prefs.add('python_path', '~/python/')
+
+    # Should rope save object information or not.
+    prefs['save_objectdb'] = True
+    prefs['compress_objectdb'] = False
+
+    # If `True`, rope analyzes each module when it is being saved.
+    prefs['automatic_soa'] = True
+    # The depth of calls to follow in static object analysis
+    prefs['soa_followed_calls'] = 0
+
+    # If `False`, "dynamic object analysis" is turned off when running
+    # modules or unit tests.  This makes them much faster.
+    prefs['perform_doa'] = True
+
+    # Rope can check the validity of its object DB when running.
+    prefs['validate_objectdb'] = True
+
+    # How many undos to hold?
+    prefs['max_history_items'] = 32
+
+    # Shows whether to save history across sessions.
+    prefs['save_history'] = True
+    prefs['compress_history'] = False
+
+    # Set the number of spaces used for indenting.  According to
+    # :PEP:`8`, it is best to use 4 spaces.  Since most of rope's
+    # unit-tests use 4 spaces it is more reliable, too.
+    prefs['indent_size'] = 4
+
+    # Builtin and c-extension modules that are allowed to be imported
+    # and inspected by rope.
+    prefs['extension_modules'] = []
+
+    # Add all standard c-extensions to extension_modules list.
+    prefs['import_dynload_stdmods'] = True
+
+    # If `True` modules with syntax errors are considered to be empty.
+    # The default value is `False`; when `False`, syntax errors raise
+    # a `rope.base.exceptions.ModuleSyntaxError` exception.
+    prefs['ignore_syntax_errors'] = False
+
+    # If `True`, rope ignores unresolvable imports.  Otherwise, they
+    # appear in the importing namespace.
+    prefs['ignore_bad_imports'] = False
+
+    # If `True`, rope will insert new module imports as
+    # `from <package> import <module>` by default.
+    prefs['prefer_module_from_imports'] = False
+
+    # If `True`, rope will transform a comma list of imports into
+    # multiple separate import statements when organizing
+    # imports.
+    prefs['split_imports'] = False
+
+    # If `True`, rope will remove all top-level import statements and
+    # reinsert them at the top of the module when making changes.
+    prefs['pull_imports_to_top'] = True
+
+    # If `True`, rope will sort imports alphabetically by module name instead
+    # of alphabetically by import statement, with from imports after normal
+    # imports.
+    prefs['sort_imports_alphabetically'] = False
+
+    # Location of the implementation of
+    # rope.base.oi.type_hinting.interfaces.ITypeHintingFactory.  In the
+    # general case you don't have to change this value, unless you're a
+    # rope expert.  Change this value to inject your own implementations
+    # of the interfaces listed in module
+    # rope.base.oi.type_hinting.providers.interfaces.  For example, you
+    # can add your own providers for Django Models, or disable the
+    # type-hinting search in a class hierarchy, etc.
+    prefs['type_hinting_factory'] = (
+        'rope.base.oi.type_hinting.factory.default_type_hinting_factory')
+
+
+def project_opened(project):
+    """This function is called after opening the project"""
+    # Do whatever you like here!
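For context, rope evaluates this module when a project is opened: set_prefs() runs before the project opens and project_opened() after. A minimal sketch of exercising it through rope's own API, assuming rope is installed; the root path and the prefs.get accessor are my assumptions about rope's API, not part of the commit:

from rope.base.project import Project

# Opening the project triggers set_prefs() on the config above;
# the root here is the folder containing .ropeproject/ (illustrative).
project = Project(".vscode")
print(project.prefs.get("indent_size"))  # expected: 4, per the config
project.close()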

BIN
.vscode/.ropeproject/objectdb


+ 11 - 1
.vscode/settings.json

@@ -1,3 +1,13 @@
 {
-    "python.pythonPath": "C:\\dev\\Python\\Python38-32\\python.exe"
+    "python.pythonPath": "C:\\dev\\Python\\Python38-32\\python.exe",
+    "python.testing.unittestArgs": [
+        "-v",
+        "-s",
+        "./tests",
+        "-p",
+        "test_*.py"
+    ],
+    "python.testing.pytestEnabled": false,
+    "python.testing.nosetestsEnabled": false,
+    "python.testing.unittestEnabled": true
 }
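These settings switch the VS Code Python extension to unittest discovery over ./tests with the test_*.py pattern. A minimal test module that this discovery rule would pick up; the file name and contents are illustrative, since the commit adds no tests folder:

# tests/test_example.py -- matches "-s ./tests -p test_*.py"
import unittest

class SmokeTest(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()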

BIN
__pycache__/ca_webscraper.cpython-38.pyc


+ 100 - 24
crewmeister.py

@@ -2,35 +2,111 @@ import requests
 import json
 import pandas as pd
 import datetime
+from colorama import Fore
+from colorama import Style
 
-webservice = "https://api.crewmeister.com/api/v2/"
+def pt_to_hours(x):
+    delta = int(x[2:-1]) / 3600
+    return round(delta, 2)
+    #return str(round(delta)) + ":" + str(round((delta - round(delta)) * 60))  
 
-user_name = "bedner@global-cube.de"
-user_pass = "7pmicg1w"
-crew_id = "26515"
+def identity(x):
+    return x
 
-r = requests.post(webservice + "user/authentication", { 'userIdentifier': user_name, 'password': user_pass })
-#print(r.json())
-payload = r.json()['payload']
-cookies = { 'cmAuthenticationUserToken': payload['token'], 'cmAuthenticationUserId': str(payload['id']) }
+def type_id(x):
+    if x == 1:
+        return "Urlaub"
+    if x == 2:
+        return "Krank"
+    if x == 3:
+        return "Freizeit"
+    return "anwesend"
 
-r = requests.post(webservice + "crew/authentication", { 'crewId': crew_id }, cookies=cookies)
-#print(r.json())
-payload = r.json()['payload']
-cookies['cmAuthenticationCrewToken'] = payload['token']
-cookies['cmAuthenticationCrewId'] = payload['id']
+class crewmeister():
+    webservice = "https://api.crewmeister.com/api/v2"
 
-#r = requests.get(webservice + "context", cookies=cookies)
-#print(r.json())
+    user_name = "bedner@global-cube.de"
+    user_pass = "7pmicg1w"
+    crew_id = "26515"
 
-r = requests.get(webservice + f"crew/{crew_id}/member", cookies=cookies)
-users = pd.DataFrame(r.json()['payload'])
-print(users.query("activeTimeAccount==1"))
+    suffix_count = 0
 
-timestamp = datetime.datetime.now().isoformat()
-print(timestamp)
-r = requests.get(webservice + f"crew/{crew_id}/time-tracking/stamps", { 'startTime': timestamp, 'endTime': timestamp },  cookies=cookies)
-stamps =pd.DataFrame(r.json()['payload'])
+    def auth(self):
+        cookies = {}    
+        r = requests.post(self.webservice + "/user/authentication", { 'userIdentifier': self.user_name, 'password': self.user_pass })
+        payload = r.json()['payload']
+        cookies['cmAuthenticationUserToken'] = payload['token']
+        cookies['cmAuthenticationUserId'] = str(payload['id'])
 
-online = pd.merge(stamps, users, on='userId').query("timeAccount==1").groupby('email')
-print(online['timestamp'].max())
+        r = requests.post(self.webservice + "/crew/authentication", { 'crewId': self.crew_id }, cookies=cookies)
+        payload = r.json()['payload']
+        cookies['cmAuthenticationCrewToken'] = payload['token']
+        cookies['cmAuthenticationCrewId'] = payload['id']
+        return cookies
+
+    def userlist(self):
+        r = requests.get(self.webservice + f"/crew/{self.crew_id}/member", cookies=self.cookies)
+        return pd.DataFrame(r.json()['payload']).set_index("userId")
+    
+    def add_to_userlist (self, df):
+        self.users = self.users.join(df, rsuffix="_"+str(self.suffix_count))
+        self.suffix_count += 1
+
+    def __init__(self):
+        self.cookies = self.auth()        
+        self.users = self.userlist()
+    
+    def cm_request(self, querystring, params, cols):
+        r = requests.get(self.webservice + querystring, params, cookies=self.cookies)
+        try:
+            df = pd.DataFrame(r.json()['payload'])
+            if 'userId' not in df.columns:
+                df['userId'] = df['groupBy'].map(lambda x: x['userId'])
+            for (key, (col, map_fun)) in cols.items():
+                df[key] = df[col].map(map_fun)
+            items = list(cols.keys())
+            items.append("userId")
+            self.add_to_userlist(df.filter(items=items).set_index("userId"))
+            return True
+        except Exception as e:
+            print(r.json())
+            print(e)
+            return self.zerofill(cols)
+                
+    def zerofill(self, cols):
+        for key in cols.keys():
+            self.users[key] = 0
+        return False
+
+    def statistics(self):
+        timestamp = datetime.datetime.now().isoformat()
+        today = timestamp[:10]
+        first_of_year = today[:4] + "-01-01"
+        end_of_year = today[:4] + "-12-31"
+        prev_month = (datetime.date.fromisoformat(today[:7] + "-01") + datetime.timedelta(days=-1)).isoformat()[:10]
+
+        r = requests.get(self.webservice + f"/crew/{self.crew_id}/time-tracking/stamps", { 'startTime': timestamp, 'endTime': timestamp }, cookies=self.cookies)
+        stamps = pd.DataFrame(r.json()['payload'])
+
+        if len(stamps.index) > 0:
+            self.add_to_userlist(stamps.query("timeAccount==1").groupby('userId').max())
+
+        self.cm_request(f"/crew/{self.crew_id}/absence-management/absences", { 'from': today, 'to': today }, { 'absence_today': ("typeId", identity) })
+        self.cm_request(f"/crew/{self.crew_id}/absence-management/absence-balances", { 'date': first_of_year, 'typeId': 1 }, { 'absence_vac_prev': ("value", identity) })
+        self.cm_request(f"/crew/{self.crew_id}/absence-management/absence-balances", { 'date': today, 'typeId': 1 }, { 'absence_vacation': ("value", identity) })
+        self.cm_request(f"/crew/{self.crew_id}/absence-management/absence-balances", { 'date': end_of_year, 'typeId': 1 }, { 'absence_planned': ("value", identity) })
+        self.cm_request(f"/crew/{self.crew_id}/duration-balances?groupBy%5B%5D=user_id&date%5B%5D={prev_month}", {}, { 'duration_prev_month': ("value", pt_to_hours) })
+        self.cm_request(f"/crew/{self.crew_id}/duration-balances?groupBy%5B%5D=user_id&date%5B%5D={today}", {}, { 'duration_today': ("value", pt_to_hours) })
+        return self.users
+
+cm = crewmeister()
+user = cm.statistics().loc[86043]
+print("Gerade anwesend: " + user['clockInTimestamp'])
+print("geplante Abwesenheit heute: " + type_id(user['absence_today']))
+print("Urlaub Jahresanfang: " + str(user['absence_vac_prev']))
+print("Urlaub genommen: " + str(user['absence_vacation']))
+print("Urlaub geplant: " + str(user['absence_planned']))
+print("Überstunden Vormonat: " + str(user['duration_prev_month']))
+print("Überstunden aktuell: " + str(user['duration_today']))
+
+print(f"{Fore.GREEN}Test!{Style.RESET_ALL}")

+ 34 - 0
data.py

@@ -0,0 +1,34 @@
+import pandas as pd 
+import numpy as np
+import json
+from functools import reduce
+
+debug = False
+
+csv_file = "data/offene_auftraege_eds_c11.csv"
+cols_pkey = ["Hauptbetrieb", "Standort", "Nr", "Auftragsdatum"]
+cols_str = ["Serviceberater", "Order Number", "Fabrikat", "Model", "Fahrzeug", "Kostenstelle", "Marke", "Kunde", "Turnover_Type_Desc"]
+cols_float = ["Durchg\u00e4nge (Auftrag)", "Arbeitswerte", "Teile", "Fremdl.", "Anzahl Tage"]
+
+def update(d, other):
+    d.update(other)
+    return d
+
+def get_dict(cols, dtype):
+    return dict(zip(cols, [dtype] * len(cols)))
+
+cols_dict = reduce(update, (get_dict(cols_pkey, str), get_dict(cols_str, str), get_dict(cols_float, float)), {})
+
+df = pd.read_csv(csv_file, decimal=",", sep=";", encoding="ansi", usecols=cols_dict.keys(), dtype=cols_dict)
+df['pkey'] = reduce(lambda x, y: x + "_" + df[y], cols_pkey, "") 
+df_sum = df.groupby("pkey").sum()
+df_unique = df[cols_pkey + cols_str + ['pkey']].drop_duplicates()
+
+df_join = df_sum.join(df_unique.set_index('pkey'), rsuffix='_other')
+df_join['Gesamt'] = df_join['Arbeitswerte'] + df_join['Teile'] + df_join['Fremdl.']
+df_result = df_join[(df_join['Gesamt'] != 0) & (df_join['Serviceberater'] != "")]
+
+with open("data/offene_auftraege.json", "w") as f: 
+    f.write(df_result.to_json(orient="split", indent=2))
+
+print(df_result.shape)
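The reduce over cols_pkey above concatenates the key columns into a synthetic primary key before grouping. A self-contained sketch of the same step on a toy frame, with column names shortened for illustration:

import pandas as pd
from functools import reduce

toy = pd.DataFrame({"Nr": ["1", "1", "2"], "Teile": [10.0, 5.0, 7.0]})
# Build the synthetic key exactly as data.py does:
toy["pkey"] = reduce(lambda acc, col: acc + "_" + toy[col], ["Nr"], "")
print(toy.groupby("pkey")[["Teile"]].sum())  # "_1" -> 15.0, "_2" -> 7.0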

+ 115 - 0
db_create.py

@@ -0,0 +1,115 @@
+import sqlite3
+import pandas as pd 
+from sqlalchemy import create_engine, inspect
+from threading import Thread
+
+csv_file = "CARLO.csv"
+clients = {'1': "M und S Fahrzeughandel GmbH" }
+client_db = "1"
+date_filter = "'2018-01-01'"
+source_dsn = { 'user': "sa", 'pass': "Mffu3011#", 'server': "GC-SERVER1\\GLOBALCUBE", 'database': "DE0017" }
+source_schema = "dbo"
+target_dsn = { 'user': "sa", 'pass': "Mffu3011#", 'server': "GC-SERVER1\\GLOBALCUBE", 'database': "CARLO2" }
+target_schema = "import"
+#stage_dir = "\\\\gc-server1\Austausch\\stage"
+stage_dir = "C:\\GlobalCube\\System\\CARLO\\Export\\stage"
+
+def db_import (select_query, source_db, current_table, target_db, target_schema):
+    pd.read_sql(select_query, source_db).to_sql(current_table['target'], target_db, schema=target_schema, index=False, if_exists='append')
+
+def conn_string (dsn):
+    return f"mssql+pyodbc://{dsn['user']}:{dsn['pass']}@{dsn['server']}/{dsn['database']}?driver=SQL+Server+Native+Client+11.0"
+
+def conn_params (dsn):
+    return f"-S {dsn['server']} -d {dsn['database']} -U {dsn['user']} -P {dsn['pass']}"
+
+df = pd.read_csv(csv_file, sep=";", encoding="ansi")
+config = df[df['target'].notnull()]
+print(config.head())
+
+source_db = create_engine(conn_string(source_dsn))
+source_insp = inspect(source_db)
+source_tables = source_insp.get_table_names(schema=source_schema)
+source_tables_prefix = set([t.split("$")[0] for t in source_tables if "$" in t])
+print(source_tables_prefix)
+
+target_db = create_engine(conn_string(target_dsn))
+target_insp = inspect(target_db)
+target_tables = target_insp.get_table_names(schema=target_schema)
+
+
+for index, current_table in config.iterrows():
+    with open(stage_dir + "\\batch\\" + current_table['target'] + ".bat", "w", encoding="cp850") as f:
+        f.write("@echo off \n")
+        f.write("rem ==" + current_table['target'] + "==\n")
+        
+        if current_table['target'] not in target_tables:
+            f.write(f"echo Ziel-Tabelle '{current_table['target']}' existiert nicht!\n")
+            continue
+        
+        f.write(f"del {stage_dir}\\{current_table['target']}*.* /Q /F >nul 2>nul \n")
+        f.write(f"sqlcmd.exe {conn_params(target_dsn)} -p -Q \"TRUNCATE TABLE [{target_schema}].[{current_table['target']}]\" \n")
+
+        target_insp_cols = target_insp.get_columns(current_table['target'], schema=target_schema)
+        target_columns_list = [col['name'] for col in target_insp_cols]
+        target_columns = set(target_columns_list)
+
+        for client_db, prefix in clients.items():
+            source_table = current_table['source'].format(prefix)
+
+            if source_table not in source_tables:
+                f.write(f"echo Quell-Tabelle '{source_table}' existiert nicht!\n")
+                continue
+            stage_csv = f"{stage_dir}\\{current_table['target']}_{client_db}.csv"
+            
+            if not pd.isnull(current_table['query']):
+                select_query = current_table['query'].format(prefix, date_filter)
+            else:
+                select_query = f"SELECT T1.* FROM [{source_schema}].[{source_table}] T1 "
+
+            if not pd.isnull(current_table['filter']):
+                select_query += " WHERE " + current_table['filter'].format("", date_filter)
+
+            source_insp_cols = source_insp.get_columns(source_table)
+            source_columns = set([col['name'] for col in source_insp_cols])
+
+            intersect = source_columns.intersection(target_columns)
+            #print("Auf beiden Seiten: " + ";".join(intersect))
+            diff1 = source_columns.difference(target_columns)
+            if len(diff1) > 0:
+                f.write("rem Nur in Quelle: " + ";".join(diff1) + "\n")
+            diff2 = target_columns.difference(source_columns)
+            if not "Client_DB" in diff2:
+                f.write("echo Spalte 'Client_DB' fehlt!\n")
+                continue
+            diff2.remove("Client_DB")
+            if len(diff2) > 0:
+                f.write("rem Nur in Ziel:   " + ";".join(diff2) + "\n")
+
+            #select_columns = "T1.[" + "], T1.[".join(intersect) + "],"
+            select_columns = ""
+            for col in target_columns_list:
+                if col in intersect:
+                    select_columns += "T1.[" + col + "], "
+                elif col == "Client_DB":
+                    select_columns += "'" + client_db + "' as \\\"Client_DB\\\", "
+                else:
+                    select_columns += "'' as \\\"" + col + "\\\", "
+
+
+            select_query = select_query.replace("T1.*", select_columns[:-2])
+            select_query = select_query.replace("%", "%%") # batch-Problem
+            #print(select_query)
+            f.write(f"bcp \"{select_query}\" queryout \"{stage_csv}\" {conn_params(source_dsn)} -c -C 65001 -e \"{stage_csv[:-4]}.queryout.log\" > \"{stage_csv[:-4]}.bcp1.log\" \n")
+            f.write(f"type \"{stage_csv[:-4]}.bcp1.log\" | findstr -v \"1000\" \n")
+            f.write(f"bcp [{target_schema}].[{current_table['target']}] in \"{stage_csv}\" {conn_params(target_dsn)} -c -C 65001 -e \"{stage_csv[:-4]}.in.log\" > \"{stage_csv[:-4]}.bcp2.log\" \n")
+            f.write(f"type \"{stage_csv[:-4]}.bcp2.log\" | findstr -v \"1000\" \n")
+
+
+with open(stage_dir + "\\batch\\_all.bat", "w", encoding="cp850") as f:
+    f.write("@echo off & cd /d %~dp0 \n")
+    f.write(f"del {stage_dir}\\*.* /Q /F >nul 2>nul \n\n")
+    for index, current_table in config.iterrows():
+        f.write("echo ==" + current_table['target'] + "==\n")
+        f.write("echo " + current_table['target'] + " >CON \n")
+        f.write("call " + current_table['target'] + ".bat\n\n")

+ 47 - 3
notebook.ipynb

@@ -30,10 +30,54 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "output_type": "error",
+     "ename": "TypeError",
+     "evalue": "This COM object can not automate the makepy process - please run makepy manually for this object",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mcom_error\u001b[0m                                 Traceback (most recent call last)",
+      "\u001b[1;32mC:\\dev\\Python\\Python38-32\\lib\\site-packages\\win32com\\client\\gencache.py\u001b[0m in \u001b[0;36mEnsureDispatch\u001b[1;34m(prog_id, bForDemand)\u001b[0m\n\u001b[0;32m    529\u001b[0m                 \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 530\u001b[1;33m                         \u001b[0mti\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdisp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_oleobj_\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mGetTypeInfo\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    531\u001b[0m                         \u001b[0mdisp_clsid\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mti\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mGetTypeAttr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;31mcom_error\u001b[0m: (-2147467263, 'Nicht implementiert', None, None)",
+      "\nDuring handling of the above exception, another exception occurred:\n",
+      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
+      "\u001b[1;32m<ipython-input-4-45d6d54b965f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mwin32com\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclient\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mtr\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwin32com\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclient\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgencache\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mEnsureDispatch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"CognosTransformer.Application\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
+      "\u001b[1;32mC:\\dev\\Python\\Python38-32\\lib\\site-packages\\win32com\\client\\gencache.py\u001b[0m in \u001b[0;36mEnsureDispatch\u001b[1;34m(prog_id, bForDemand)\u001b[0m\n\u001b[0;32m    539\u001b[0m                         \u001b[0mdisp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdisp_class\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdisp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_oleobj_\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m                 \u001b[1;32mexcept\u001b[0m \u001b[0mpythoncom\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcom_error\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m                         \u001b[1;32mraise\u001b[0m \u001b[0mTypeError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"This COM object can not automate the makepy process - please run makepy manually for this object\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mdisp\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;31mTypeError\u001b[0m: This COM object can not automate the makepy process - please run makepy manually for this object"
+     ]
+    }
+   ],
+   "source": [
+    "import win32com.client\n",
+    "tr = win32com.client.gencache.EnsureDispatch(\"CognosTransformer.Application\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": "Could not locate a type library matching 'C:\\Program Files (x86)\\Cognos\\cer5\\bin\\trnsfrmr.dll'\n"
+    }
+   ],
+   "source": [
+    "import sys\n",
+    "from win32com.client import makepy\n",
+    "\n",
+    "outputFile = \"C:\\\\Projekte\\\\Python\\\\transformer_automation.py\"\n",
+    "comTypeLibraryOrDLL = \"C:\\\\Program Files (x86)\\\\Cognos\\\\cer5\\\\bin\\\\transdll.dll\"\n",
+    "sys.argv = [\"makepy\", \"-o\", outputFile, comTypeLibraryOrDLL]\n",
+    "\n",
+    "makepy.main ()"
+   ]
   },
   {
    "cell_type": "code",

+ 22 - 0
powerplay.vbs

@@ -0,0 +1,22 @@
+Sub Main()
+    Dim objPPRep as Object
+    Dim objRepPrt as Object
+    Set objPPRep = CreateObject ("CognosPowerPlay.Report")
+    objPPRep.Open "C:\CUBES UND BERICHTE\BEISPIEL2.PPR"
+    Set objRepPrt = objPPRep.Print
+    objRepPrt.PrintAllCharts = False
+    objRepPrt.SetListOfRowsToPrint objPPRep.Rows
+    objRepPrt.SetListOfLayersToPrint objPPRep.Layers
+    objRepPrt.SetChartToPrint objPPRep.Graphs.Item(2)
+    objRepPrt.IncludeLegend = True
+    objRepPrt.ChartTitleOnAllPages = True
+    objRepPrt.SummariesOnAllPages = True
+    objRepPrt.AxisOnAllPages = True
+    objRepPrt.Collate = True
+    objRepPrt.Copies = 1
+    objRepPrt.PrintOut
+    objRepPrt.Copies = 2
+    objRepPrt.PrintOut
+    Set objRepPrt = Nothing
+    Set objPPRep = Nothing
+End Sub
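Since the rest of the commit automates Cognos from Python, here is a rough win32com counterpart to the macro above, trimmed to the print setup. It is a sketch only: it assumes the CognosPowerPlay.Report object model behaves the same when driven late-bound from Python as it does from the Basic macro:

import win32com.client

rep = win32com.client.Dispatch("CognosPowerPlay.Report")
rep.Open(r"C:\CUBES UND BERICHTE\BEISPIEL2.PPR")
prt = rep.Print                # report printer object, as in the macro
prt.PrintAllCharts = False
prt.IncludeLegend = True
prt.Copies = 1
prt.PrintOut()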

+ 17 - 0
sql_excel.py

@@ -0,0 +1,17 @@
+import sqlite3
+import pandas as pd 
+import sqlalchemy
+from sqlalchemy import create_engine
+
+file = "Planung_2020.xlsx"
+sheet = "Planung"
+output = "result.xlsx"
+
+engine = create_engine("sqlite://", echo=False)
+df = pd.read_excel(file, sheet_name=sheet)
+df.to_sql("planung", engine, if_exists="replace", index=False)
+
+results = engine.execute("SELECT * FROM planung")
+
+final = pd.DataFrame(results, columns=df.columns)
+final.to_excel(output, index=False)
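The point of the in-memory SQLite detour above is that the sheet becomes queryable with plain SQL before being written back out. A sketch with an actual filter instead of SELECT *, using the same 1.x-style engine.execute call as the file; the Jahr column is an assumed name, not taken from the workbook:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite://", echo=False)
pd.read_excel("Planung_2020.xlsx", sheet_name="Planung").to_sql(
    "planung", engine, if_exists="replace", index=False)
rows = engine.execute("SELECT * FROM planung WHERE Jahr = 2020").fetchall()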

+ 5 - 4
transformer7.py

@@ -4,16 +4,17 @@ from pathlib import Path
 
 class transformer7:
     def __init__(self):
-        self.tr = win32com.client.Dispatch("CognosTransformer.Application")
-        #self.tr.Visible(True)
-        print(self.tr.Version)
+        self.app = win32com.client.Dispatch("CognosPowerPlay.Report") # "CognosTransformer.Application")
+        print(self.app)
+        self.app.visible = 1
+        print(self.app.Version)
 
     def open(self, file, format = None): 
         filename = Path(file).resolve()
         #path = str(filename.parent.resolve())
         print(filename)
         try:
-            model = self.tr.OpenModel(filename)
+            model = self.app.OpenModel(filename)
             cube = model.Cubes.Item(1)
             cube.Update()
             model.CreateMDCFiles()
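Note that the constructor now dispatches the PowerPlay ProgID while open() still calls the Transformer OpenModel method, which is consistent with the "not functional" commit message. For reference, a hedged usage sketch of the class; the model path is illustrative:

t7 = transformer7()
t7.open(r"C:\Modelle\beispiel.mdl")  # opens, updates and exports the first cube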

+ 0 - 0
transformer_automation.py


+ 56 - 0
zip_backup.py

@@ -0,0 +1,56 @@
+import zipfile
+import glob
+import os.path
+from os import path
+
+class zip_backup:
+    root_dir = ""
+    
+    ignore = []
+    backup_list = []
+
+    def ignore_list(self):
+        gitignore = self.root_dir + "\\.gitignore"
+        if not path.exists(gitignore):
+            return  # no .gitignore, nothing to exclude
+        with open(gitignore, "r") as f:
+            for l in f.readlines():
+                line = l.strip().replace("/", "\\").lower()
+                if line[:1] == "*": 
+                    if line[-1] == "*":
+                        line = line[1:-1]
+                    else:
+                        line = line[1:] + "\\"
+                else:
+                    line = "\\" + line + "\\"
+                self.ignore.append(line)
+
+    def ignored(self, filename):
+        rel_filename = "\\" + filename.replace(self.root_dir, "").lower() + "\\"
+
+        for e in self.ignore:
+            if e in rel_filename:
+                return True
+        return False
+
+    def check_dir(self, current_dir):
+        if self.root_dir == "":
+            self.root_dir = current_dir
+            self.ignore_list()
+
+        for entry in glob.glob(current_dir + "\\*"):
+            if path.isdir(entry):
+                self.check_dir(entry)
+            elif not self.ignored(entry):
+                self.backup_list.append(entry)
+
+    def zip_to_file(self, zip_file):
+        with zipfile.ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as zip:
+            for e in self.backup_list:
+                zip.write(e)
+
+
+z_backup = zip_backup()
+z_backup.check_dir("C:\\GAPS_Autosys")
+#print(backup_list[:10])
+z_backup.zip_to_file("C:\\GAPS_Autosys\\Test.zip")
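One caveat about zip_to_file() above: ZipFile.write(e) with absolute paths stores each entry under its full path minus the drive letter. A sketch that stores archive members relative to the scanned root instead; this is an alternative, not what the commit does:

import os
import zipfile

def zip_relative(backup_list, root_dir, zip_file):
    with zipfile.ZipFile(zip_file, "w", compression=zipfile.ZIP_DEFLATED,
                         compresslevel=9) as zf:
        for entry in backup_list:
            # Strip the root prefix so the archive is relocatable:
            zf.write(entry, arcname=os.path.relpath(entry, root_dir))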