diff --git a/README.md b/README.md
index b07f7a2..2f1a56d 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@ This is a set of tooling to perform useful operations of queryign and moving His
 Currently supports querying data from the following sources, and their requirements
 * VTScada - REST
 * ClearSCADA - Raw Historic Files
-* Wonderware / AVEVA Historian - InSQL {Coming Soon}
+* AVEVA (Wonderware) Historian - InSQL {Coming Soon}
 
 The primary function of this tooling is to query a set of tags for a specified date range, compress and process those values as required, and move it into a format which can be easily imported into VTScada.
 
@@ -44,7 +44,7 @@ This is a method of moving VTScada data into VTScada data. Scenarios where this
 
 In places where targetting a live ClearSCADA system with SQL queries is challenging, ClearSCADA uses a file-based Historian and provides a utility which converts these HRD files into CSV data.
 
-For each week of each data point, a separate CSV file of data is created.
+For each week of each data point, a separate CSV file of data is created from the corresponding raw HRD file.
 
 Files are generally stored:
 ```C:\ProgramData\Schneider Electric\ClearSCADA\Database\HisFiles```
diff --git a/main.py b/main.py
index d0f4bbb..1fc4bf9 100644
--- a/main.py
+++ b/main.py
@@ -41,6 +41,39 @@ class HistoricalTag:
     def __repr__(self):
         return f"({self.row}, {self.tag_type}, {self.name_source}, {self.name_dest}, {self.scale_factor}, {self.interval}, {self.precision}, {self.deadband})"
 
+
+# ----------------------
+# AVEVA (Wonderware) Historian Functions
+# ----------------------
+
+def aveva_query(historical_tags: List[HistoricalTag], start_time: datetime, end_time: datetime):
+
+    print("Querying data for: " + str(start_time.year) + " " +
+          str(start_time.month) + " " + str(start_time.day))
+    dir_path = output_path + str(start_time.year) + "\\"
+    create_directory(dir_path)
+
+    ft_start_time = "''" + str(start_time.astimezone(timezone.utc)) + "''"  # quotes doubled for the OpenQuery literal
+    ft_end_time = "''" + str(end_time.astimezone(timezone.utc)) + "''"
+
+    init_string = "driver={SQL Server}; server=" + server + "; database=" + \
+        database_name + "; UID=" + application_user + "; PWD=" + application_pass + ";"
+
+    print(init_string)
+    # connection = pyodbc.connect(init_string)
+
+    for tag in historical_tags:
+        if tag.tag_type == "real" or tag.tag_type == "integer":
+            retrieval_mode = "''Average''"
+        else:
+            retrieval_mode = "''Cyclic''"
+        # Tag names are bracket-quoted and single quotes inside the OpenQuery literal are doubled
+        query = "SELECT * FROM OpenQuery(INSQL, 'SELECT DateTime, [" + tag.name_source + "] FROM WideHistory WHERE DateTime >= " + \
+            ft_start_time + " AND DateTime <= " + ft_end_time + " AND wwRetrievalMode = " + retrieval_mode + \
+            " AND wwResolution = " + str(tag.interval) + "')"
+
+        print(query)
+
 # ----------------------
 # ClearSCADA Functions
 # ----------------------
@@ -66,13 +99,13 @@ def clearscada_generate_historical_ids(historic_files: str):
             if id is not None:
                 csv_writer.writerow([str(id)])
 
-# clearscada_query()
+# clearscada_process()
 # ----------------------
-# Query ClearSCADA raw historical files using the ClearSCADA command line tool to create
+# Process ClearSCADA raw historical files using the ClearSCADA command line tool to create
 # csv data from the raw data files, then process and merge the data into VTScada formats
 
-def clearscada_query(historical_tags: List[HistoricalTag], start_time: datetime, end_time: datetime):
+def clearscada_process(historical_tags: List[HistoricalTag], start_time: datetime, end_time: datetime):
 
     dir_path = output_path + str(start_time.year) + "\\"
     create_directory(dir_path)
 
@@ -99,7 +132,11 @@ def clearscada_query(historical_tags: List[HistoricalTag], start_time: datetime,
 
     zipped_directories = zip(historic_directories, tags)
 
     # For each found historic directory execute the ClearSCADA CSV command
+    print("Found: " + str(len(historic_directories)) +
+          " historic directories matching tags")
+    tag_mappings = []
+    files = []
     for (path, tag) in zipped_directories:
 
         # print(path, tag.name_dest)
@@ -110,6 +147,7 @@ def clearscada_query(historical_tags: List[HistoricalTag], start_time: datetime,
             if os.fsdecode(file).endswith(".HRD"):
                 week_number = int(file[2:8])
                 if week_number >= start_week and week_number <= end_week:
+                    files.append(os.path.join(path, file))
                     argument = os.path.join(path, file)
                     subprocess.run([command, "HISDUMP", argument])
 
@@ -120,8 +158,8 @@ def clearscada_query(historical_tags: List[HistoricalTag], start_time: datetime,
         for file in os.listdir(path):
             if os.fsdecode(file).endswith(".csv"):
                 csv_file = os.path.join(path, file)
-
-                values.extend(read_clearscada_file(csv_file))
+                values.extend(clearscada_read_file(csv_file))
+                files.append(csv_file)
 
         # Values will have had their deadband and scaling processed, but remaining is excess frequency
         if len(values) > 0:
@@ -130,15 +168,22 @@ def clearscada_query(historical_tags: List[HistoricalTag], start_time: datetime,
                 tag, values, dir_path, current_end_time, True)
             tag_mappings.append((output_file, tag.name_dest))
 
+    # Delete the generated CSV files once processed (and the source HRD files if configured)
+    for file in files:
+        if file.endswith(".csv") or (delete_processed and file.endswith(".HRD")):
+            try:
+                os.remove(file)
+            except OSError as e:
+                print("Error: %s - %s." % (e.filename, e.strerror))
+
     write_tagmapping_to_file(
         dir_path + "TagMapping.csv", tag_mappings)
 
 
-    # main_directory = os.fsencode(historic_files)
-
-    # clearscada_read_file()
 # ----------------------
 # Read in a ClearSCADA CSV file converted from HRD into a list of timestamps and values
+
+
 def clearscada_read_file(file_path: str) -> List[Union[int, float, None]]:
 
     values = []
@@ -406,10 +451,12 @@ def write_values_to_file(output_file: str, values: List[Union[int, float, None]]
 
 # weeks_since_date()
 # ----------------------
-# Returns the number of weeks since the given timestamp, or defaults to December 25th, 1600
+# Returns the number of weeks from the given epoch date (defaults to January 1, 1601) to the given timestamp
+# ClearSCADA documentation from before the GeoSCADA rebrand appears to have stated the epoch as
+# December 25, 1600, which made the computed week numbers off by one.
 
 
-def weeks_since_date(timestamp, date=(1600, 12, 25)):
+def weeks_since_date(timestamp, date=(1601, 1, 1)):
     dt = datetime.utcfromtimestamp(timestamp)
     start_date = datetime(*date)
     delta = dt - start_date
@@ -455,14 +502,14 @@ if len(sys.argv) == 4:
 
     if query_type == "VTScada":
         print_text('VTScada Data Query')
-
         server = config['vtscada']['server_name']
         realm_port = config['vtscada']['realm_port']
         realm_name = config['vtscada']['realm_name']
-
         vtscada_query(historical_tags, start_time, end_time)
     elif query_type == "AVEVA":
-        print_text('AVEVA Historian - Not Implemented')
+        server = config['aveva']['server_name']
+        database_name = config['aveva']['database_name']
+        aveva_query(historical_tags, start_time, end_time)
     elif query_type == "ClearSCADA":
         print_text('ClearSCADA - Query Raw Historic Files')
         historic_files = config['clearscada']['historic_files']
@@ -470,7 +517,7 @@ if len(sys.argv) == 4:
         delete_processed = config['clearscada']['delete_processed']
 
         clearscada_generate_historical_ids(historic_files)
-        clearscada_query(historical_tags, start_time, end_time)
+        clearscada_process(historical_tags, start_time, end_time)
 
     else:
         print("Invalid arguments!")
diff --git a/setup.toml b/setup.toml
index 4ec8d1c..59782f5 100644
--- a/setup.toml
+++ b/setup.toml
@@ -7,6 +7,10 @@ output_path = "output\\"
 # Canada/Pacific
 system_timezone = "Canada/Mountain"
 
+[aveva]
+server_name = "172.16.1.123"
+database_name = "Runtime"
+
 [vtscada]
 server_name = "lb-vanryn"
 realm_port = "8888"
@@ -19,5 +23,5 @@ install_location = "C:\\Program Files (x86)\\Schneider Electric\\ClearSCADA"
 delete_processed = false
 
 [user]
-application_user = "query"
-application_pass = "queryuser"
+application_user = "wwUser"
+application_pass = "wwUser"
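
Note on `aveva_query()`: the `pyodbc.connect` call is still commented out, so the function currently only prints the generated SQL. Below is a minimal sketch of how one of those printed queries could be executed once the connection is enabled. It is illustrative only: the `INSQL` linked server, `WideHistory` view, `Runtime` database, and connection-string fields come from this change, while the driver name, tag name, time window, resolution value, and result handling are assumed placeholders.

```python
import pyodbc  # assumes a SQL Server ODBC driver is installed on the querying host

# Hypothetical, standalone version of one query that aveva_query() prints.
# Inside the OpenQuery() text, single quotes are doubled and the tag name is bracket-quoted.
conn_str = "driver={SQL Server}; server=172.16.1.123; database=Runtime; UID=wwUser; PWD=wwUser;"
sql = (
    "SELECT * FROM OpenQuery(INSQL, 'SELECT DateTime, [Pump1.FlowRate] "    # placeholder tag
    "FROM WideHistory "
    "WHERE DateTime >= ''2023-01-01 00:00:00'' AND DateTime <= ''2023-01-08 00:00:00'' "
    "AND wwRetrievalMode = ''Average'' AND wwResolution = 60000')"          # placeholder window and resolution
)

connection = pyodbc.connect(conn_str)
cursor = connection.cursor()
cursor.execute(sql)
for timestamp, value in cursor.fetchall():
    print(timestamp, value)
connection.close()
```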
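On the `weeks_since_date()` epoch change: the function's return statement is not part of this diff, so the `// 7` below is an assumption about how the week number is derived from `delta`. The check itself is simple arithmetic: December 25, 1600 is exactly seven days before January 1, 1601, so week numbers computed against the old epoch always come out exactly one higher, which matches the off-by-one described in the updated comment.

```python
from datetime import datetime

# Assumed shape of weeks_since_date(); the floor division is not shown in the diff.
def weeks_since_date(timestamp, date=(1601, 1, 1)):
    delta = datetime.utcfromtimestamp(timestamp) - datetime(*date)
    return delta.days // 7

ts = datetime(2023, 6, 1).timestamp()             # arbitrary sample timestamp
new_epoch = weeks_since_date(ts)                  # weeks since 1601-01-01
old_epoch = weeks_since_date(ts, (1600, 12, 25))  # weeks since 1600-12-25

print(new_epoch, old_epoch, old_epoch - new_epoch)  # the difference is always exactly 1
```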