Adding AVEVA connection and formatting capabilities.

This commit is contained in:
Michael Van Ryn 2023-09-15 08:17:01 -06:00
parent 69b7930943
commit 4c8372d7b1
3 changed files with 68 additions and 14 deletions

View File

@@ -5,7 +5,7 @@ This is a set of tooling to perform useful operations of querying and moving His
Currently supports querying data from the following sources, and their requirements:
* VTScada - REST
* ClearSCADA - Raw Historic Files
- * AVEVA (Wonderware) Historian - InSQL {Coming Soon}
+ * AVEVA (Wonderware) Historian - InSQL
The primary function of this tooling is to query a set of tags for a specified date range, compress and process those values as required, and move them into a format which can be easily imported into VTScada.
@@ -53,6 +53,13 @@ Each directory contains a Historic XXXXXX directory where XXXXXX is the Unique ID
These tools will convert the user start time and end time so that only the found and required HRD files are processed at a time. This can *greatly* expand the amount of data on the system, so it is strongly recommended to keep a lot of free disk space available during queries.
### Setup ClearSCADA Config
Key Point: In the tags list file, the Source Name field is the unique identifier for the tag name to query. In VTScada this can be something like ```temp\old_value1```. In ClearSCADA, it will be the unique point ID, e.g. ```005152```. The leading zeroes can be left out, as the script will pad the integer with leading zeroes to determine the correct path.
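As an illustration only (not code from this repository), the padding behaves roughly like the sketch below; the six-digit width is an assumption taken from the ```005152``` example:
```
# Hypothetical helper: pad a ClearSCADA point ID with leading zeroes.
# The six-digit width is assumed from the example above.
def pad_point_id(point_id: int, width: int = 6) -> str:
    return str(point_id).zfill(width)

print(pad_point_id(5152))  # -> "005152"
```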
## AVEVA (Wonderware) Historian - InSQL
Prerequisite: Install the Microsoft ODBC Driver and set the matching driver name in the *setup.toml* file.
Data can be queried from an AVEVA Historian through an ODBC connection. The Historian exposes a SQL interface, InSQL, which can be used to build the data queries.
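For orientation, a minimal standalone sketch of such a query is shown below. It assumes pyodbc and the Microsoft ODBC Driver 18 for SQL Server are installed; the credentials, tag name, time range, and resolution are placeholders, and the actual tooling builds these values from *setup.toml* and the tags list file.
```
import pyodbc

# Placeholder connection details; the tooling reads these from setup.toml.
conn_str = (
    "DRIVER={ODBC Driver 18 for SQL Server};SERVER=HIST002;"
    "DATABASE=Runtime;UID=example_user;PWD=example_pass;ENCRYPT=no"
)
connection = pyodbc.connect(conn_str)
cursor = connection.cursor()

# Query one (hypothetical) tag through the INSQL linked server.
cursor.execute(
    "SELECT * FROM OpenQuery(INSQL, 'SELECT DateTime, [ExampleTag] "
    "FROM WideHistory WHERE DateTime >= \"2023-01-01 00:00\" "
    "AND DateTime <= \"2023-01-02 00:00\" "
    "AND wwRetrievalMode = \"Cyclic\" AND wwResolution = 60000')"
)
for timestamp, value in cursor.fetchall():
    print(timestamp, value)
connection.close()
```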

main.py
View File

@@ -46,33 +46,80 @@ class HistoricalTag:
# AVEVA (Wonderware) Historian Functions
# ----------------------
# aveva_query()
# ----------------------
# Query an AVEVA Historian in roughly three-month chunks and write the combined tag mapping file
def aveva_query(historical_tags: List[HistoricalTag], start_time: datetime, end_time: datetime):
    current_start_time = start_time
    current_end_time = start_time
    tag_mappings = []
    dir_path = output_path + str(start_time.year) + "\\"
    while current_end_time < end_time:
        # Wonderware starts to buckle on queries much longer than 3 months, so query the timeline 3 months at a time
        if current_end_time + timedelta(days=92) > end_time:
            current_end_time = end_time
        else:
            current_end_time = current_end_time + timedelta(days=92)
        tag_mappings.append(aveva_query_date(historical_tags, current_start_time, current_end_time))
        current_start_time = current_start_time + timedelta(days=92)
    write_tagmapping_to_file(dir_path + "TagMapping.csv", tag_mappings)
# aveva_query_date()
# ----------------------
# Query the Historian over ODBC for a single date range and return the (output file, destination tag) mappings
def aveva_query_date(historical_tags: List[HistoricalTag], start_time: datetime, end_time: datetime):
    print("Querying data for: " + str(start_time.year) + " " +
          str(start_time.month) + " " + str(start_time.day))
    dir_path = output_path + str(start_time.year) + "\\"
    create_directory(dir_path)
-     ft_start_time = "'" + str(start_time.astimezone(timezone.utc)) + "'"
-     ft_end_time = "'" + str(end_time.astimezone(timezone.utc)) + "'"
-     init_string = "driver={SQLOLEDB}; server=" + server + "; database=" + \
-         database_name + "; UID=" + application_user + "; PWD=" + application_pass + ";"
+     init_string = f'DRIVER={{ODBC Driver 18 for SQL Server}};SERVER={server};' + \
+         f'DATABASE={database_name};UID={application_user};PWD={application_pass};' + \
+         f'ENCRYPT=no'
    print(init_string)
-     # connection = pyodbc.connect(init_string)
+     connection = pyodbc.connect(init_string)
    tag_mappings = []
    for tag in historical_tags:
        if tag.tag_type == "real" or tag.tag_type == "integer":
-             retrieval_mode = "'Average'"
+             retrieval_mode = '"Average"'
        else:
-             retrieval_mode = "'Cyclic'"
+             retrieval_mode = '"Cyclic"'
-         query = "SELECT * FROM OpenQuery(INSQL, 'SELECT DateTime, '" + tag.name_source + "' FROM WideHistory WHERE DateTime >= " + \
-             ft_start_time + " AND DateTime <= " + ft_end_time + " AND wwRetrievalMode = " + retrieval_mode + \
-             " AND wwResolution = " + str(tag.interval) + "')"
+         ft_start_time = '"' + str(start_time.astimezone(timezone.utc)) + '"'
+         ft_end_time = '"' + str(end_time.astimezone(timezone.utc)) + '"'
+         ft_tag_str = '[' + str(tag.name_source) + ']'
+         query = f"SELECT * FROM OpenQuery(INSQL, 'SELECT DateTime, {ft_tag_str} FROM WideHistory WHERE DateTime >= " + \
+             f"{ft_start_time} AND DateTime <= {ft_end_time} AND wwRetrievalMode = {retrieval_mode}" + \
+             f" AND wwResolution = {str(tag.interval * 1000)}')"
        cursor = connection.cursor()
        print(query)
        cursor.execute(query)
        raw_values = cursor.fetchall()
        values = []
        for row in raw_values:
            values.append((datetime.timestamp(row[0]), row[1]))
        output_file = prepare_file_for_tag(tag, values, dir_path, end_time)
        if output_file != "":
            tag_mappings.append((output_file, tag.name_dest))
    return tag_mappings
# ----------------------
# ClearSCADA Functions
@@ -332,7 +379,7 @@ def create_directory(path):
# postprocess_values()
# ----------------------
# Process a list of values and clean up timestamps which are within the interval of the last
- # timestamp. Values are assumed to already have been compressed
+ # timestamp
def postprocess_values(values: List[Union[int, float, None]]):

View File

@@ -8,7 +8,7 @@ output_path = "output\\"
system_timezone = "Canada/Mountain"
[aveva]
- server_name = "172.16.1.123"
+ server_name = "HIST002"
database_name = "Runtime"
[vtscada]
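These [aveva] settings are read from *setup.toml* at runtime. A minimal sketch of loading that section, assuming Python 3.11+ where tomllib is in the standard library (the project's actual config loader is not part of this diff):
```
import tomllib  # standard library in Python 3.11+

with open("setup.toml", "rb") as f:
    config = tomllib.load(f)

aveva = config["aveva"]
print(aveva["server_name"], aveva["database_name"])
```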