Compare commits

...

10 Commits

Author SHA1 Message Date
QXZ3NHY dc52a6e1e9 dynatrace 1M 2023-06-15 08:24:35 +02:00
QXZ3NHY 954b70ef63 dynatrace 1M 2023-06-14 14:11:07 +02:00
QXZ3NHY 33c02a3559 kpi1 change 2023-06-14 13:52:57 +02:00
QXZ3NHY 0f56702ef4 1M dynatrace deprecation 2023-06-14 09:11:54 +02:00
QXZ3NHY d855132217 timeshift fix 2023-06-14 09:09:32 +02:00
QXZ3NHY 4c44f0da16 calc timeframe shift 2023-06-14 09:02:07 +02:00
QXZ3NHY d087ef73db calc timeframe shift 2023-06-14 09:01:34 +02:00
QXZ3NHY d2f1199df4 added comments 2023-06-07 14:50:56 +02:00
QXZ3NHY 286010b5b9 added new environment vars 2023-06-06 15:12:36 +02:00
QXZ3NHY 03a0deeb24 updated createReport.py from main 2023-06-06 10:41:47 +02:00
6 changed files with 1045 additions and 219302 deletions

3
.gitignore vendored
View File

@ -140,4 +140,5 @@ crash.log
# for dev
slo_parameter.yaml
*.json
*.bak
*.bak
reports-dev/

556
createReport.py Normal file
View File

@ -0,0 +1,556 @@
import logging
from tracemalloc import start
from typing import Dict
from decouple import config
import sys
import yaml
import datetime
import time
import pandas as pd
import argparse
import warnings
import os
import dynatraceAPI
from pagination import Pagionation
warnings.filterwarnings("ignore")
# note: warnings are globally suppressed by the filter above!
try:
os.environ["TZ"] = "Europe/Berlin" # set new timezone
time.tzset()
except Exception as e:
print(f"This error was encounterted : {e}")
COLUMNS_IN_CSV = ["Date", "HUB", "id", "name", "evaluatedPercentage", "status"]
COLUMNS_IN_XLSX = [
"Date",
"HUB",
"id",
"enabled",
"name",
"description",
"Touchpoint",
"evaluatedPercentage",
"errorBudget",
"status",
"error",
"target",
"warning",
"evaluationType",
"timeframe",
"metricExpression",
"filter",
"type",
]
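# Date-range helpers: each returns (start_date, end_date) covering the last
# full calendar day, week (Monday-Sunday) or month before the given date.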
def previous_day_range(date):
start_date = date - datetime.timedelta(days=1)
end_date = date - datetime.timedelta(days=1)
return start_date, end_date
def previous_week_range(date):
start_date = date + datetime.timedelta(-date.weekday(), weeks=-1)
end_date = date + datetime.timedelta(-date.weekday() - 1)
return start_date, end_date
def previous_month_range(date):
end_date = date.replace(day=1) - datetime.timedelta(days=1)
start_date = end_date.replace(day=1)
return start_date, end_date
def getSLO(
DTAPIToken, DTENV, fromDate, toDate, selector_var, selector_type, header_name
):
# DTENV = base url
# DTAPIToken = sec token
dtclient = dynatraceAPI.Dynatrace(
DTENV, DTAPIToken, logging.Logger("ERROR"), None, None, 0, 2 * 1000
)
my_params_report = {
"pageSize": 25,
"from": int(fromDate),
"to": int(toDate),
"timeFrame": "GTF",
"evaluate": "true",
# name = exact name, text = like
"sloSelector": f"""{selector_type}("{header_name}")"""
# 'sloSelector': f"""name("{header_name}")"""
}
# gets all slos and filter later
api_url_report = "/api/v2/slo"
pages = dtclient.returnPageination(api_url_report, my_params_report, "slo")
# only_wanted = [x for x in pages.elements if str.lower(selector) in str.lower(x['description'])]
df = pd.DataFrame(pages.elements)
return df
def get_daily_slice(start_date, end_date):
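# Builds one row per 24-hour window from start_date through end_date, with
# startTime/endTime expressed as epoch milliseconds for the Dynatrace API.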
tempstart = start_date
days = pd.DataFrame()
# Add the first day
tempend = tempstart + datetime.timedelta(hours=24)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {"Date": tempstart, "startTime": startms, "endTime": endms}
days = pd.concat([days, pd.DataFrame([row])], ignore_index=True)
while tempstart < end_date:
tempstart = tempstart + datetime.timedelta(hours=24)
tempend = tempstart + datetime.timedelta(hours=24)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {"Date": tempstart, "startTime": startms, "endTime": endms}
days = pd.concat([days, pd.DataFrame([row])], ignore_index=True)
return days
def get_hourly_slice(start_date, end_date):
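# Same as get_daily_slice, but with 1-hour windows running to the end of end_date.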
# date object to datetime
tempstart = datetime.datetime(start_date.year, start_date.month, start_date.day)
# date object to datetime
final_end = datetime.datetime.combine(end_date, datetime.datetime.max.time())
hours = pd.DataFrame()
# Add the first slice
tempend = tempstart + datetime.timedelta(hours=1)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {"Date": tempstart, "startTime": startms, "endTime": endms}
hours = pd.concat([hours, pd.DataFrame([row])], ignore_index=True)
while tempstart < final_end:
tempstart = tempstart + datetime.timedelta(hours=1)
tempend = tempstart + datetime.timedelta(hours=1)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {"Date": tempstart, "startTime": startms, "endTime": endms}
hours = pd.concat([hours, pd.DataFrame([row])], ignore_index=True)
return hours
def init_argparse():
parser = argparse.ArgumentParser(
usage="%(prog)s [--fromDate] [toDate] or [preSelect]",
description="gather SLO in daily slices for given Timeframe",
)
parser.add_argument("-f", "--fromDate", help="YYYY-mm-dd e.g. 2022-01-01")
parser.add_argument("-t", "--toDate", help="YYYY-mm-dd e.g. 2022-01-31")
parser.add_argument(
"-p",
"--preSelect",
help="day | week | month - gathers the data for the last full day, week or month",
)
parser.add_argument(
"-s",
"--slices",
help="h | d | t | y - writes the slices hourly, daily, total or year to date into ecxel. given in any order",
)
parser.add_argument(
"-o",
"--output",
help="x | c - creates xlsx (x) and/or CSV (c) file. The CSV file will include a reduced list per sheet",
)
return parser
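# Example invocations (illustrative):
#   python createReport.py --preSelect month --slices dt --output xc
#   python createReport.py --fromDate 2023-05-01 --toDate 2023-05-31 --slices hdty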
def check_inputs(args):
"""
This function is the single point of truth for argument validation. If new arguments are added, they need to be handled here. Returns the from and to dates.
"""
if args.preSelect and (args.fromDate or args.toDate):
print(
"--preSelect must not be used in conjuntion with --fromDate and/or --toDate"
)
sys.exit()
elif args.fromDate and not args.toDate:
print("--fromDate only in conjunction with --toDate")
sys.exit()
elif args.toDate and not args.fromDate:
print("--toDate only in conjunction with --fromDate")
sys.exit()
elif args.toDate and args.fromDate and not args.preSelect:
try:
# fromDate = datetime.date.fromisoformat(args.fromDate)
fromDate = datetime.datetime.strptime(args.fromDate, "%Y-%m-%d")
# toDate = datetime.date.fromisoformat(args.toDate)
toDate = datetime.datetime.strptime(args.toDate, "%Y-%m-%d")
except Exception as e:
print("Progam closed: " + str(e))
sys.exit()
if toDate < fromDate:
print("--toDate can't be older than --fromDate")
sys.exit()
if toDate > datetime.datetime.today() or fromDate > datetime.datetime.today():
print("--toDate or --fromDate can't be in the future")
sys.exit()
elif args.preSelect and not args.fromDate and not args.toDate:
date = datetime.date.today()
if args.preSelect == "week":
fromDate, toDate = previous_week_range(date)
elif args.preSelect == "month":
fromDate, toDate = previous_month_range(date)
elif args.preSelect == "day":
fromDate, toDate = previous_day_range(date)
else:
print("--preSelect must be day, week or month")
sys.exit()
else:
print("Invalid arguments, please use --help")
sys.exit()
if args.slices is None:
print(
"-s or --slices must not be null and needs at least one letter of h d t or y, lower- or uppercase."
)
sys.exit()
elif (
sum(
[
1 if one_inp in str.lower(args.slices) else 0
for one_inp in ["h", "d", "t", "y"]
]
)
== 0
):
print(
"-s or --slices must has at least one letter of h d t or y, lower- or uppercase."
)
sys.exit()
if not args.output:
args.output = "x"
elif (
sum([1 if one_inp in str.lower(args.output) else 0 for one_inp in ["x", "c"]])
== 0
):
print(
"-o or --output requires at least one letter of x or c, lower- or uppercase."
)
sys.exit()
return fromDate, toDate
def get_one_slice(
item,
DTTOKEN,
DTURL,
slice,
out_df,
selector_var,
selector_type,
header_name,
env_type,
):
# Calc daily SLO
df = pd.DataFrame()
for index, row in slice.iterrows():
num_probs = len(slice)
percentage = str(round((100 * (index + 1)) / num_probs, 2)).split(".")
print(
"{:0>4d} von {:0>4d} = {:0>3d}.{:0>2d} %".format(
index + 1, num_probs, int(percentage[0]), int(percentage[1])
),
end="\r",
)
temp_df = getSLO(
DTTOKEN,
DTURL,
row["startTime"],
row["endTime"],
selector_var,
selector_type,
header_name,
)
temp_df["Date"] = row["Date"]
temp_df["HUB"] = item
temp_df["type"] = env_type
df = pd.concat([df, temp_df], ignore_index=True)
# split the description column inside a try block - if the API returns unexpected columns, this will not fail the script
try:
df[["description", "Touchpoint"]] = df["description"].str.split(
"_", expand=True
)
except Exception as e:
print(f"This error was encounterted : {e}")
out_df = pd.concat([out_df, df], ignore_index=True)
print() # newline to remove \r from progress bar
return out_df
def get_slice_ytd_total(
DTTOKEN,
DTURL,
item,
start_date,
end_date,
time_name,
time_val,
out_df,
selector_var,
selector_type,
header_name,
env_type,
):
df = getSLO(
DTTOKEN, DTURL, start_date, end_date, selector_var, selector_type, header_name
)
df[time_name] = time_val
df["HUB"] = item
df["type"] = env_type
try:
df[["description", "Touchpoint"]] = df["description"].str.split(
"_", expand=True
)
except Exception as e:
print(f"This error was encounterted : {e}")
out_df = pd.concat([out_df, df], ignore_index=True)
return out_df
def load_slo_parameter(path):
# read the YAML and keep only complete, valid configurations
mandatory_fields = ["hubs", "selector_type", "selector_var", "yearstart"]
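# Illustrative sketch of the expected slo_parameter.yaml shape (each config needs
# 13 keys in total, including the mandatory fields above; names below are assumptions):
#   my_slo_config:
#     hubs:
#       euprod: {type: "coco"}
#     selector_type: "name"      # "name" = exact match, "text" = like match
#     selector_var: "some SLO name"
#     yearstart: "2023-01-01"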
all_yaml_configs = []
with open(path) as file:
slo_doc = yaml.safe_load(file)
for header_name, configs in slo_doc.items():
tmp_dict = {}
if len(configs) != 13:
print(f"SLO configuration {header_name} is broken (expected 13 fields)")
continue
for key, value in configs.items():
tmp_dict.update({key: value})
if all(
[element in sorted(list(tmp_dict.keys())) for element in mandatory_fields]
):
# python 3.7+
# yearstart = datetime.date.fromisoformat(tmp_dict['yearstart'])
# python <3.7
yearstart = datetime.datetime.strptime(tmp_dict["yearstart"], "%Y-%m-%d")
# common code
yearstart = datetime.datetime(
yearstart.year, yearstart.month, yearstart.day
)
yearstart = time.mktime(yearstart.timetuple()) * 1000
selector_type = tmp_dict["selector_type"] # name if exact name is wanted
selector_var = tmp_dict["selector_var"]
hub = ",".join(
list(
map(
lambda x: x + "-" + tmp_dict["hubs"][x]["type"],
tmp_dict["hubs"].keys(),
)
)
)
all_yaml_configs.append(
[hub, selector_type, selector_var, yearstart, header_name]
)
else:
print(f"Slo Configuration {header_name} is broken")
return all_yaml_configs
def write_slo_to_csv(fileName: str, slice: str, df: pd.DataFrame):
try:
df = df[COLUMNS_IN_CSV]
except Exception as e:
print("Could not rearrange columns: " + str(e))
csvName = "".join([fileName, "_", slice, ".csv"])
df.to_csv(csvName, encoding="utf-8", index=False)
def write_slo_to_excel(writer, sheet: str, df: pd.DataFrame):
try:
df = df[COLUMNS_IN_XLSX]
except Exception as e:
print("Could not rearrange columns: " + str(e))
df.to_excel(writer, sheet_name=sheet)
def create_report_files(args, fromDate, hourlyall, dailyall, totalall, ytd):
touchpoints = ["Vehicle", "Mobile"]
if args.preSelect == "day":
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
fileName = "./QM_Report_" + str(yesterday)
else:
fileName = "./QM_Report_" + str(fromDate.isocalendar()[1])
if "x" in str.lower(args.output):
writer = pd.ExcelWriter(fileName + ".xlsx")
if not totalall.empty and "t" in str.lower(args.slices):
totalall = totalall[totalall["Touchpoint"].isin(touchpoints)]
if "x" in str.lower(args.output):
write_slo_to_excel(writer, "total", totalall)
if "c" in str.lower(args.output):
write_slo_to_csv(fileName, "total", totalall)
if not dailyall.empty and "d" in str.lower(args.slices):
dailyall = dailyall[dailyall["Touchpoint"].isin(touchpoints)]
dailyall["Date"] = (
dailyall["Date"].astype("datetime64[ns]").dt.strftime("%Y-%m-%d")
)
if "x" in str.lower(args.output):
write_slo_to_excel(writer, "daily", dailyall)
if "c" in str.lower(args.output):
write_slo_to_csv(fileName, "daily", dailyall)
if not hourlyall.empty and "h" in str.lower(args.slices):
hourlyall = hourlyall[hourlyall["Touchpoint"].isin(touchpoints)]
hourlyall["Date"] = hourlyall["Date"].astype("datetime64[ns]")
if "x" in str.lower(args.output):
write_slo_to_excel(writer, "hourly", hourlyall)
if "c" in str.lower(args.output):
write_slo_to_csv(fileName, "hourly", hourlyall)
if not ytd.empty and "y" in str.lower(args.slices):
ytd = ytd[ytd["Touchpoint"].isin(touchpoints)]
if "x" in str.lower(args.output):
write_slo_to_excel(writer, "YTD", ytd)
if "c" in str.lower(args.output):
write_slo_to_csv(fileName, "YTD", ytd)
if "x" in str.lower(args.output):
writer.close()
def main(slo_path):
start_timer = time.time()
parser = init_argparse()
args = parser.parse_args()
fromDate, toDate = check_inputs(args)
print("slices", args.slices)
print("fromDate: " + str(fromDate))
print("toDate: " + str(toDate))
# days = get_daily_slice(fromDate,toDate)
days = get_daily_slice(fromDate, toDate)
hours = get_hourly_slice(fromDate, toDate)
with open("./environment.yaml") as file:
env_doc = yaml.safe_load(file)
hourlyall = pd.DataFrame()
dailyall = pd.DataFrame()
totalall = pd.DataFrame()
ytd = pd.DataFrame()
slo_configs = load_slo_parameter(slo_path)
for one_slo_config in slo_configs:
hub, selector_type, selector_var, yearstart, header_name = one_slo_config
print(
f"For the slo config was '{slo_path}' used with the config '{header_name}'."
)
for item, doc in env_doc.items():
if item not in hub:
print(
f"{item} will be skipped since it is not in {hub}, which was selected in {slo_path}"
)
continue
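# environment.yaml entries are lists of single-key dicts:
# index 0 = name, 1 = env-url, 2 = env-token-name, 3 = jenkins, 4 = type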
token = dict(doc[2])
url = dict(doc[1])
print("Crawling through: " + item)
print("Check if token exists in environment...")
if config(token.get("env-token-name"), default="") != "":
print("Gather data, hold on a minute")
DTTOKEN = config(token.get("env-token-name"), default="")
DTURL = url.get("env-url")
# Calc daily SLO
if "d" in str.lower(args.slices):
dailyall = get_one_slice(
doc[0]["name"],
DTTOKEN,
DTURL,
days,
dailyall,
selector_var,
selector_type,
header_name,
doc[4]["type"],
)
# Calc hourly SLO
if "h" in str.lower(args.slices):
hourlyall = get_one_slice(
doc[0]["name"],
DTTOKEN,
DTURL,
hours,
hourlyall,
selector_var,
selector_type,
header_name,
doc[4]["type"],
)
# Calc Overall YTD SLO
if "y" in str.lower(args.slices):
ytd = get_slice_ytd_total(
DTTOKEN,
DTURL,
doc[0]["name"],
yearstart,
days["endTime"].max(),
"Date",
fromDate.year,
ytd,
selector_var,
selector_type,
header_name,
doc[4]["type"],
)
# Calc Overall SLO
if "t" in str.lower(args.slices):
totalall = get_slice_ytd_total(
DTTOKEN,
DTURL,
doc[0]["name"],
days["startTime"].min(),
days["endTime"].max(),
"Date",
fromDate.isocalendar()[1],
totalall,
selector_var,
selector_type,
header_name,
doc[4]["type"],
)
else:
print("token not found, skipping " + item)
create_report_files(args, fromDate, hourlyall, dailyall, totalall, ytd)
print("\n")
print("It took {} seconds to run this script".format(time.time() - start_timer))
if __name__ == "__main__":
main("../shared_configuration/slo_parameter.yaml")

View File

@ -1,31 +1,43 @@
---
euprod:
euprod-coco:
- name: "euprod"
- env-url: "https://xxu26128.live.dynatrace.com"
- env-token-name: "EUPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
eupreprod:
- type: "coco"
euprod-gcdm:
- name: "euprod"
- env-url: "https://moh22956.live.dynatrace.com"
- env-token-name: "EUPRODSAAS_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
- type: "gcdm"
eupreprod-coco:
- name: "eupreprod"
- env-url: "https://qqk70169.live.dynatrace.com"
- env-token-name: "EUPREPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
naprod:
- type: "coco"
naprod-coco:
- name: "naprod"
- env-url: "https://wgv50241.live.dynatrace.com"
- env-token-name: "NAPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
napreprod:
- type: "coco"
napreprod-coco:
- name: "napreprod"
- env-url: "https://onb44935.live.dynatrace.com"
- env-token-name: "NAPREPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
cnprod:
- type: "coco"
cnprod-coco:
- name: "cnprod"
- env-url: "https://dyna-synth-cn.bmwgroup.com.cn/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b"
- env-token-name: "CNPROD_TOKEN_VAR"
- jenkins: "https://jaws-china.bmwgroup.net/opmaas/"
cnpreprod:
- type: "coco"
cnpreprod-coco:
- name: "cnpreprod"
- env-url: "https://dynatracemgd-tsp.bmwgroup.net/e/ab88c03b-b7fc-45f0-9115-9e9ecc0ced35"
- env-token-name: "CNPREPROD_TOKEN_VAR"
- jenkins: "https://jaws-china.bmwgroup.net/opmaas/"
- type: "coco"

View File

@ -1,4 +1,4 @@
import threading
# import threading
import concurrent.futures
import os
import glob
@ -12,17 +12,24 @@ import yaml
from KRParser import krparser, helper
import warnings
from datetime import datetime, timedelta
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows
# DEBUG
import time
import json
from dateutil.relativedelta import relativedelta
DEBUG = False
warnings.filterwarnings("ignore")
load_dotenv()
REPORT_TYPE = os.environ.get("REPORT_TYPE")
try:
os.environ["TZ"] = "Europe/Berlin" # set new timezone
time.tzset()
except Exception as e:
print(f"This error was encounterted : {e}")
class ReportReader:
"""
@ -47,6 +54,7 @@ class ReportReader:
"""
Gets XLSX file and reads it into pandas dataframes
"""
Helper.console_output("Getting latest QM-Report and ingesting...")
self.qm_report_file = glob.glob(os.path.join(os.getcwd(), "*.xlsx"))[0]
sheet_names = self.get_sheet_names()
for sheet_name in sheet_names:
@ -69,20 +77,37 @@ class ReportReader:
"""
Extracts all the SLO ids and sorts them by hub.
"""
Helper.console_output("Extracting SLO ids...")
for df_sheet_name in self.qm_report_df.keys():
hubs = self.qm_report_df[df_sheet_name]["HUB"].unique()
hubs = self._build_environment_names()
for hub in hubs:
self.qm_report_ids[df_sheet_name][hub] = []
for _, row in self.qm_report_df[df_sheet_name].iterrows():
self.qm_report_ids[df_sheet_name][row["HUB"]].append(row["id"])
self.qm_report_ids[df_sheet_name][f'{row["HUB"]}-{row["type"]}'].append(
row["id"]
)
def _build_environment_names(self) -> typing.List:
environment_names = []
for _, row in self.qm_report_df[self.get_sheet_names()[0]].iterrows():
name = f"{row['HUB']}-{row['type']}"
if name not in environment_names:
environment_names.append(name)
return environment_names
class QmReportWriter:
def __init__(self, report_dfs: pd.DataFrame, kpis: typing.Dict):
def __init__(self, report_dfs: pd.DataFrame, kpis: typing.Dict, filename: str):
self.report_dfs = report_dfs
self.kpis = kpis
self.filename = filename
def run(self):
Helper.console_output("Starting QM-Report writing process...")
self._combine_datasets()
def _combine_datasets(self):
Helper.console_output("Enriching QM-Report with new KPI")
for sheet in self.kpis.keys():
for hub in self.kpis[sheet].keys():
for slo_id in self.kpis[sheet][hub].keys():
@ -91,17 +116,22 @@ class QmReportWriter:
if (
query["result"] != "None"
and len(query["result"]) > 0
and len(query["result"][0]["data"]) > 0
and len(query["result"][0]["data"][0]["values"][0]) > 0
# and len(query["result"][0]["data"]) > 0
# and len(query["result"][0]["data"][0]["values"]) > 0
):
values = query["result"][0]["data"][0]["values"][0]
# values = query["result"][0]["data"][0]["values"][0]
values = query["api_result"]
mask = (
(self.report_dfs[sheet]["HUB"] == hub)
(self.report_dfs[sheet]["HUB"] == hub.split("-")[0])
& (self.report_dfs[sheet]["id"] == slo_id)
& (
self.report_dfs[sheet]["timeframe"]
== query["timeframe"]
)
& (
self.report_dfs[sheet]["type"]
== hub.split("-")[1]
)
)
if (
query["kpi_name"]
@ -112,10 +142,15 @@ class QmReportWriter:
mask, query["kpi_name"]
] = values
self.write_report_to_xlsx()
self._write_report_to_xlsx()
def write_report_to_xlsx(self):
writer = pd.ExcelWriter("test.xlsx", engine="xlsxwriter")
def _write_report_to_xlsx(self):
Helper.console_output("Writing XLSX")
if DEBUG:
filename = "test.xlsx"
else:
filename = self.filename
writer = pd.ExcelWriter(filename, engine="xlsxwriter")
workbook = writer.book
for sheet_name, dataframe in self.report_dfs.items():
@ -129,11 +164,11 @@ class QmReportWriter:
class DynatraceDataGetter:
def __init__(self) -> None:
self.config = {"threads": 3}
self.config = {"threads": 10}
self.environment = self._load_environment()
def run(self, data: typing.Dict):
env_doc = self.environment
# def run(self, data: typing.Dict):
# env_doc = self.environment
def get_data_from_dynatrace(
self, params, environment: str, route: str
@ -159,12 +194,6 @@ class DynatraceDataGetter:
def krparser_get_data_from_dynatrace(
self, params, environment: str, route: str
) -> typing.Dict:
# if type(params) is dict:
# params_string = f"?{self._build_params(params)}"
# elif type(params) is str:
# params_string = f"/{params}"
# if environment == "euprod":
url = self.environment[environment][1]["env-url"]
token = os.environ[self.environment[environment][2]["env-token-name"]]
@ -197,7 +226,41 @@ class KPIGetter:
self.extracted_key_requests = defaultdict(dict)
self.metric_expressions = defaultdict(dict) # sheet -> hub -> sloid
def transform_key_requests(self) -> str:
def run(self):
"""
Entrypoint for the KPI extension.
"""
if DEBUG:
Helper.console_output("Script running debug mode")
Helper.cleanup_debug_files()
report_reader = ReportReader()
report_reader.run()
# Get SLO IDs from first sheet and build metric expression queries.
for i, sheet in enumerate(report_reader.qm_report_ids.keys()):
if i == 0:
for hub in report_reader.qm_report_ids[sheet].keys():
self.get_slos(report_reader.qm_report_ids[sheet][hub], hub)
self.get_kpi_data(report_reader.qm_report_df)
write_report = QmReportWriter(
report_reader.qm_report_df,
self.metric_expressions,
report_reader.qm_report_file,
)
write_report.run()
# DEBUG
if DEBUG:
with open("metricexpressions.json", "w") as f:
f.write(json.dumps(self.metric_expressions, indent=4))
def _transform_key_requests(self):
"""
Transforms the responses from the key request parser into a joined string.
"""
for hub in self.extracted_key_requests.keys():
for slo in self.extracted_key_requests[hub].keys():
if len(self.extracted_key_requests[hub][slo]["services"]) > 0:
@ -208,7 +271,8 @@ class KPIGetter:
"services_transformed"
] = services
else:
print(f"SERVICE: {hub} - {slo} is empty")
if DEBUG:
print(f"SERVICE: {hub} - {slo} is empty")
if len(self.extracted_key_requests[hub][slo]["requests"]):
requests = Helper.transform_and_format_list(
@ -218,49 +282,135 @@ class KPIGetter:
"requests_transformed"
] = requests
else:
# TODO: proper logging
print(f"REQUEST: {hub} - {slo} is empty")
if DEBUG:
print(f"REQUEST: {hub} - {slo} is empty")
def _build_environment_names(self, df: pd.DataFrame) -> typing.List:
"""
Creates a new environment list from the given QM report dataframe.
Args:
df (pd.DataFrame): QM report XLSX converted into a dataframe.
Returns:
typing.List: List with unique environment names.
"""
environment_names = []
for _, row in df.iterrows():
name = f'{row["HUB"]}-{row["type"]}'
if name not in environment_names:
environment_names.append(name)
return environment_names
def _get_time_scope(self, sheet_name: str) -> str:
if sheet_name == "hourly":
pass
elif sheet_name == "daily":
pass
def get_kpi_data(self, dfs: ReportReader):
# for hub in self.extracted_key_requests.keys():
# for slo in self.extracted_key_requests[hub].keys():
# if "services_transformed" in self.extracted_key_requests[hub][slo]:
"""
Creates queries for Dynatrace and adds them to a list for further processing.
Args:
dfs (ReportReader): Dictionary with the QM report dataframes from the ReportReader class.
"""
for sheet in dfs.keys():
self.metric_expressions[sheet] = defaultdict(dict)
hubs = dfs[sheet]["HUB"].unique()
hubs = self._build_environment_names(dfs[sheet])
for hub in hubs:
self.metric_expressions[sheet][hub] = defaultdict(dict)
for _, row in dfs[sheet].iterrows():
self.metric_expressions[sheet][row["HUB"]][row["id"]] = []
# TODO: another iteration
if (
row["id"]
not in self.metric_expressions[sheet][f'{row["HUB"]}-{row["type"]}']
):
self.metric_expressions[sheet][f'{row["HUB"]}-{row["type"]}'][
row["id"]
] = []
from_timestamp_ms, to_timestamp_ms = Helper.extract_timestamps(
row["timeframe"]
)
timeframe = self._get_timeframe_for_kpi_data(
from_timestamp_ms, to_timestamp_ms
)
# timeframe = self._get_timeframe_for_kpi_data(
# from_timestamp_ms, to_timestamp_ms
# )
timeframe = self._get_timeframe_for_kpi_data()
# get timestamps shifted
(
from_timestamp_ms_shifted,
to_timestamp_ms_shifted,
) = self._calculate_timeshift(
from_timestamp_ms, to_timestamp_ms, timeframe
)
if row["id"] in self.extracted_key_requests[row["HUB"]]:
if (
row["id"]
in self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}']
):
if (
"services_transformed"
in self.extracted_key_requests[row["HUB"]][row["id"]]
in self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]
):
metric_kpi1 = self._build_kpi_metric_for_query(
"kpi1",
timeframe,
self.extracted_key_requests[row["HUB"]][row["id"]][
"services_transformed"
],
# 1M gets deprecated
if timeframe == "1M":
# KPI 1 :timeshift(in days)
# timeshift(-1M) will be deprecated
kpi1_timeshift = f"{Helper.get_days(from_timestamp_ms, to_timestamp_ms)}d"
metric_kpi1 = self._build_kpi_metric_for_query(
"kpi1",
kpi1_timeshift,
self.extracted_key_requests[
f'{row["HUB"]}-{row["type"]}'
][row["id"]]["services_transformed"],
)
else:
metric_kpi1 = self._build_kpi_metric_for_query(
"kpi1",
timeframe,
self.extracted_key_requests[
f'{row["HUB"]}-{row["type"]}'
][row["id"]]["services_transformed"],
)
self.metric_expressions[sheet][f'{row["HUB"]}-{row["type"]}'][
row["id"]
].append(
# self._template_metric_expression(
# "kpi_1",
# metric_kpi1,
# from_timestamp_ms,
# to_timestamp_ms,
# timeframe,
# row["timeframe"],
# )
{
"kpi_name": "kpi_1",
"metric": metric_kpi1,
"from_date": from_timestamp_ms,
"to_date": to_timestamp_ms,
"resolution": timeframe,
"timeframe": row["timeframe"],
}
)
self.metric_expressions[sheet][row["HUB"]][row["id"]].append(
metric_kpi2 = self._build_kpi_metric_for_query(
"kpi2",
timeframe,
self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]["services_transformed"],
)
self.metric_expressions[sheet][f'{row["HUB"]}-{row["type"]}'][
row["id"]
].append(
self._template_metric_expression(
"kpi_1",
metric_kpi1,
"kpi_2",
metric_kpi2,
from_timestamp_ms_shifted,
to_timestamp_ms_shifted,
timeframe,
@ -268,48 +418,37 @@ class KPIGetter:
)
)
metric_kpi2 = self._build_kpi_metric_for_query(
"kpi2",
timeframe,
self.extracted_key_requests[row["HUB"]][row["id"]][
"services_transformed"
],
)
self.metric_expressions[sheet][row["HUB"]][row["id"]].append(
self._template_metric_expression(
"kpi_2",
metric_kpi2,
from_timestamp_ms_shifted,
to_timestamp_ms_shifted,
timeframe,
)
)
if (
"requests_transformed"
in self.extracted_key_requests[row["HUB"]][row["id"]]
in self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]
and "services_transformed"
in self.extracted_key_requests[row["HUB"]][row["id"]]
in self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]
):
metric_count = self._build_kpi_metric_for_query(
"count",
timeframe,
self.extracted_key_requests[row["HUB"]][row["id"]][
"services_transformed"
],
self.extracted_key_requests[row["HUB"]][row["id"]][
"requests_transformed"
],
self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]["services_transformed"],
self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]["requests_transformed"],
)
self.metric_expressions[sheet][row["HUB"]][row["id"]].append(
self.metric_expressions[sheet][f'{row["HUB"]}-{row["type"]}'][
row["id"]
].append(
{
"kpi_name": "count",
"metric": metric_count,
"from_date": from_timestamp_ms,
"to_date": to_timestamp_ms,
"resolution": timeframe,
# "resolution": timeframe,
"resolution": f"{Helper.get_days(from_timestamp_ms, to_timestamp_ms)}d",
"timeframe": row["timeframe"],
}
)
@ -317,26 +456,33 @@ class KPIGetter:
metric_error_count = self._build_kpi_metric_for_query(
"error_count",
timeframe,
self.extracted_key_requests[row["HUB"]][row["id"]][
"services_transformed"
],
self.extracted_key_requests[row["HUB"]][row["id"]][
"requests_transformed"
],
self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]["services_transformed"],
self.extracted_key_requests[f'{row["HUB"]}-{row["type"]}'][
row["id"]
]["requests_transformed"],
)
self.metric_expressions[sheet][row["HUB"]][row["id"]].append(
self.metric_expressions[sheet][f'{row["HUB"]}-{row["type"]}'][
row["id"]
].append(
{
"kpi_name": "error_count",
"metric": metric_error_count,
"from_date": from_timestamp_ms,
"to_date": to_timestamp_ms,
"resolution": timeframe,
"resolution": f"{Helper.get_days(from_timestamp_ms, to_timestamp_ms)}d",
"timeframe": row["timeframe"],
}
)
self._dispatch_to_dynatrace()
def _dispatch_to_dynatrace(self):
"""
Dispatches all queries to Dynatrace.
"""
Helper.console_output("Fetching data from Dynatrace...")
with concurrent.futures.ThreadPoolExecutor(
self.data_getter.config["threads"]
) as executor:
@ -357,13 +503,6 @@ class KPIGetter:
if "resolution" in query:
params["resolution"] = query["resolution"]
# future = executor.submit(
# self.data_getter.get_data_from_dynatrace,
# params,
# hub,
# "metrics/query",
# )
future = executor.submit(
self.data_getter.krparser_get_data_from_dynatrace,
params,
@ -378,10 +517,12 @@ class KPIGetter:
self._process_dynatrace_data()
def _process_dynatrace_data(self):
"""
Processes the responses from Dynatrace and adds them to a dictionary.
"""
for sheet in self.metric_expressions.keys():
for hub in self.metric_expressions[sheet].keys():
for slo in self.metric_expressions[sheet][hub].keys():
# future = self.metric_expressions[sheet][hub][slo]["data"]
for index, query in enumerate(
self.metric_expressions[sheet][hub][slo]
):
@ -391,19 +532,40 @@ class KPIGetter:
self.metric_expressions[sheet][hub][slo][index][
"result"
] = result["result"]
self.metric_expressions[sheet][hub][slo][index][
"api_result"
] = self._extract_result_from_api(
result["result"],
self.metric_expressions[sheet][hub][slo][index][
"kpi_name"
],
)
else:
self.metric_expressions[sheet][hub][slo][index][
"result"
] = "None"
# DEBUG
del query["data"]
# print(self.metric_expressions[sheet][hub][slo][index]["result"])
# TODO: DEBUG remove
with open("./slo_results.txt", "a") as f:
f.write(f"\n{sheet} -> {hub} -> {slo}:\n")
f.write(json.dumps(result, indent=4))
f.write("\n")
f.write("-" * 80)
# if DEBUG:
# with open("./slo_results.txt", "a") as f:
# f.write(f"\n{sheet} -> {hub} -> {slo}:\n")
# f.write(json.dumps(result, indent=4))
# f.write("\n")
# f.write("-" * 80)
def _extract_result_from_api(
self, result: typing.Dict, result_type: str
) -> typing.Union[int, float, str]:
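# kpi_2 responses can contain several data series, so their first values are
# averaged; all other KPIs take the single value of the first data series.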
if result_type == "kpi_2":
result_values = []
for data in result[0]["data"]:
result_values.append(data["values"][0])
return sum(result_values) / len(result_values)
else:
return result[0]["data"][0]["values"][0]
def _template_metric_expression(
self,
@ -414,6 +576,20 @@ class KPIGetter:
resolution: str,
timeframe: str,
) -> typing.Dict:
"""
Template used for Dynatrace KPI query creation.
Args:
kpi_name (str): KPI name which will be displayed in the QM report
metric_expression (str): The metric selector which will be used to fetch data from Dynatrace
from_timestamp_ms (int): From timestamp in milliseconds
to_timestamp_ms (int): To timestamp in milliseconds
resolution (str): Resolution used for fetching data from Dynatrace
timeframe (str): Timeframe from the original QM report
Returns:
typing.Dict: Returns a dictionary with all the necessary information for further processing.
"""
element = {
"kpi_name": kpi_name,
"metric": metric_expression,
@ -426,69 +602,116 @@ class KPIGetter:
def _calculate_timeshift(
self, from_timestamp_ms: int, to_timestamp_ms: int, resolution: str
) -> int:
from_ts, to_ts = "", ""
) -> typing.Tuple[int, int]:
"""
Calculates the time shift for KPI 2.
Args:
from_timestamp_ms (int): From timestamp in milliseconds.
to_timestamp_ms (int): To timestamp in milliseconds.
resolution (str): The resolution used in the Dynatrace query.
Returns:
typing.Tuple[int, int]: Returns timestamps in milliseconds
"""
if resolution == "7d":
from_ts = from_timestamp_ms - ((60 * 60 * 24 * 7) * 1000)
to_ts = to_timestamp_ms - ((60 * 60 * 24 * 7) * 1000)
to_ts = to_timestamp_ms
return from_ts, to_ts
if resolution == "1w":
from_date, end_date = Helper.previous_week_range(
datetime.fromtimestamp(to_timestamp_ms / 1000), -2
# from_date, end_date = Helper.previous_week_range(
# datetime.fromtimestamp(to_timestamp_ms / 1000), -2
# )
# from_ts = Helper.convert_datetime_to_timestamp(from_date, "ms")
# to_ts = Helper.convert_datetime_to_timestamp(end_date, "ms")
from_ts = from_timestamp_ms - ((60 * 60 * 24 * 7) * 1000)
to_ts = to_timestamp_ms
return from_ts, to_ts
if resolution == "1M":
from_date, _ = Helper.previous_month_range(
datetime.fromtimestamp(from_timestamp_ms / 1000), 1
)
from_ts = Helper.convert_datetime_to_timestamp(from_date, "ms")
to_ts = Helper.convert_datetime_to_timestamp(end_date, "ms")
if resolution == "1M":
# TODO: not done yet
from_ts = from_timestamp_ms
# to_ts = Helper.convert_datetime_to_timestamp(to_timestamp_ms, "ms")
to_ts = to_timestamp_ms
return from_ts, to_ts
return from_ts, to_ts
# def _get_timeframe_for_kpi_data(
# self, from_timestamp: int, to_timestamp: int
# ) -> typing.Union[str, bool]:
# """
# Returns the timeframe for KPI data
# def _calculate_1w_timeshift(self, timestamp_ms: int) -> typing.Tuple[int, int]:
# pass
# Args:
# from_timestamp (int): From timestamp in milliseconds
# to_timestamp (int): To timestamp in milliseconds
# def _calculate_1M_timeshift(self, timestamp_ms: int) -> typing.Tuple[int, int]:
# pass
# Returns:
# typing.Union[str, bool]: Returns the timeframe as string. If option not valid, it returns False.
# """
def _get_timeframe_for_kpi_data(
self, from_timestamp: int, to_timestamp: int
) -> str:
days = Helper.get_days(from_timestamp, to_timestamp)
# days = Helper.get_days(from_timestamp, to_timestamp)
timeframe = ""
# if days == 1:
# return "7d"
# elif days == 7:
# return "1w"
# elif days >= 28 and days < 32:
# return "1M"
# else:
# return False
if days == 1:
timeframe = "7d"
if days == 7:
timeframe = "1w"
if days >= 28 and days < 32:
timeframe = "1M"
return timeframe
def _get_timeframe_for_kpi_data(self) -> str:
if REPORT_TYPE == "day":
return "7d"
if REPORT_TYPE == "week":
return "1w"
if REPORT_TYPE == "month":
return "1M"
def _build_kpi_metric_for_query(
self, kpi_type: str, timeframe: str, service: str = None, request: str = None
) -> typing.Union[str, bool]:
# if switches are available (python3.10?) use switches
# TODO: make nicer
kpi = ""
"""
Returns formatted query string
Args:
kpi_type (str): KPI option.
timeframe (str): Timeframe as string.
service (str, optional): String with services from the KRParser. Defaults to None.
request (str, optional): String with requests from the KRParser. Defaults to None.
Returns:
typing.Union[str, bool]: Returns a formatted string for querying Dynatrace. If the option is not available, it returns False.
"""
if kpi_type == "kpi1":
kpi = f'100-(builtin:service.keyRequest.count.total:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in({service}))"))))):splitBy()/builtin:service.keyRequest.count.total:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in({service}))"))))):splitBy():timeshift(-{timeframe}))'
return f'100*(builtin:service.keyRequest.count.total:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in({service}))"))))):lastReal:splitBy()/builtin:service.keyRequest.count.total:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in({service}))"))))):lastReal:splitBy():timeshift(-{timeframe}))'
elif kpi_type == "kpi2":
kpi = f'100*((builtin:service.requestCount.server:filter(and(or(in("dt.entity.service",entitySelector("type(service),entityName.in({service})"))))):value:rate(h):lastReal())/(builtin:service.requestCount.server:filter(and(or(in("dt.entity.service",entitySelector("type(service),entityName.in({service})"))))):value:rate(h):fold(avg)))'
timeframe_split = [letter for letter in timeframe]
return f'100*((builtin:service.requestCount.server:filter(and(or(in("dt.entity.service",entitySelector("type(service),entityName.in({service})"))))):value:rate({timeframe_split[1]}):lastReal())/(builtin:service.requestCount.server:filter(and(or(in("dt.entity.service",entitySelector("type(service),entityName.in({service})"))))):value:rate({timeframe_split[1]}):fold(avg)))'
elif kpi_type == "count":
kpi = f'(builtin:service.keyRequest.count.total:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in( {service} ) ) ,entityName.in( {request} )"))))):splitBy())'
return f'(builtin:service.keyRequest.count.total:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in( {service} ) ) ,entityName.in( {request} )"))))):lastReal:splitBy())'
elif kpi_type == "error_count":
kpi = f'(builtin:service.keyRequest.errors.fivexx.count:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in( {service} ) ) ,entityName.in( {request} )"))))):splitBy())'
return f'(builtin:service.keyRequest.errors.fivexx.count:filter(and(or(in("dt.entity.service_method",entitySelector("type(service_method), fromRelationship.isServiceMethodOfService( type(~"SERVICE~"),entityName.in( {service} ) ) ,entityName.in( {request} )"))))):lastReal:splitBy())'
else:
kpi = False
return kpi
return False
def _extract_key_requests(
self, slo_ids_df: pd.DataFrame, env: str, DTURL: str, DTTOKEN: str
):
"""
Extracts key requests using the KRParser.
Args:
slo_ids_df (pd.DataFrame): Dataframe containing SLO IDs.
env (str): The environment used for querying.
DTURL (str): Dynatrace URL.
DTTOKEN (str): Dynatrace token.
"""
Helper.console_output("Extracting Key Requests...")
krp = krparser.KRParser(
name=env,
options=krparser.KROption.RESOLVESERVICES,
@ -526,9 +749,16 @@ class KPIGetter:
"services"
].append(service["displayName"])
self.transform_key_requests()
self._transform_key_requests()
def get_slos(self, slo_ids: list, hub: str):
"""
Ingests a list of SLO IDs and prepares a pandas dataframe for KRParser ingestion.
Args:
slo_ids (list): List of SLO IDs.
hub (str): The hub/environment.
"""
slo_responses = []
for slo_id in slo_ids:
response = self.data_getter.get_data_from_dynatrace(slo_id, hub, "slo")
@ -548,17 +778,45 @@ class KPIGetter:
class Helper:
@staticmethod
def transform_and_format_list(data: list) -> str:
"""
Joins a list to a string.
Args:
data (list): List with data for joining.
Returns:
str: Joined string.
"""
joined_string = ", ".join(data)
string = ", ".join([f'~"{s}~"' for s in joined_string.split(", ")])
return string
@staticmethod
def extract_timestamps(timestamp: str) -> typing.Tuple[int, int]:
"""
Extracts the timestamps from the "timeframe" column in the QM report.
Args:
timestamp (str): "timeframe" column value.
Returns:
typing.Tuple[int, int]: Returns processed "timeframe" value as integers.
"""
ts = timestamp.split(" to ")
return int(ts[0]), int(ts[1])
@staticmethod
def get_days(from_timestamp: int, to_timestamp: int) -> int:
"""
Calculates days between two timestamps.
Args:
from_timestamp (int): From timestamp in milliseconds.
to_timestamp (int): To timestamp in milliseconds.
Returns:
int: Returns the days between two timestamps.
"""
from_date = datetime.fromtimestamp(from_timestamp / 1000)
to_timestamp = datetime.fromtimestamp(to_timestamp / 1000)
duration = to_timestamp - from_date
@ -571,20 +829,55 @@ class Helper:
return start_date, end_date
@staticmethod
def previous_week_range(date, weeks: int):
def previous_week_range(
date: int, weeks: int
) -> typing.Tuple[datetime.date, datetime.date]:
"""
Gets previous week from current timestamp.
Args:
date (datetime): Reference date.
weeks (int): Number of weeks to shift back.
Returns:
typing.Tuple[datetime.date, datetime.date]: Returns start and end date.
"""
start_date = date + timedelta(-date.weekday(), weeks=weeks) # -1
# end_date = date + timedelta(-date.weekday() - 1)
end_date = date + timedelta(-date.weekday(), weeks=weeks + 1)
return start_date, end_date
@staticmethod
def previous_month_range(date):
end_date = date.replace(day=1) - datetime.timedelta(days=1)
def previous_month_range(date, shift: int):
shifted_date = date - relativedelta(months=shift)
end_date = shifted_date.replace(day=1) - timedelta(days=1)
start_date = end_date.replace(day=1)
return start_date, end_date
@staticmethod
def get_previous_month_days(timestamp_ms: int):
date = datetime.fromtimestamp(timestamp_ms / 1000).date()
end_date = date.replace(day=1) - timedelta(days=1)
start_date = end_date.replace(day=1)
days = Helper.get_days(
Helper.convert_datetime_to_timestamp(start_date) * 1000,
Helper.convert_datetime_to_timestamp(end_date) * 1000,
)
return days + 1
@staticmethod
def convert_datetime_to_timestamp(date: datetime.date, option: str = None) -> int:
"""
Converts datetime object to timestamp.
Returns by default timestamp in seconds.
Args:
date (datetime.date): Datetime object to convert.
option (str, optional): If set to "ms", returns timestamp as milliseconds. Defaults to None.
Returns:
int: Timestamp in seconds (or milliseconds if option == "ms").
"""
date_datetime = datetime.combine(date, datetime.min.time())
epoch = datetime(1970, 1, 1)
date_timestamp = (date_datetime - epoch).total_seconds()
@ -592,35 +885,47 @@ class Helper:
date_timestamp = date_timestamp * 1000
return int(date_timestamp)
@staticmethod
def console_output(text: str, indent=False):
"""
Helper function for uniform console output, when debugging is enabled.
Args:
text (str): Text to print.
indent (bool, optional): Indent the output by ten spaces. Defaults to False.
"""
if DEBUG:
if indent:
print(f"{' '*10}{text}")
else:
print(text)
print("-" * 80)
@staticmethod
def cleanup_debug_files():
"""
Cleans up files created in debugging mode.
"""
Helper.console_output("Cleaning up debug files")
files = ["./metricexpressions.json", "./slo_results.txt", "./test.xlsx"]
for file in files:
if os.path.exists(file):
os.remove(file)
Helper.console_output(f"{file.replace('./', '')} removed.", indent=True)
else:
Helper.console_output(
f"{file.replace('./', '')} not found. Nothing removed.", indent=True
)
Helper.console_output("=" * 80)
def main():
"""
Entrypoint.
"""
kpi_getter = KPIGetter()
report_reader = ReportReader()
report_reader.run()
# print(report_reader.qm_report_df)
# Get SLO IDs from first sheet and build metric expression queries.
for i, sheet in enumerate(report_reader.qm_report_ids.keys()):
if i == 0:
for hub in report_reader.qm_report_ids[sheet].keys():
kpi_getter.get_slos(report_reader.qm_report_ids[sheet][hub], hub)
kpi_getter.get_kpi_data(report_reader.qm_report_df)
kpi_getter._dispatch_to_dynatrace()
write_report = QmReportWriter(
report_reader.qm_report_df, kpi_getter.metric_expressions
)
write_report._combine_datasets()
# DEBUG
with open("metricexpressions.json", "w") as f:
f.write(json.dumps(kpi_getter.metric_expressions["daily"], indent=4))
kpi_getter.run()
if __name__ == "__main__":

File diff suppressed because it is too large.

File diff suppressed because it is too large.