Compare commits

...

5 Commits

Author SHA1 Message Date
Daniel Mikula 33eff7ba7d added keyrequest parser 2023-05-24 14:58:59 +02:00
Daniel Mikula 1c150a045f added keyrequest parser 2023-05-24 14:58:19 +02:00
Daniel Mikula a757286c03 added keyrequest parser 2023-05-24 14:57:14 +02:00
Daniel Mikula 65e9210f21 added xlsx filter feature 2023-05-24 09:07:29 +02:00
Daniel Mikula bcdb53786b removed trigger in jenkinsfile 2023-05-24 07:17:19 +02:00
3 changed files with 209 additions and 139 deletions

Jenkinsfile (vendored)

@@ -38,7 +38,7 @@ def reportRetryCount = 0
//here comes the trigger according to crontabs - jenkins is in UTC
triggers {
//triggers {
//every 1st of every month at 00:00
//cron('0 0 1 * *')
@@ -46,11 +46,11 @@ def reportRetryCount = 0
//cron('0 8 * * *')
//every monday at 06:00
cron('0 5 * * 0-7')
//cron('0 5 * * 0-7')
//parameterizedCron('0 10 * * * %PRESELECT=week;SLICES=tdhy')
}
//}
environment {
//ProxySettings


@@ -5,19 +5,21 @@ import yaml
import datetime
import time
import pandas as pd
#import requests
#import openpyxl
# import requests
# import openpyxl
import argparse
import warnings
import os
import warnings
import os
import dynatraceAPI
from pagination import Pagionation
warnings.filterwarnings("ignore")
#warning, there are warnings which are ignored!
from KRParser import krparser
os.environ['TZ'] = 'Europe/Berlin' # set new timezone
warnings.filterwarnings("ignore")
# warning, there are warnings which are ignored!
os.environ['TZ'] = 'Europe/Berlin' # set new timezone
time.tzset()
'''
@@ -37,21 +39,25 @@ def make_request(url, headers,verify,parameters):
return response
'''
def previous_day_range(date):
start_date = date - datetime.timedelta(days=1)
end_date = date - datetime.timedelta(days=1)
return start_date, end_date
def previous_week_range(date):
start_date = date + datetime.timedelta(-date.weekday(), weeks=-1)
end_date = date + datetime.timedelta(-date.weekday() - 1)
return start_date, end_date
def previous_month_range(date):
end_date = date.replace(day=1) - datetime.timedelta(days=1)
start_date = end_date.replace(day=1)
return start_date, end_date
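A minimal, standalone check of the range helpers above; the reference date is illustrative and the arithmetic is repeated inline so the snippet runs on its own:

import datetime

ref = datetime.date(2023, 5, 24)  # a Wednesday, chosen only for illustration

# previous_week_range: Monday through Sunday of the week before ref
week_start = ref + datetime.timedelta(-ref.weekday(), weeks=-1)
week_end = ref + datetime.timedelta(-ref.weekday() - 1)
print(week_start, week_end)  # 2023-05-15 2023-05-21

# previous_month_range: first and last day of the month before ref
month_end = ref.replace(day=1) - datetime.timedelta(days=1)
month_start = month_end.replace(day=1)
print(month_start, month_end)  # 2023-04-01 2023-04-30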
'''
def getSLO(DTAPIToken, DTENV, fromDate, toDate):
@@ -106,99 +112,108 @@ def getSLO(DTAPIToken, DTENV, fromDate, toDate, selector_var, selector_type):
# DTAPIToken = sec token
dtclient = dynatraceAPI.Dynatrace(DTENV, DTAPIToken)
my_params_report = {'pageSize': 25,
'from': int(fromDate),
'to': int(toDate),
'timeFrame': 'GTF',
'evaluate': "true",
# name = exact name, text = like
'sloSelector': f"""{selector_type}("{selector_var}")"""
}
'from': int(fromDate),
'to': int(toDate),
'timeFrame': 'GTF',
'evaluate': "true",
# name = exact name, text = like
'sloSelector': f"""{selector_type}("{selector_var}")"""
}
# gets all slos and filter later
api_url_report = "/api/v2/slo"
pages = dtclient.returnPageination(api_url_report, my_params_report, "slo")
#only_wanted = [x for x in pages.elements if str.lower(selector) in str.lower(x['description'])]
# only_wanted = [x for x in pages.elements if str.lower(selector) in str.lower(x['description'])]
df = pd.DataFrame(pages.elements)
return df
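A hedged usage sketch of getSLO as defined above; the environment URL, token and selector value are placeholders, and the millisecond timestamps mirror how the slicing helpers below build startTime/endTime:

import datetime
import time

DTENV = "https://abc12345.live.dynatrace.com"  # placeholder environment URL
DTAPIToken = "dt0c01.EXAMPLE"                  # placeholder API token

start = datetime.datetime(2023, 5, 22)
end = datetime.datetime(2023, 5, 23)
from_ms = time.mktime(start.timetuple()) * 1000
to_ms = time.mktime(end.timetuple()) * 1000

# selector_type "name" matches the exact SLO name, "text" behaves like a contains filter
slo_df = getSLO(DTAPIToken, DTENV, from_ms, to_ms, selector_var="QM_Report", selector_type="text")
print(slo_df.columns.tolist())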
def get_metric(DTAPIToken, DTENV, fromDate, toDate, metricExpression):
print(f"here: {len(metricExpression)}")
print(f"{metricExpression[:-1]}:timeshift(-7d))")
return metricExpression
def get_daily_slice(start_date, end_date):
tempstart = start_date
days = pd.DataFrame()
#Add the first day
# Add the first day
tempend = tempstart + datetime.timedelta(hours=24)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {'Date': tempstart, 'startTime': startms, 'endTime': endms}
days = days.append(row, ignore_index=True)
row = {'Date':tempstart,'startTime':startms, 'endTime':endms}
days = days.append(row,ignore_index=True)
while tempstart < end_date:
tempstart = tempstart + datetime.timedelta(hours=24)
tempend = tempstart + datetime.timedelta(hours=24)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {'Date':tempstart,'startTime':startms, 'endTime':endms}
days = days.append(row,ignore_index=True)
row = {'Date': tempstart, 'startTime': startms, 'endTime': endms}
days = days.append(row, ignore_index=True)
return days
def get_hourly_slice(start_date, end_date):
#date object to datetime
tempstart = datetime.datetime(start_date.year,start_date.month,start_date.day)
#date object to datetime
final_end = datetime.datetime.combine(end_date,datetime.datetime.max.time())
# date object to datetime
tempstart = datetime.datetime(
start_date.year, start_date.month, start_date.day)
# date object to datetime
final_end = datetime.datetime.combine(
end_date, datetime.datetime.max.time())
hours = pd.DataFrame()
#Add the first slice
# Add the first slice
tempend = tempstart + datetime.timedelta(hours=1)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {'Date': tempstart, 'startTime': startms, 'endTime': endms}
hours = hours.append(row, ignore_index=True)
row = {'Date':tempstart,'startTime':startms, 'endTime':endms}
hours = hours.append(row,ignore_index=True)
while tempstart < final_end:
tempstart = tempstart + datetime.timedelta(hours=1)
tempend = tempstart + datetime.timedelta(hours=1)
startms = time.mktime(tempstart.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
endms = time.mktime(tempend.timetuple()) * 1000
row = {'Date':tempstart,'startTime':startms, 'endTime':endms}
hours = hours.append(row,ignore_index=True)
row = {'Date': tempstart, 'startTime': startms, 'endTime': endms}
hours = hours.append(row, ignore_index=True)
return hours
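DataFrame.append as used above was deprecated in pandas 1.4 and removed in 2.0; below is a sketch of an equivalent daily slicer that collects plain dicts and builds the frame once (the _v2 name is illustrative, and the behaviour matches the original for whole-day inputs):

import datetime
import time
import pandas as pd

def get_daily_slice_v2(start_date, end_date):
    # Same columns as get_daily_slice above, without DataFrame.append
    rows = []
    current = start_date
    while current <= end_date:
        nxt = current + datetime.timedelta(hours=24)
        rows.append({'Date': current,
                     'startTime': time.mktime(current.timetuple()) * 1000,
                     'endTime': time.mktime(nxt.timetuple()) * 1000})
        current = nxt
    return pd.DataFrame(rows)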
def init_argparse():
parser = argparse.ArgumentParser(
usage="%(prog)s [--fromDate] [toDate] or [preSelect]",
description="gather SLO in daily slices for given Timeframe"
)
parser.add_argument(
"-f","--fromDate",
help = "YYYY-mm-dd e.g. 2022-01-01"
"-f", "--fromDate",
help="YYYY-mm-dd e.g. 2022-01-01"
)
parser.add_argument(
"-t","--toDate",
help = "YYYY-mm-dd e.g. 2022-01-31"
"-t", "--toDate",
help="YYYY-mm-dd e.g. 2022-01-31"
)
parser.add_argument(
"-p","--preSelect",
help = "day | week | month - gathers the data for the last full day, week or month"
"-p", "--preSelect",
help="day | week | month - gathers the data for the last full day, week or month"
)
parser.add_argument(
"-s","--slices",
help = "h | d | t | y - writes the slices hourly, daily, total or year to date into ecxel. given in any order"
"-s", "--slices",
help="h | d | t | y - writes the slices hourly, daily, total or year to date into ecxel. given in any order"
)
return parser
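Two hedged invocation examples for the parser above, passed explicitly to parse_args so they can be tried without the real command line:

parser = init_argparse()

# last full week, with daily, total and year-to-date sheets
args = parser.parse_args(["--preSelect", "week", "--slices", "dty"])

# explicit range, with hourly and daily sheets
args = parser.parse_args(["-f", "2023-05-01", "-t", "2023-05-07", "-s", "hd"])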
def check_inputs(args):
'''
This function is the single point of truth for arguments. If new arguments are added, they need to be added here. Returns the from and to dates.
@@ -206,36 +221,36 @@ def check_inputs(args):
if args.preSelect and (args.fromDate or args.toDate):
print("--preSelect must not be used in conjuntion with --fromDate and/or --toDate")
sys.exit()
elif args.fromDate and not args.toDate:
print("--fromDate only in conjunction with --toDate")
sys.exit()
elif args.toDate and not args.fromDate:
print("--toDate only in conjunction with --fromDate")
sys.exit()
elif args.toDate and args.fromDate and not args.preSelect:
elif args.toDate and args.fromDate and not args.preSelect:
try:
#fromDate = datetime.date.fromisoformat(args.fromDate)
# fromDate = datetime.date.fromisoformat(args.fromDate)
fromDate = datetime.datetime.strptime(args.fromDate, "%Y-%m-%d")
#toDate = datetime.date.fromisoformat(args.toDate)
# toDate = datetime.date.fromisoformat(args.toDate)
toDate = datetime.datetime.strptime(args.toDate, "%Y-%m-%d")
except Exception as e:
print("Progam closed: " + str(e))
sys.exit()
if toDate < fromDate:
print("--toDate can't be older than --fromDate")
sys.exit()
if toDate > datetime.datetime.today() or fromDate > datetime.datetime.today():
print("--toDate or --fromDate can't be in the future")
sys.exit()
elif args.preSelect and not args.fromDate and not args.toDate:
date = datetime.date.today()
if args.preSelect == "week":
@@ -243,7 +258,7 @@ def check_inputs(args):
elif args.preSelect == "month":
fromDate, toDate = previous_month_range(date)
elif args.preSelect == "day":
fromDate, toDate = previous_day_range(date)
fromDate, toDate = previous_day_range(date)
else:
print("--preSelect must be week or month")
sys.exit()
@@ -253,53 +268,71 @@ def check_inputs(args):
if args.slices == None:
print("-s or --slices must not be null and needs at least one letter of h d t or y, lower- or uppercase.")
sys.exit()
elif sum([1 if one_inp in str.lower(args.slices) else 0 for one_inp in ['h','d','t','y'] ]) == 0:
elif sum([1 if one_inp in str.lower(args.slices) else 0 for one_inp in ['h', 'd', 't', 'y']]) == 0:
print("-s or --slices must has at least one letter of h d t or y, lower- or uppercase.")
sys.exit()
return fromDate, toDate
def get_one_slice(item, DTTOKEN, DTURL, slice, out_df, selector_var, selector_type):
###Calc daily SLO
df = pd.DataFrame()
for index, row in slice.iterrows():
num_probs = len(slice)
percentage = str(round((100*(index+1))/num_probs, 2)).split(".")
print("{:0>4d} von {:0>4d} = {:0>3d}.{:0>2d} %".format(index+1, num_probs, int(percentage[0]), int(percentage[1]) ), end='\r')
temp_df = getSLO(DTTOKEN,DTURL,row['startTime'],row['endTime'], selector_var, selector_type)
temp_df['Date'] = row['Date']
temp_df['HUB'] = item
df = pd.concat([df,temp_df],ignore_index=True)
#sort columns in a try block - if the API returns columns that do not exist, this will not fail the script
try:
df[['description','Touchpoint']] = df['description'].str.split('_',expand=True)
except Exception as e:
print(f"This error was encounterted : {e}")
try:
df = df[['Date', 'HUB', 'id', 'enabled', 'name', 'description', 'Touchpoint', 'evaluatedPercentage', 'errorBudget', 'status', 'error', 'target','warning', 'evaluationType', 'timeframe', 'metricExpression', 'filter']]
except Exception as e:
print("Could not rearrange columns: " + str(e))
out_df = pd.concat([out_df,df],ignore_index=True)
print() # newline to remove \r from progress bar
return out_df
# Calc daily SLO
df = pd.DataFrame()
for index, row in slice.iterrows():
num_probs = len(slice)
percentage = str(round((100*(index+1))/num_probs, 2)).split(".")
print("{:0>4d} von {:0>4d} = {:0>3d}.{:0>2d} %".format(
index+1, num_probs, int(percentage[0]), int(percentage[1])), end='\r')
temp_df = getSLO(
DTTOKEN, DTURL, row['startTime'], row['endTime'], selector_var, selector_type)
temp_df['Date'] = row['Date']
temp_df['HUB'] = item
def get_slice_ytd_total(DTTOKEN,DTURL,item, start_date, end_date, time_name, time_val, out_df, selector_var, selector_type):
df = getSLO(DTTOKEN,DTURL,start_date,end_date, selector_var, selector_type)
df[time_name] = time_val
df['HUB'] = item
# new metric expression
# temp_df["newMetricExpression"] = get_metric(
# DTTOKEN, DTURL, row["startTime"], row["endTime"], temp_df["metricExpression"])
# print(temp_df)
temp_df["newMetricExpression"] = temp_df["metricExpression"].apply(
lambda x: get_metric(DTTOKEN, DTURL, row["startTime"], row["endTime"], x))
df = pd.concat([df, temp_df], ignore_index=True)
# sort columns in a try block - if the API returns columns that do not exist, this will not fail the script
try:
df[['description','Touchpoint']] = df['description'].str.split('_',expand=True)
df[['description', 'Touchpoint']
] = df['description'].str.split('_', expand=True)
except Exception as e:
print(f"This error was encounterted : {e}")
try:
df = df[['Date', 'HUB', 'id', 'enabled', 'name', 'description','Touchpoint', 'evaluatedPercentage', 'errorBudget', 'status', 'error', 'target','warning', 'evaluationType', 'timeframe', 'metricExpression', 'filter']]
df = df[['Date', 'HUB', 'id', 'enabled', 'name', 'description', 'Touchpoint', 'evaluatedPercentage', 'errorBudget',
'status', 'error', 'target', 'warning', 'evaluationType', 'timeframe', 'metricExpression', 'filter']]
except Exception as e:
print("Could not rearrange columns: " + str(e))
out_df = pd.concat([out_df,df],ignore_index=True)
out_df = pd.concat([out_df, df], ignore_index=True)
print() # newline to remove \r from progress bar
return out_df
def get_slice_ytd_total(DTTOKEN, DTURL, item, start_date, end_date, time_name, time_val, out_df, selector_var, selector_type):
df = getSLO(DTTOKEN, DTURL, start_date, end_date,
selector_var, selector_type)
df[time_name] = time_val
df['HUB'] = item
try:
df[['description', 'Touchpoint']
] = df['description'].str.split('_', expand=True)
except Exception as e:
print(f"This error was encounterted : {e}")
try:
df = df[['Date', 'HUB', 'id', 'enabled', 'name', 'description', 'Touchpoint', 'evaluatedPercentage', 'errorBudget',
'status', 'error', 'target', 'warning', 'evaluationType', 'timeframe', 'metricExpression', 'filter']]
except Exception as e:
print("Could not rearrange columns: " + str(e))
out_df = pd.concat([out_df, df], ignore_index=True)
return out_df
def load_slo_parameter(path):
# the first part is to read a yaml and only select latest, valid config
mandatory_fields = ['hub', 'selector_type', 'selector_var', 'yearstart']
@@ -314,54 +347,68 @@ def load_slo_parameter(path):
for config_line in configs:
tmp_dict.update(config_line)
if sorted(list(tmp_dict.keys())) == mandatory_fields:
#python 3.7+
#yearstart = datetime.date.fromisoformat(tmp_dict['yearstart'])
#python <3.7
yearstart = datetime.datetime.strptime(tmp_dict['yearstart'], "%Y-%m-%d")
#common code
yearstart = datetime.datetime(yearstart.year, yearstart.month, yearstart.day)
# python 3.7+
# yearstart = datetime.date.fromisoformat(tmp_dict['yearstart'])
# python <3.7
yearstart = datetime.datetime.strptime(
tmp_dict['yearstart'], "%Y-%m-%d")
# common code
yearstart = datetime.datetime(
yearstart.year, yearstart.month, yearstart.day)
yearstart = time.mktime(yearstart.timetuple()) * 1000
selector_type = tmp_dict['selector_type'] # name if exact name is wanted
# name if exact name is wanted
selector_type = tmp_dict['selector_type']
selector_var = tmp_dict['selector_var']
hub = tmp_dict['hub']
all_yaml_configs.append([hub, selector_type, selector_var, yearstart, header_name])
all_yaml_configs.append(
[hub, selector_type, selector_var, yearstart, header_name])
else:
print(f"Slo Configuration {header_name} is broken")
return all_yaml_configs
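One plausible shape for slo_parameter.yaml, embedded as a string so the snippet runs standalone; only the four mandatory fields checked above are certain, while the section name and values are assumptions:

import datetime
import yaml

sample = """
weekly_report:
  - hub: HUB_EU
  - selector_type: text
  - selector_var: QM_Report
  - yearstart: "2023-01-01"
"""

for header_name, configs in yaml.safe_load(sample).items():
    tmp_dict = {}
    for config_line in configs:
        tmp_dict.update(config_line)
    assert sorted(tmp_dict.keys()) == ['hub', 'selector_type', 'selector_var', 'yearstart']
    # same parse as load_slo_parameter; quoting yearstart keeps it a string for strptime
    datetime.datetime.strptime(tmp_dict['yearstart'], "%Y-%m-%d")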
def write_slo_to_excel(args, fromDate, hourlyall, dailyall, totalall, ytd):
touchpoints = ['Vehicle' , 'Mobile']
touchpoints = ['Vehicle', 'Mobile']
if args.preSelect == 'day':
today = datetime.date.today()
yesterday = today - datetime.timedelta(days = 1)
fileName = "./QM_Report_"+ str(yesterday) +".xlsx"
yesterday = today - datetime.timedelta(days=1)
fileName = "./QM_Report_" + str(yesterday) + ".xlsx"
else:
fileName = "./QM_Report_" + str(fromDate.isocalendar()[1]) + ".xlsx"
writer = pd.ExcelWriter(fileName)
workbook = writer.book
if not totalall.empty and 't' in str.lower(args.slices):
totalall = totalall[totalall['Touchpoint'].isin(touchpoints)]
totalall.to_excel(writer, sheet_name='total')
totalall.to_excel(writer, sheet_name='total', index=False)
worksheet = writer.sheets['total']
worksheet.autofilter(0, 0, totalall.shape[0], totalall.shape[1])
if not dailyall.empty and 'd' in str.lower(args.slices):
dailyall = dailyall[dailyall['Touchpoint'].isin(touchpoints)]
dailyall.to_excel(writer, sheet_name='daily')
dailyall.to_excel(writer, sheet_name='daily', index=False)
worksheet = writer.sheets['daily']
worksheet.autofilter(0, 0, dailyall.shape[0], dailyall.shape[1])
if not hourlyall.empty and 'h' in str.lower(args.slices):
hourlyall = hourlyall[hourlyall['Touchpoint'].isin(touchpoints)]
hourlyall.to_excel(writer, sheet_name='hourly')
hourlyall.to_excel(writer, sheet_name='hourly', index=False)
worksheet = writer.sheets['hourly']
worksheet.autofilter(0, 0, hourlyall.shape[0], hourlyall.shape[1])
if not ytd.empty and 'y' in str.lower(args.slices):
ytd = ytd[ytd['Touchpoint'].isin(touchpoints)]
ytd.to_excel(writer, sheet_name='YTD')
ytd.to_excel(writer, sheet_name='YTD', index=False)
worksheet = writer.sheets['YTD']
worksheet.autofilter(0, 0, ytd.shape[0], ytd.shape[1])
workbook.close()
writer.save()
writer.close()
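A minimal sketch of the per-sheet pattern used above, wrapped in a context manager so no explicit save()/close() is needed; the XlsxWriter engine is assumed here because worksheet.autofilter() is an XlsxWriter call, and the data and file name are examples:

import pandas as pd

df = pd.DataFrame({'Touchpoint': ['Vehicle', 'Mobile'],
                   'evaluatedPercentage': [99.9, 99.5]})

with pd.ExcelWriter("./QM_Report_example.xlsx", engine="xlsxwriter") as writer:
    df.to_excel(writer, sheet_name='total', index=False)
    worksheet = writer.sheets['total']
    # autofilter(first_row, first_col, last_row, last_col)
    worksheet.autofilter(0, 0, df.shape[0], df.shape[1] - 1)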
def main(slo_path):
start_timer = time.time()
@@ -372,52 +419,74 @@ def main(slo_path):
print("fromDate: " + str(fromDate))
print("toDate: " + str(toDate))
#days = get_daily_slice(fromDate,toDate)
days = get_daily_slice(fromDate,toDate)
hours = get_hourly_slice(fromDate,toDate)
# days = get_daily_slice(fromDate,toDate)
days = get_daily_slice(fromDate, toDate)
hours = get_hourly_slice(fromDate, toDate)
with open('./environment.yaml') as file:
env_doc = yaml.safe_load(file)
hourlyall = pd.DataFrame()
dailyall = pd.DataFrame()
totalall = pd.DataFrame()
ytd = pd.DataFrame()
ytd = pd.DataFrame()
slo_configs = load_slo_parameter(slo_path)
for one_slo_config in slo_configs:
hub, selector_type, selector_var, yearstart, header_name = one_slo_config
print(f"For the slo config was '{slo_path}' used with the config '{header_name}'.")
print(
f"For the slo config was '{slo_path}' used with the config '{header_name}'.")
for item, doc in env_doc.items():
if item not in hub:
print(f"{item} will be skipped since it is not in {hub}, which was selected in {slo_path}")
print(
f"{item} will be skipped since it is not in {hub}, which was selected in {slo_path}")
continue
token = dict(doc[2])
url = dict(doc[1])
print("Crawling through: " + item)
print("Check if token exists in environment...")
if(config(token.get('env-token-name')) != ""):
if (config(token.get('env-token-name')) != ""):
print("Gather data, hold on a minute")
DTTOKEN = config(token.get('env-token-name'))
DTURL = url.get('env-url')
###Calc daily SLO
# key request parser start
krp = krparser.KRParser(
name=item,
options=krparser.KROption.RESOLVESERVICES,
config={
"threads": 10,
"serviceLookupParams": {"fields": "tags,fromRelationships"},
"extendResultObjects": {"env": item},
},
DTAPIURL=DTURL,
DTAPIToken=DTTOKEN,
)
# TODO: Pass krp down to parse Slos and get metrics via /metrics/query
# Calc daily SLO
if 'd' in str.lower(args.slices):
dailyall = get_one_slice(item, DTTOKEN, DTURL, days, dailyall, selector_var, selector_type)
#Calc hourly SLO
dailyall = get_one_slice(
item, DTTOKEN, DTURL, days, dailyall, selector_var, selector_type)
# Calc hourly SLO
if 'h' in str.lower(args.slices):
hourlyall = get_one_slice(item, DTTOKEN, DTURL, hours, hourlyall, selector_var, selector_type)
###Calc Overall YTD SLO
hourlyall = get_one_slice(
item, DTTOKEN, DTURL, hours, hourlyall, selector_var, selector_type)
# Calc Overall YTD SLO
if 'y' in str.lower(args.slices):
ytd = get_slice_ytd_total(DTTOKEN,DTURL,item, yearstart, days['endTime'].max(), 'Date', fromDate.year, ytd, selector_var, selector_type)
###Calc Overall SLO
ytd = get_slice_ytd_total(DTTOKEN, DTURL, item, yearstart, days['endTime'].max(
), 'Date', fromDate.year, ytd, selector_var, selector_type)
# Calc Overall SLO
if 't' in str.lower(args.slices):
totalall = get_slice_ytd_total(DTTOKEN,DTURL,item, days['startTime'].min(), days['endTime'].max(), 'Date', fromDate.isocalendar()[1], totalall, selector_var, selector_type)
else:
totalall = get_slice_ytd_total(DTTOKEN, DTURL, item, days['startTime'].min(), days['endTime'].max(
), 'Date', fromDate.isocalendar()[1], totalall, selector_var, selector_type)
else:
print("token not found, skipping " + item)
write_slo_to_excel(args, fromDate, hourlyall, dailyall, totalall, ytd)
write_slo_to_excel(args, fromDate, hourlyall,
dailyall, totalall, ytd)
print("It took {} seconds to run this script".format(time.time()-start_timer))
if __name__ == "__main__":
main('./slo_parameter.yaml')
main('./slo_parameter.yaml')


@@ -4,4 +4,5 @@ pandas
requests
datetime
argparse
openpyxl
openpyxl
git+https://atc.bmwgroup.net/bitbucket/scm/opapm/keyrequestparser.git