import yaml
from decouple import config
import json
import argparse
import requests
from datetime import datetime
from git import Repo
import os

# Name prefix of the STAGING global dashboard managed by this script.
DASHBOARD_NAME = "[STAGING]Global Offboard Reliability 2.0"
# Bitbucket basic-auth credentials, read from the environment/.env via python-decouple.
AUTHSTRING = config("BITBUCKET_USERNAME") + ":" + config("BITBUCKET_TOKEN")
CONFIG_REPO_URL = "https://" + AUTHSTRING + "@atc.bmwgroup.net/bitbucket/scm/opapm/shared_configuration.git"
CONFIG_REPO_NAME = "shared_configuration"
ARCHIVE_REPO_URL = "https://" + AUTHSTRING + "@atc.bmwgroup.net/bitbucket/scm/opapm/archive.git"
ARCHIVE_REPO_NAME = "archive"
# Department code (CLI value) -> business-line display name.
BUSINESS_LINES = {'DE-3': 'My Journey', 'DE-7': 'Connected Vehicle Platform', 'DE-4': 'My Life', 'EC-DE': 'China Services'}

parser = argparse.ArgumentParser(
    description="Generate and deploy the Dynatrace Global Dashboard as Code. Auto deployment works only for STAGING dashboard",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-R", "--rows", type=int,
                    help="Number of rows per dashboard. If not specified, all rows will be added to single dashboard")
parser.add_argument('--auto-upload', default=False, action='store_true',
                    help="Auto upload to STAGING dashboard")
parser.add_argument('-D', '--department', type=str, default="ALL", required=False,
                    help="Define department for which the dashboard should be updated: 'DE-3', 'DE-7', 'DE-4' or 'EC-DE'. Leave empty or use 'ALL' if you want to generate 1 cumulated dashboard")
parser.add_argument('--wall', default=False, action='store_true',
                    help="By default script is generating desktop version. Use parameter to set dashboard generation to type 'Wall'.")
args = parser.parse_args()


def clone_repo_if_notexist(repourl, reponame):
    """Clone `repourl` into `./reponame` if that directory does not exist yet.

    Returns a git.Repo handle to the (new or pre-existing) working copy.
    """
    if not os.path.isdir(reponame):
        repo = Repo.clone_from(repourl, reponame)
        return repo
    return Repo(reponame)


def pull_repo(repo):
    """Pull the latest changes from the repo's `origin` remote."""
    origin = repo.remotes.origin
    origin.pull()


def push_repo(repo, message):
    """Stage ALL working-tree changes, commit them with `message`, and push to origin."""
    repo.git.add(all=True)
    repo.index.commit(message)
    origin = repo.remotes.origin
    origin.push()


def load_slo_parameter(path):
    """Load the SLO parameter YAML file at `path` and return the parsed document."""
    # the first part is to read a yaml and only select latest, valid config
    with open(path) as file:
        slo_doc = yaml.safe_load(file)
    return slo_doc


def make_request(url, DTAPIToken, verify, method, jsondata):
    """Perform an HTTP request against a Dynatrace config API endpoint.

    Parameters:
        url        -- full API URL.
        DTAPIToken -- Dynatrace API token (sent as 'Api-Token ...' header).
        verify     -- TLS certificate verification flag passed to requests.
        method     -- one of "get", "post", "put", "delete".
        jsondata   -- JSON string request body (used for post/put only).

    Returns the requests.Response on success. On any request error this
    returns a descriptive *string* instead of raising — callers that do
    `.json()` on the result must be aware of this contract.

    Raises:
        ValueError -- if `method` is not one of the supported verbs.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Api-Token ' + DTAPIToken
    }
    try:
        if method == "get":
            response = requests.get(url, headers=headers, verify=verify)
        elif method == "post":
            response = requests.post(url, headers=headers, verify=verify, data=jsondata)
        elif method == "put":
            response = requests.put(url, headers=headers, verify=verify, data=jsondata)
        elif method == "delete":
            response = requests.delete(url, headers=headers, verify=verify)
        else:
            # BUGFIX: previously an unsupported method fell through with
            # `response` unbound, so response.raise_for_status() below raised
            # a confusing UnboundLocalError. Fail fast with a clear error.
            raise ValueError("Unsupported HTTP method: " + str(method))
        response.raise_for_status()
    except requests.exceptions.HTTPError as errh:
        return "An Http Error occurred:" + repr(errh)
    except requests.exceptions.ConnectionError as errc:
        return "An Error Connecting to the API occurred:" + repr(errc)
    except requests.exceptions.Timeout as errt:
        return "A Timeout Error occurred:" + repr(errt)
    except requests.exceptions.RequestException as err:
        return "An Unknown Error occurred" + repr(err)
    return response


def get_all_dashboards_withname(DTAPIToken, DTENV, name):
    """Return all dashboards in the Dynatrace environment whose name starts
    with `name`, sorted ascending by name.

    `DTENV` is the environment base URL (must end with '/').
    """
    DTAPIURL = DTENV + "api/config/v1/dashboards"
    r = make_request(DTAPIURL, DTAPIToken, True, "get", None)
    print(r)
    # NOTE(review): if make_request returned an error string, r.json() raises
    # AttributeError here — confirm this hard failure is intended.
    entityResponse = r.json()
    result = []
    if "dashboards" in entityResponse:
        for dashboard in entityResponse["dashboards"]:
            if dashboard["name"].startswith(name):
                result.append(dashboard)
    result = sorted(result, key=lambda x: x['name'], reverse=False)
    return result
def backup_dashboards(DTAPIToken, DTENV, dashboards):
    """Download each listed dashboard from Dynatrace and write a timestamped
    JSON backup under ./archive/<YYYYMMDD>/."""
    for dashboard in dashboards:
        DTAPIURL = DTENV + "api/config/v1/dashboards/" + dashboard["id"]
        r = make_request(DTAPIURL,DTAPIToken,True,"get",None)
        # NOTE(review): make_request returns an error *string* on failure, in
        # which case r.json() raises AttributeError — confirm acceptable.
        entityResponse = r.json()
        print("Downloaded dashboard from Dynatrace: "+entityResponse["dashboardMetadata"]["name"]+", creating backup...")
        now=datetime.now()
        strnow = now.strftime("%Y%m%d_%H%M%S")
        strnowdate = now.strftime("%Y%m%d")
        # One archive folder per calendar day; files carry a full timestamp.
        if not os.path.isdir("./archive/"+strnowdate):
            os.makedirs("./archive/"+strnowdate)
        with open("./archive/"+strnowdate+"/"+entityResponse["dashboardMetadata"]["name"]+"_"+strnow+".json", "w") as file:
            json.dump(entityResponse, file, indent=2)

def remove_dashboards(DTAPIToken, DTENV, dashboards):
    """Delete every listed dashboard from the Dynatrace environment."""
    for dashboard in dashboards:
        print("Removing STAGING dashboard from Dynatrace: "+dashboard["name"])
        DTAPIURL = DTENV + "api/config/v1/dashboards/" + dashboard["id"]
        print(make_request(DTAPIURL,DTAPIToken,True,"delete",None))

def create_or_update_dashboard(DTAPIToken, DTENV, dashboards, files, businessline):
    """For each generated tiles file, update the matching existing dashboard
    (matched by the '#<index>' name suffix) or create a new one; afterwards
    delete any remaining dashboards that were not matched/updated.

    Parameters:
        dashboards   -- existing STAGING dashboards (mutated: matched entries
                        are removed so leftovers can be deleted at the end).
        files        -- generated 'dashboard_tiles_<n>.json' file names.
        businessline -- business-line display name used in new dashboard names.
    """
    if(files):
        for index, filename in enumerate(files,start=1):
            with open('./'+filename) as file:
                tilesjson = json.load(file)
            if any(dashboard["name"].endswith("#"+str(index)) for dashboard in dashboards):
                existingdashboard = next((dashboard for dashboard in dashboards if dashboard["name"].endswith("#"+str(index))), None)
                if existingdashboard:
                    print("Found dashboard for file: "+filename + ", Name: "+ existingdashboard["name"])
                    DTAPIURL = DTENV + "api/config/v1/dashboards/" + existingdashboard["id"]
                    # Fetch the current definition, replace only its tiles, and PUT it back.
                    r = make_request(DTAPIURL,DTAPIToken,True,"get",None)
                    entityResponse = r.json()
                    entityResponse["tiles"] = tilesjson
                    print("Updating dashboard: "+entityResponse["dashboardMetadata"]["name"])
                    print(make_request(DTAPIURL,DTAPIToken,True,"put",json.dumps(entityResponse)))
                    # Remove from the worklist so it is not deleted below.
                    dashboards.remove(existingdashboard)
            else:
                print("Dashboard for file: "+filename + " not found.")
                # For 'ALL' the dashboard keeps the plain base name (no '#<index>'
                # suffix); per-department dashboards are numbered.
                if(args.department == "ALL"):
                    dashfullname = DASHBOARD_NAME
                else:
                    dashfullname = DASHBOARD_NAME + " - " + businessline + " #" + str(index)
                newdashboard = {
                    "dashboardMetadata":{
                        "name": dashfullname,
                        "owner": "PATRYK.GUDALEWICZ@partner.bmw.de"
                    },
                    "tiles":[]
                }
                DTAPIURL = DTENV + "api/config/v1/dashboards"
                newdashboard["tiles"] = tilesjson
                print("Creating dashboard: "+newdashboard["dashboardMetadata"]["name"])
                creationresult = make_request(DTAPIURL,DTAPIToken,True,"post",json.dumps(newdashboard))
                print(creationresult)
    # Anything still in `dashboards` had no matching generated file: delete it.
    # NOTE(review): original indentation was ambiguous in the source layout —
    # confirm this cleanup should also run when `files` is empty.
    remove_dashboards(DTAPIToken, DTENV, dashboards)

def get_bounds (grid_row, grid_column, tile_columnwidth, tile_rowheight):
    """Convert grid coordinates/sizes into a Dynatrace tile bounds dict.

    One grid cell ("brick") is 38 pixels; all values are multiples of it.
    """
    grid_brick = 38
    grid_top = 0 if grid_row == 0 else grid_row * grid_brick
    grod_left = 0 if grid_column == 0 else grid_column * grid_brick
    grod_width = 0 if tile_columnwidth == 0 else tile_columnwidth * grid_brick
    grod_height = 0 if tile_rowheight == 0 else tile_rowheight * grid_brick
    bounds = {
        "top": grid_top,
        "left": grod_left,
        "width": grod_width,
        "height": grod_height
    }
    return bounds

def get_dataExplorerTileSloThreshold(sloThresholdValuesAndColor):
    """Parse a threshold spec string into a Data Explorer threshold rule list.

    Expected format: three '|'-separated entries of '<int>_<color>', e.g.
    '95_#ff0000|98_#ffff00|100_#00ff00'.
    """
    value1 = int(str(sloThresholdValuesAndColor).split("|")[0].split("_")[0])
    value2 = int(str(sloThresholdValuesAndColor).split("|")[1].split("_")[0])
    value3 = int(str(sloThresholdValuesAndColor).split("|")[2].split("_")[0])
    color1 = str(sloThresholdValuesAndColor).split("|")[0].split("_")[1]
    color2 = str(sloThresholdValuesAndColor).split("|")[1].split("_")[1]
    color3 = str(sloThresholdValuesAndColor).split("|")[2].split("_")[1]
    dataExplorerTileThreshold = [
        { "value": value1, "color": color1 },
        { "value": value2, "color": color2 },
        { "value": value3, "color": color3 }
    ]
    return dataExplorerTileThreshold

def get_DataExplorerTile_Markdown(name_short, department, bounds, detailDashboardUrl_EMEA,detailDashboardUrl_NA, detailDashboardUrl_CN, docURL, slourl_EMEA, slourl_NA, slourl_CN,slorelevant, wall ):
    """Build the Markdown header tile for one SLO row.

    Desktop mode includes documentation and per-hub SLO links; wall mode
    renders only the title/department (plus the QM-Report marker).
    """
    # dataExplorerTile_Markdown = {
    #     "name": "Markdown",
    #     "tileType": "MARKDOWN",
    #     "configured": "true",
    #     "bounds": bounds,
    #     "tileFilter": {},
    #     "markdown": "___________\n## " + name_short + "\n\n" + department + " | --> [EMEA](" + detailDashboardUrl_EMEA + ") [NA](" + detailDashboardUrl_NA + ") [CN](" + detailDashboardUrl_CN + ")\n [Documentation](" + docURL + ")"
    # }
    #without team links
    if(not wall):
        markdown = "___________\n## " + name_short + "\n\n" + department + " [Documentation](" + docURL + ")"
    else:
        markdown = "___________\n## " + name_short + "\n\n" + department
    if(slorelevant):
        markdown = markdown + " [QM-Report] \n"
    else:
        markdown = markdown + " \n"
    # Per-hub SLO links are only shown on the desktop variant.
    if(slourl_EMEA and not wall):
        markdown = markdown + "[EMEA]("+slourl_EMEA+") "
    if(slourl_NA and not wall):
        markdown = markdown + "[NA]("+slourl_NA+") "
    if(slourl_CN and not wall):
        markdown = markdown + "[CN]("+slourl_CN+") "
    dataExplorerTile_Markdown = {
        "name": "Markdown",
        "tileType": "MARKDOWN",
        "configured": "true",
        "bounds": bounds,
        "tileFilter": {},
        "markdown": markdown
    }
    return dataExplorerTile_Markdown

def get_DataExplorerTile_SingleValue(customName, metricSelector, remoteEnvironmentUrl, bounds, timeframe, graphThreshold):
    """Build a SINGLE_VALUE Data Explorer tile for one SLO metric.

    The tile queries `metricSelector` on the remote environment over
    `timeframe` and colors the tile via `graphThreshold` rules.
    NOTE(review): `customName` is unused; "customName" is set to the metric
    selector instead — confirm whether that is intended.
    """
    dataExplorerTile_SingleValue = {
        "name": "",
        "tileType": "DATA_EXPLORER",
        "configured": "true",
        "bounds": bounds,
        "tileFilter": { "timeframe": timeframe },
        "remoteEnvironmentUri": remoteEnvironmentUrl,
        "customName": metricSelector,
        "queries": [
            {
                "id": "A",
                "timeAggregation": "DEFAULT",
                "metricSelector": metricSelector,
                "foldTransformation": "TOTAL",
                "enabled": "true"
            }
        ],
        "visualConfig": {
            "type": "SINGLE_VALUE",
            "global": { "seriesType": "LINE", "hideLegend": "true" },
            "rules": [
                {
                    "matcher": "A:",
                    "properties": { "color": "DEFAULT", "seriesType": "LINE", "alias": "SLO" },
                    "seriesOverrides": [{"name": metricSelector, "color": "#ffffff"}]
                }
            ],
            "axes": { "xAxis": { "visible": "true" }, "yAxes": [] },
            "heatmapSettings": {},
            "singleValueSettings": {
                "showTrend": "false",
                "showSparkLine": "false",
                "linkTileColorToThreshold": "true"
            },
            "thresholds": [
                {
                    "axisTarget": "LEFT",
                    "rules": graphThreshold,
                    "queryId": "",
                    "visible": "true"
                }
            ],
            "tableSettings": { "isThresholdBackgroundAppliedToCell": "false" },
            "graphChartSettings": { "connectNulls": "false" }
        },
        "queriesSettings": { "resolution": "" }
    }
    return dataExplorerTile_SingleValue

def get_DataExplorerTile_Graph(customName, metricSelector, metricName, remoteEnvironmentUrl, bounds, timeframe, axisTargetMin, axisTargetMax, graphThreshold ):
    """Build a GRAPH_CHART Data Explorer tile for one SLO metric.

    `axisTargetMin`/`axisTargetMax` bound the left Y axis.
    NOTE(review): `metricName` is unused in this body — confirm intentional.
    """
    dataExplorerTile_Graph = {
        "name": "",
        "tileType": "DATA_EXPLORER",
        "configured": "true",
        "bounds": bounds,
        "tileFilter": { "timeframe": timeframe },
        "remoteEnvironmentUri": remoteEnvironmentUrl,
        "customName": metricSelector,
        "queries": [
            {
                "id": "A",
                "timeAggregation": "DEFAULT",
                "metricSelector": metricSelector,
                "foldTransformation": "TOTAL",
                "enabled": "true"
            }
        ],
        "visualConfig": {
            "type": "GRAPH_CHART",
            "global": { "seriesType": "LINE", "hideLegend": "true" },
            "rules": [
                {
                    "matcher": "A:",
                    "properties": { "color": "DEFAULT", "seriesType": "LINE", "alias": "SLO" },
                    "seriesOverrides": [{"name": customName, "color": "#ffffff"}]
                }
            ],
            "axes": {
                "xAxis": { "visible": "true" },
                "yAxes": [{
                    "displayName": "",
                    "visible": "true",
                    "min": axisTargetMin,
                    "max": axisTargetMax,
                    "position": "LEFT",
                    "queryIds": [ "A" ],
                    "defaultAxis": "true"
                }]
            },
            "heatmapSettings": {},
            "singleValueSettings": {
                "showTrend": "false",
                "showSparkLine": "false",
                "linkTileColorToThreshold": "true"
            },
            "thresholds": [
                {
                    "axisTarget": "LEFT",
                    "rules": graphThreshold,
                    "queryId": "",
                    "visible": "true"
                }
            ],
            "tableSettings": { "isThresholdBackgroundAppliedToCell": "false" },
            "graphChartSettings": { "connectNulls": "false" }
        },
        "queriesSettings": { "resolution": "" }
    }
    return dataExplorerTile_Graph

def create_default_tiles():
    """Create the static header tiles (logo image plus EMEA / NORTH AMERICA /
    CHINA hub headers) for a new dashboard page.

    Wall mode uses wider columns (graph width 12, hub offsets 21 apart) than
    the desktop layout (graph width 6, offsets 15 apart).
    """
    newDashboardTiles = []
    # HEADER TILES
    # Picture Tile
    newDashboardTiles.append(
        {
            "name": "Image",
            "tileType": "IMAGE",
            "configured": "true",
            "bounds": {"top": 0, "left": 0, "width": 76, "height": 114},
            "tileFilter": {},
            "image": "data:image/webp;base64,UklGRlYHAABXRUJQVlA4WAoAAAAQAAAAOQAAcAAAQUxQSAQBAAABgGNt2zHn2cDYUzM2O6tyNWVsLCJZAav5+2QWEHZONmBbpTHW95zvpExEoG3bNlS7FdmaIxCVylExPCKo4UqHCvFqXzp8Cgjr6XCpLZbe9x0Q3LdPH339AUL+qKMEKClhje8cb7XATIDUFHQrLIsa9xHLicd5wHLo+js59nnKb3kGAjzDUuogyjplDpJRwSVPv4SD9h1DNC+1PIERokGmYaLmVx7nPo/rkKdXyidDMhoi6uO5LSP+bmcPcaPknsu4OZ0H/5VxH7EcewxrLGt6zLHMAc2fHB9NABQOBWH2PYYtawRKF8Sbz0d0usmNs3dx3s/WJ3SIkyXdOzYqqHFvugWRAFZQOCAsBgAAkBoAnQEqOgBxAD5tMJNHJCKhoSYVXdCADYlpDbwAiVj9T4g+Rr19KlpQ3/9rj/S8IrYT0C+8HEJxzeAh9f5w/Pv9WewZ0rPRuDyMFLG6crgiIWlUyjpmL5HPrpvWBMvukVcE5YNx8ruVhPIdFYzcMcSKDt6WbR2pWVaPGgSC2qlIjC2BEG5M26HdCGzRgIB5zaaZ07nGiRZzz9vXMi/llb00XYLoUdkiCDl3QGQsoImUReZw/BdBhECojaxfssBPjD/e0Byn4skcilQkqjWBGiPtxxnxE2z2ij64eQAA/vk3/iQ3RiQi73vf1jyEzqxtdpC9c6W7f0YHI7IjGBZt8lJwFvqD0/RyKRTUZ4ca6J/cHX43YtfnES4IYCXsiBf7vE+FGn33JO2eppuFhHytXY+aZs2uPZIhEMc5ySwjBTgqO5YxzWiJWr6hciKsoE2DmjaW7ZPSVnPRABOAfnCyEZztvarofuVv87+Sextupq3XeaMmu7H//YGKpaVe/aFeYtQmF10uveMCMvSNBR5QMuewXj+DnANxqkjd00aBzK1cv0C5BR8mfGumw7T2+Yrprn8/SHljjxQPFpYNcJaS3fUH0UiT0VIjf4//6rP5chINAQ3XlqIpAI+js8ruy0zBjhtm1VmnmYY+apBmGU/nhYtgjVjUOm8eqbiRmB7224QhQ+ietF+iSxQOmZj8tpWItmbetKYu5fziovwyp6p3UwZS9bkm0EReYpdBOa71yeXOnryJyfsGm3Gjy1DO5XLYBYpv6/8qn2X3FLf+uH413VN/lTw3Xxq/HkZ8cVY7HNjnnDfqTMgsqhRqgD72PimwBmUVXjqbxnwEEsx/7u094xjgOP7o0NyO1i3Y55hgsBO0O3B0TzfxtARJ+gZ2HL1/P/rhre+AHTZflfJTOpY1tPVKbLwTcChEP+whaFs9njwG85AjgrVOKUWTGG9nIcDxB+vm9v89XxNHM9lZzP3z9rfdjK2AR8DUrCRWYQFaOx86Iu/OoUPVljX/HI+Z93EjoPz+SKT/AfaRtxr2N3OWeOA1X98dFmOhQkrdk5wZ9uedHQGRgHx0Wn1zQuMvV4vcquNcaWAbB8ZQaqiivy/X23br4HyvLDCc9m20dMXRSgyppfst2eCGWjysA+QengG61mxN+qFG8Bu3nRI+JX8Mzcbd5J1rmxydrnVvqO0NHcwF8i/MnRavxxoWfHbcimUOPOF6kPL3GEXV6n+5HNvTIjQmbPMWqlaBguD9P66FUyX3/kWBhPgIK5zh0CvLM4XZHIfjrqFe/AtvswFrtImSr03rjnqwnW2Hi1wvQ/31jUXwjTC0IXDD9xym6rk0FWxJpbdEapIYXqV+a5dSyJ6fTz+da8DYcF6T49O5dOSxB1//jPh6YOF5nqdGp8kJEdzOIvpOAlVY0IlHeRHo1RjWnkf5/AUGU5e+U9mZisn16llZslRbu+7wszY2HNDg8Am1OKUXeqUPrhoZk9XeN7e4IBkz4UxRDlR4AFjdguVyHqFYnRD9kSl+6LqSjNs+o3lQclsjtqAdomU57RrASjAKYfdFeyVDh+ZaWuANksKPrtNXV8ot8BUnu952F0PU7Zq7Vj+jLBw3myGlA6swuz+0bKItmuttXGxhx/Go+wAtOzWFWwjB2SdKNNpL/ovEF5ibZz+5Fzibio4yW8apoq+UkpCcLbkd5abaPjdrjIXelYG8EDHz402pdyJW8vk49MRRFOvKqyGHJVZY1JZON2Y2oosF+xNq96ekOv09l4mW4zQrUCBiq1MHeB6bTk3Ujc6/9W5W3YpjxPrkQyIykHM0imBojob2/II37UldcLDY8MuG3Dci8rbo8TASEZ/6vAYAvtE16IFVn2Yft/fM1p/aBQQh/6Auz+OqKcSZ4GaCPzmOYHsgIieWAsZJ93o6gdU/cl9NTD/m3ZqtX1n/kCwYHEXOAX9KyvqjXqg55cTK4GnWZzFtojcfG0Fd30O9AlVl0yzSwxwxFA128xJmPoCrDV339n/l1jnaYx3ivNfCCZU7qhzUpecgzq4p/VsMQX3Pb8elQ40J68H/2n/2/Nl4O/sRgFXm9Y3Qmj0Bs0wbYY7/3//aU7iSogAAAAA="
        }
    )
    if(args.wall):
        # Wall layout: graph column is 12 bricks wide, hubs 21 bricks apart.
        # EMEA HUB
        newDashboardTiles.append({ "name": "Header" ,"tileType": "MARKDOWN" , "configured": "true" , "bounds": get_bounds(0 , 7 , 20 , 2), "tileFilter": {}, "markdown": "# EMEA" })
        newDashboardTiles.append({ "name": "Last 1 h" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 7 , 4 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Reliability Graph (3 days)" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 11 , 12 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Last 3 days" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 23 , 4 , 1), "tileFilter": {} })
        # NORTH AMERICA HUB
        newDashboardTiles.append({ "name": "Header" ,"tileType": "MARKDOWN" , "configured": "true" , "bounds": get_bounds(0 , 28 , 20 , 2), "tileFilter": {}, "markdown": "# NORTH AMERICA" })
        newDashboardTiles.append({ "name": "Last 1 h" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 28 , 4 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Reliability Graph (3 days)" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 32 , 12 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Last 3 days" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 44 , 4 , 1), "tileFilter": {} })
        # CHINA HUB
        newDashboardTiles.append({ "name": "Header" ,"tileType": "MARKDOWN" , "configured": "true" , "bounds": get_bounds(0 , 49 , 20 , 2), "tileFilter": {}, "markdown": "# CHINA" })
        newDashboardTiles.append({ "name": "Last 1 h" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 49 , 4 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Reliability Graph (3 days)" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 53 , 12 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Last 3 days" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 65 , 4 , 1), "tileFilter": {} })
    else:
        # Desktop layout: graph column is 6 bricks wide, hubs 15 bricks apart.
        # EMEA HUB
        newDashboardTiles.append({ "name": "Header" ,"tileType": "MARKDOWN" , "configured": "true" , "bounds": get_bounds(0 , 7 , 14 , 2), "tileFilter": {}, "markdown": "# EMEA" })
        newDashboardTiles.append({ "name": "Last 1 h" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 7 , 4 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Reliability Graph (3 days)" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 11 , 6 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Last 3 days" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 17 , 4 , 1), "tileFilter": {} })
        # NORTH AMERICA HUB
        newDashboardTiles.append({ "name": "Header" ,"tileType": "MARKDOWN" , "configured": "true" , "bounds": get_bounds(0 , 22 , 14 , 2), "tileFilter": {}, "markdown": "# NORTH AMERICA" })
        newDashboardTiles.append({ "name": "Last 1 h" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 22 , 4 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Reliability Graph (3 days)" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 26 , 6 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Last 3 days" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 32 , 4 , 1), "tileFilter": {} })
        # CHINA HUB
        newDashboardTiles.append({ "name": "Header" ,"tileType": "MARKDOWN" , "configured": "true" , "bounds": get_bounds(0 , 37 , 14 , 2), "tileFilter": {}, "markdown": "# CHINA" })
        newDashboardTiles.append({ "name": "Last 1 h" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 37 , 4 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Reliability Graph (3 days)" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 41 , 6 , 1), "tileFilter": {} })
        newDashboardTiles.append({ "name": "Last 3 days" ,"tileType": "HEADER" , "configured": "true" , "bounds": get_bounds(2 , 47 , 4 , 1), "tileFilter": {} })
    return newDashboardTiles

def main(slo_path):
    """Generate dashboard tiles JSON from the SLO parameter file and, with
    --auto-upload, back up and replace the STAGING dashboards in every
    environment listed in ./environment.yaml."""
    configrepo = clone_repo_if_notexist(CONFIG_REPO_URL, CONFIG_REPO_NAME)
    pull_repo(configrepo)
    archiverepo = clone_repo_if_notexist(ARCHIVE_REPO_URL, ARCHIVE_REPO_NAME)
    pull_repo(archiverepo)
    print("Generating dashboard tiles...")
    slo_doc = load_slo_parameter(slo_path)
    dashboard_json = create_default_tiles()
    # Per-hub left offsets (in grid bricks) for EMEA/NA/CN columns.
    if(args.wall):
        offsets = [0,21,42]
    else:
        offsets = [0,15,30]
    hub_config = {
        "euprod": { "offset": offsets[0], "remote_url": 'https://xxu26128.live.dynatrace.com' },
        "naprod": { "offset": offsets[1], "remote_url": 'https://wgv50241.live.dynatrace.com' },
        "cnprod": { "offset": offsets[2], "remote_url": 'https://dynatrace-cn-int.bmwgroup.com:443/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b' }
    }
    timeframe_actual = "-1h"
    timeframe_graph = "-3d"
    dahboardcount = 1
    rowcount = 0
    boundindex = 1
    generatedfiles = []
    if(args.rows is not None):
        rowcount = args.rows
    if(args.department == "ALL"):
        blname = "ALL"
    else:
        blname = BUSINESS_LINES[args.department]
    blvalue = args.department
    slorelevant = False
    if(blname and blvalue):
        for slo_name, configuration in slo_doc.items():
            if configuration['department'].startswith(blvalue) or blvalue == "ALL":
                # NOTE(review): slorelevant is never reset per SLO — once any
                # config matches, all later SLOs get the [QM-Report] marker.
                # Confirm whether this latching is intended.
                if(configuration['selector_var'] == "CoCo-QM-Report_Mobile"):
                    slorelevant = True
                print("Dashboard #"+str(dahboardcount)+" : Configurint SLO "+str(boundindex) +" of "+str(rowcount))
                # Start a fresh dashboard page once the row limit is reached.
                if rowcount > 0 and boundindex > rowcount:
                    dashboard_json = create_default_tiles()
                    dahboardcount = dahboardcount+1
                    boundindex = 1
                slo_display = configuration["displayname"]
                slo_department = configuration["department"]
                #timeframe_ytd = configuration["yearstart"] + " 00:00 to now"
                timeframe_ytd = "-3d"
                slo_graphThreshold_SingleValue = get_dataExplorerTileSloThreshold(configuration["thresholds"]["single_value"])
                slo_graphThreshold_Graph = get_dataExplorerTileSloThreshold(configuration["thresholds"]["graph_value"])
                emeaslourl = ""
                naslourl = ""
                cnslourl = ""
                if len(configuration["hubs"]) > 0:
                    # Build deep links to the hub-local SLO views when ids exist.
                    if(configuration["ids"]["emea"]):
                        emeaslourl = hub_config["euprod"]["remote_url"] + "/ui/slo?id="+configuration["ids"]["emea"]+"&sloexp="+configuration["ids"]["emea"]+"&slovis=Table"
                    if(configuration["ids"]["na"]):
                        naslourl = hub_config["naprod"]["remote_url"] + "/ui/slo?id="+configuration["ids"]["na"]+"&sloexp="+configuration["ids"]["na"]+"&slovis=Table"
                    if(configuration["ids"]["cn"]):
                        cnslourl = hub_config["cnprod"]["remote_url"] + "/ui/slo?id="+configuration["ids"]["cn"]+"&sloexp="+configuration["ids"]["cn"]+"&slovis=Table"
                    # Row header (markdown) tile for this SLO.
                    dashboard_json.append(get_DataExplorerTile_Markdown(slo_display, slo_department, get_bounds(((boundindex)*(3)) , 0 , 7 , 3), configuration["ops_dashboard"]["emea"], configuration["ops_dashboard"]["na"], configuration["ops_dashboard"]["cn"],configuration["doc_url"],emeaslourl,naslourl,cnslourl,slorelevant,args.wall))
                    # One column set per hub: actual (1h), graph (3d), ytd.
                    for hub,tiles in configuration["hubs"].items():
                        if 'actual' in tiles["tiles"]:
                            dashboard_json.append(get_DataExplorerTile_SingleValue(slo_name, configuration["metric"], hub_config[hub]["remote_url"], get_bounds(((boundindex)*(3)) , 7 + hub_config[hub]["offset"] , 4 , 3), timeframe_actual, slo_graphThreshold_SingleValue))
                        if "graph" in tiles["tiles"]:
                            if(args.wall):
                                dashboard_json.append(get_DataExplorerTile_Graph(slo_name, configuration["metric"], configuration["selector_var"].replace("~",""), hub_config[hub]["remote_url"], get_bounds(((boundindex)*(3)) , 11 + hub_config[hub]["offset"] , 12 , 3), timeframe_graph, "97", "102", slo_graphThreshold_Graph))
                            else:
                                dashboard_json.append(get_DataExplorerTile_Graph(slo_name, configuration["metric"], configuration["selector_var"].replace("~",""), hub_config[hub]["remote_url"], get_bounds(((boundindex)*(3)) , 11 + hub_config[hub]["offset"] , 6 , 3), timeframe_graph, "97", "102", slo_graphThreshold_Graph))
                        if "ytd" in tiles["tiles"]:
                            if(args.wall):
                                dashboard_json.append(get_DataExplorerTile_SingleValue(slo_name, configuration["metric"], hub_config[hub]["remote_url"], get_bounds(((boundindex)*(3)) , 23 + hub_config[hub]["offset"] , 4 , 3), timeframe_ytd, slo_graphThreshold_SingleValue))
                            else:
                                dashboard_json.append(get_DataExplorerTile_SingleValue(slo_name, configuration["metric"], hub_config[hub]["remote_url"], get_bounds(((boundindex)*(3)) , 17 + hub_config[hub]["offset"] , 4 , 3), timeframe_ytd, slo_graphThreshold_SingleValue))
                boundindex = boundindex+1
                # Page full: flush this dashboard's tiles to its JSON file.
                if rowcount > 0 and boundindex > rowcount:
                    with open("dashboard_tiles_"+str(dahboardcount)+".json", "w") as file:
                        json.dump(dashboard_json, file, indent=2)
                    generatedfiles.append("dashboard_tiles_"+str(dahboardcount)+".json")
        # Flush the last (possibly partial) dashboard page.
        if rowcount == 0 or (args.rows is not None and boundindex%args.rows != 0):
            with open("dashboard_tiles_"+str(dahboardcount)+".json", "w") as file:
                json.dump(dashboard_json, file, indent=2)
            generatedfiles.append("dashboard_tiles_"+str(dahboardcount)+".json")
        if args.auto_upload:
            print("Getting existing STAGING dashboards from Dynatrace")
            with open('./environment.yaml') as file:
                doc = yaml.safe_load(file)
            # NOTE(review): the loop variable rebinds `doc` while iterating
            # doc.items(); it works because the view is created first, but is
            # fragile — consider distinct names.
            for item, doc in doc.items():
                token = dict(doc[2])
                url = dict(doc[1])
                print("Crawling through: " + item)
                print("Gather data, hold on a minute")
                DTTOKEN = config(token.get('env-token-name'))
                DTURL = url.get('env-url')
                print("Downloading STAGING dashboards to local repo ("+blname+")...")
                # NOTE(review): created dashboards are named
                # '<DASHBOARD_NAME> - <businessline> #N', but this prefix search
                # omits the ' - ' separator — verify the match actually works.
                existingdashboards = get_all_dashboards_withname(DTTOKEN, DTURL,DASHBOARD_NAME +blname)
                print("Uploading STAGING dashboards to Dynatrace ("+blname+")...")
                backup_dashboards(DTTOKEN, DTURL, existingdashboards)
                now=datetime.now()
                strnowdate = now.strftime("%Y%m%d")
                push_repo(archiverepo, strnowdate+"_Global dashboard as code auto-upload backup")
                create_or_update_dashboard(DTTOKEN, DTURL, existingdashboards, generatedfiles, blname)
    else:
        print("ERROR: Could not find Business line for given department.")

if __name__ == "__main__":
    main('./shared_configuration/slo_parameter.yaml')