Compare commits


No commits in common. "master" and "production" have entirely different histories.

12 changed files with 14 additions and 763 deletions

.gitignore

@@ -1,139 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# reportfiles
*.csv
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
### Terraform stuff
**/.terraform/*
crash.log
*.tfvars

Jenkinsfile

@@ -1,42 +1,14 @@
//not required right now as CN is reachable from EMEA as well
def loopEnvironments(environments){
print env.JENKINS_URL
environments.each { key, val ->
//Execute only if you are on the same environment
//not required right now as CN is reachable from EMEA as well
if (env.JENKINS_URL == environments."${key}"[3].'jenkins')
{
envname = environments."${key}"[0].'name'
envurl = environments."${key}"[1].'env-url'
tokenname = environments."${key}"[2].'env-token-name'
sh "python createReport.py '${envname}'"
}
}
}
pipeline {
options {
ansiColor('xterm')
}
//label libraryBuild is available in CN JAWS and ROW JAWS, therefore this one was used; no additional intent behind the choice
agent {label 'libraryBuild'}
agent{label 'libraryBuild'}
//here comes the trigger according to crontabs - jenkins is in UTC
//here comes the trigger according to crontabs
triggers {
//every 1st of every month at 00:00
//cron('0 0 1 * *')
//every day at 08:00
cron('0 6 * * *')
//every monday at 08:00
//cron('0 8 * * MON')
cron('0 0 1 * *')
}
environment {
//ProxySettings
@@ -60,47 +32,18 @@
}
stages {
stage('install required python packages') {
stage('install required packages') {
steps {
sh '''
pip3 install --user -r requirements.txt
pip install --upgrade pip
pip install -r requirements.txt
'''
print env.JENKINS_URL
}
}
stage('Execute Reporting Script') {
steps {
sh 'python3 createReport.py'
//Only required once CN is no longer reachable from EMEA
//loopEnvironments(environments)
}
}
stage('Send report') {
steps {
script {
try {
emailext subject: env.JOB_NAME,
body: 'Please find the output of your reports attached',
to: 'stephan.oertelt@bmw.de,Hendrik.Schweppe@bmw.de,Klemens.Mang@bmw.de,Alexander.Wiedeck@bmw.de,carolin.brandl@bmw.de',
replyTo: 'coco-apm@bmw.de',
attachmentsPattern: '*.csv'
}
catch ( mailExc ){
echo "Sending Email Failed: ${mailExc}"
}
}
}
}
}
}
post {
always {
cleanWs()
}
}
}
}

README.md

@@ -1,134 +1 @@
# Dynatrace Reporting Pipeline
This repository is used as a template to create automated Dynatrace reports through Jenkins (JAWS), which are sent as mail attachments.
***
## Jenkins environments
EMEA & NA: https://jaws.bmwgroup.net/opapm/
CN: https://jaws-china.bmwgroup.net/opmaas/
### Request access
Access is granted manually through stephan.oertelt@bmw.de and Mohammed.Abadel@bmw.de
### Multi-Branch Pipelines
- master (=latest) --> for testing and development
- staging --> pre-release branch; once changes are tested successfully here, merge them to production
- production --> actively used, productive reports
***
## Minimum Content of a Dynatrace Reporting Repo
### Repository & Pipeline Naming
- Repository and Pipelines must have identical names
- Naming must start with CoCo_APM_Reporting_**report name here**
### readme-File
The readme file must contain a useful description of **what** is reported, **when**, and to **whom**.
Example: <br>
> This report is reporting all installed OneAgent versions including:
> - Hostnames
> - Environment Tag
> - PaaS Tag
>
> Sent to: coco-apm@bmw.de
>
> Executed on each Monday of every week <br>
> Executed on every Dynatrace environment
### requirements.txt
The requirements.txt file must list **all** third-party Python packages the script uses through ```import``` (standard-library modules such as ```datetime``` and ```argparse``` need no entry) <br>
e.g.: <br>
```
python-decouple
pyyaml
pandas
requests
```
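These entries map to the imports at the top of the reporting script; a matching import block as a minimal sketch (note that the package names ```pyyaml``` and ```python-decouple``` differ from the module names they provide):
```
# Sketch: imports covered by the requirements above
import yaml                  # from the pyyaml package
import pandas as pd
import requests
from decouple import config  # from the python-decouple package
```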
### environments.yaml
The environments.yaml file contains all environments on which the script should be executed; environments that should not be executed can be commented out with ```#```.
**Do NOT change the environment names; the pipeline script is configured to distinguish between EMEA/NA and CN, as there are different Jenkins environments!**
The following snippet shows an environment file which is only executed on Dynatrace prod environments.
```
euprod:
  - name: "euprod"
  - env-url: "https://xxu26128.live.dynatrace.com"
  - env-token-name: "EUPROD_TOKEN_VAR"
#eupreprod:
#  - name: "eupreprod"
#  - env-url: "https://qqk70169.live.dynatrace.com"
#  - env-token-name: "EUPREPROD_TOKEN_VAR"
#napreprod:
#  - name: "napreprod"
#  - env-url: "https://onb44935.live.dynatrace.com"
#  - env-token-name: "NAPREPROD_TOKEN_VAR"
naprod:
  - name: "naprod"
  - env-url: "https://wgv50241.live.dynatrace.com"
  - env-token-name: "NAPROD_TOKEN_VAR"
cnprod:
  - name: "cnprod"
  - env-url: "https://dynatracemgd-cn.bmwgroup.net/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b"
  - env-token-name: "CNPROD_TOKEN_VAR"
#cnpreprod:
#  - name: "cnpreprod"
#  - env-url: "https://dynatracemgd-cn.bmwgroup.net/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b"
#  - env-token-name: "CNPREPROD_TOKEN_VAR"
```
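At runtime the reporting script walks this file and resolves each token from the environment via python-decouple; a minimal sketch of that loop (mirroring the loop at the bottom of createReport.py shown further down):
```
# Sketch: iterate the environment file and run the report per environment
import yaml
from decouple import config

with open('./environments.yaml') as file:
    environments = yaml.safe_load(file)

for name, entries in environments.items():
    url = dict(entries[1])['env-url']
    token_name = dict(entries[2])['env-token-name']
    token = config(token_name, default="")  # empty if the token was not injected
    if token:
        print("Gathering data for " + name)
        # the report function, e.g. GatherRemediationItems(token, url, name), runs here
    else:
        print("token not found, skipping " + name)
```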
### Jenkinsfile
The Jenkinsfile is the pipeline script itself including:
- Time-Trigger
- Install package step
- Execute script step
- Send Mail step
### Python Script
The script itself will gather and aggregate the data from the Dynatrace environments.
Script output must be a CSV or Excel file written to the **folder where the script is executed**.
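In practice that means writing with a relative path, e.g. via pandas, so the Jenkinsfile's ```attachmentsPattern '*.csv'``` picks the file up; a minimal sketch (column names and values are placeholders):
```
# Sketch: write the report into the execution folder (relative path)
import pandas as pd

df = pd.DataFrame([{"host": "example-host", "oneAgentVersion": "1.229.0"}])
df.to_csv("my_report.csv", index=False)
```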
***
## First Usage - Create a new Pipeline
### Fork this repo
Do **NOT** clone this repo, create a fork instead.
1. On the left menu bar, click *Create fork*
![Bitbucket Fork Step 1](assets/bitbucket_fork_1.PNG)
2. Choose Project *Offboard Platform - APM - Application Performance Monitoring*
3. Choose a name starting with *CoCo_APM_Reporting_* and a useful suffix
4. Uncheck *Enable fork syncing*
![Bitbucket Fork Step 2](assets/bitbucket_fork_2.PNG)
5. Edit readme.md and describe your report
6. Change/Update the environment, requirements and script according to your needs
7. Login to Jenkins and select the folder *Coco APM Reporting*
8. Click the *New Item* Button
9. Enter the name of your repo (e.g. *CoCo_APM_Reporting_OneAgentVersion*)
10. Select *Multibranch Pipeline*
11. Click *OK* Button
![Jenkins Create Pipeline Step 1](assets/jenkins_create_pipeline_1.PNG)
12. On the pipeline settings page, go to *Branch Sources*, click *ADD SOURCE* and select *Bitbucket*
- Server: **ATC**
- Credentials: **qqjaws7**
- Owner: **OPAPM**
- Repository Name: **your forked repository**
- Behaviours: **according to the screenshot**
<br>
![Jenkins Create Pipeline Step 2](assets/jenkins_create_pipeline_2.PNG)
<br>
Your pipeline will automatically test-run for all 3 branches.
init repo

4 binary files removed and not shown (presumably the assets/*.PNG screenshots referenced in the README above): 40 KiB, 26 KiB, 141 KiB, and 53 KiB.

createReport.py

@@ -1,188 +0,0 @@
from decouple import config
import yaml
import requests
import pandas as pd
import time
from distutils.version import LooseVersion
from dynatraceAPI import Dynatrace


def make_request(url, headers):
    try:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError as errh:
        return "An Http Error occurred: " + repr(errh)
    except requests.exceptions.ConnectionError as errc:
        return "An Error Connecting to the API occurred: " + repr(errc)
    except requests.exceptions.Timeout as errt:
        return "A Timeout Error occurred: " + repr(errt)
    except requests.exceptions.RequestException as err:
        return "An Unknown Error occurred: " + repr(err)
    return response


def GatherAllProblems(DTAPIToken, DTENV, friendlyName):
    dtClient = Dynatrace(DTENV, DTAPIToken)
    params = {"pageSize": 500
              # "fields": '%2BriskAssessment'
              }
    secProblems = dtClient.returnPageination("/api/v2/securityProblems/1340823583484240022?fields=%2BriskAssessment", params, list_item="securityProblems")
    df = pd.DataFrame(secProblems.elements)
    df.to_csv(friendlyName + '_Allvulnerabilities.csv')
    return secProblems


def GatherRemediationItems(DTAPIToken, DTENV, friendlyName):
    tags = ["Environment", "Hub", "PaaS", "Cloud", "Platform", "Namespace", "compass-id", "ms-id", "app-id", "app-name",
            "WK", "Wirkkette", "app", "deployment", "service", "itsm-service", "runtime.connected.bmw/managed-app-name",
            "controller-revision-hash", "name2", "statefulset.kubernetes.io/pod-name", "stage", "MCID"]
    df = pd.DataFrame()
    dtClient = Dynatrace(DTENV, DTAPIToken)
    # Cache of already-resolved entity tags, keyed by entity id
    dfEntities = pd.DataFrame()
    dfEntities['id'] = ""
    # secProblems = dtClient.returnPageination("/api/v2/securityProblems/1340823583484240022?fields=%2BriskAssessment", params, list_item="securityProblems")
    problem = dtClient.returnSingle("/api/v2/securityProblems/1340823583484240022?fields=%2BriskAssessment")
    print(problem)
    APIURL = "/api/v2/securityProblems/" + problem['securityProblemId'] + '/remediationItems'
    params = {'remediationItemSelector': 'vulnerabilityState("VULNERABLE")'}
    remItems = dtClient.returnPageination(APIURL, params, list_item="remediationItems")
    for Item in remItems.elements:
        row = {'CVE': problem['cveIds'],
               'Title': problem['title'],
               'riskLevel': problem['riskAssessment']['riskLevel'],
               'riskScore': '[' + str(problem['riskAssessment']['riskScore']) + ']',
               'Displayname': Item['name'],
               'State': Item['vulnerabilityState'],
               'muteState': Item['muteState']['muted'],
               'exposure': Item['assessment']['exposure'],
               'dataAssets': Item['assessment']['dataAssets'],
               # 'firstAffectedDate': time.strftime('%Y-%m-%d', time.localtime(Item['firstAffectedTimestamp']/1000)),
               # 'firstAffectedTime': time.strftime('%H:%M:%S', time.localtime(Item['firstAffectedTimestamp']/1000))
               }
        i = 0
        for comp in Item['vulnerableComponents']:
            row.update({'vulnerableComponent_' + str(i): comp['displayName']})
            i += 1
        if Item['id'] in dfEntities['id'].values:
            # Entity already seen: reuse the cached tags (minus the helper 'id' column)
            cached = dfEntities.loc[dfEntities['id'] == Item['id']]
            row.update(cached.drop(columns=['id']).iloc[0].to_dict())
        else:
            APIURL = '/api/v2/entities?entitySelector=entityId("' + Item['id'] + '")&from=now-1h&fields=+tags'
            enty = dtClient.returnPageination(APIURL, params={}, list_item="entities")
            try:
                if len(enty) > 0:
                    for entity in enty.elements:
                        tagrow = {'id': entity['entityId']}
                        for tag in entity['tags']:
                            if tag['key'] in tags:
                                # Some tags carry no value; fall back to the key itself
                                try:
                                    tagrow.update({tag['key']: tag['value']})
                                    row.update({tag['key']: tag['value']})
                                except KeyError:
                                    tagrow.update({tag['key']: tag['key']})
                                    row.update({tag['key']: tag['key']})
                        # DataFrame.append was removed in pandas 2.0; use pd.concat there
                        dfEntities = dfEntities.append(tagrow, ignore_index=True)
                        # print("adding host to cache " + str(entity['entityId']))
                        # print("df len " + str(len(dfEntities.index)))
            except Exception:
                print("no entity seen")
        df = df.append(row, ignore_index=True)
    df.to_csv(friendlyName + '_vulnerabilities.csv')


def GatherNew(DTAPIToken, DTENV, friendlyName):
    # GatherAllProblems already writes the full problem list;
    # the remediation items are gathered in a second pass
    GatherAllProblems(DTAPIToken, DTENV, friendlyName)
    GatherRemediationItems(DTAPIToken, DTENV, friendlyName)


def GatherReportingInfo(DTAPIToken, DTENV, friendlyName):
    DTAPIURL = DTENV + "/api/v2/securityProblems/1340823583484240022/remediationItems"
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Api-Token ' + DTAPIToken
    }
    r = make_request(DTAPIURL, headers)
    df = pd.DataFrame()
    for Item in r.json()['remediationItems']:
        row = {'Name': Item['name'], 'State': Item['vulnerabilityState'], 'exposure': Item['assessment']['exposure'], 'dataAssets': Item['assessment']['dataAssets'], 'firstAffectedTimestamp': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(Item['firstAffectedTimestamp'] / 1000))}
        if 'resolvedTimestamp' in Item.keys():
            row.update({'Resolved': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(Item['resolvedTimestamp'] / 1000))})
        entityResponse = make_request(DTENV + "/api/v2/entities/" + Item['id'], headers)
        for tag in entityResponse.json()['tags']:
            try:
                row.update({tag['key']: tag['value']})
            except KeyError:
                row.update({tag['key']: tag['key']})
        for vulComp in Item['vulnerableComponents']:
            min_version = '9.0.0-0'
            for PGI in vulComp['affectedEntities']:
                pgiResponse = make_request(DTENV + "/api/v2/entities/" + PGI, headers)
                if 'installerVersion' in pgiResponse.json()['properties']:
                    current_version = pgiResponse.json()['properties']['installerVersion']
                    print(current_version)
                    if LooseVersion(current_version) < LooseVersion(min_version):
                        min_version = current_version
            row.update({'vulnerableComponent': vulComp['fileName'], 'oldestOneAgent': min_version})
        df = df.append(row, ignore_index=True)
    df.to_csv(friendlyName + 'log4j_vulnerability.csv')


with open('./environment.yaml') as file:
    environments = yaml.safe_load(file)
for item, envconf in environments.items():
    token = dict(envconf[2])
    url = dict(envconf[1])
    print("Crawling through: " + item)
    print("Check if token exists in environment...")
    # default="" keeps missing tokens from raising; they are skipped instead
    if config(token.get('env-token-name'), default="") != "":
        print("Gather data, hold on a minute")
        DTTOKEN = config(token.get('env-token-name'))
        DTURL = url.get('env-url')
        GatherRemediationItems(DTTOKEN, DTURL, item)
    else:
        print("token not found, skipping " + item)

dynatraceAPI.py

@@ -1,41 +0,0 @@
import logging
from typing import Dict
import os
import sys

# Make the sibling modules importable regardless of the working directory
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)

from httpClient import HttpClient
from pagination import Pagionation


class Dynatrace:
    def __init__(
        self,
        base_url: str,
        token: str,
        log: logging.Logger = None,
        proxies: Dict = None,
        too_many_requests_strategy=None,
        retries: int = 0,
        retry_delay_ms: int = 0,
    ):
        self.__http_client = HttpClient(
            base_url, token, log, proxies, too_many_requests_strategy, retries, retry_delay_ms
        )

    def returnPageination(self, path, params, list_item):
        page = Pagionation(self.__http_client, path, params, list_item=list_item)
        return page

    def returnSingle(self, path):
        response = self.__http_client.make_request(path)
        json_response = response.json()
        return json_response
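Usage mirrors createReport.py; a minimal sketch (URL and token are placeholders):
```
# Sketch: query a single object and a paginated list through the wrapper
from dynatraceAPI import Dynatrace

dtClient = Dynatrace("https://xxu26128.live.dynatrace.com", "dt0c01.EXAMPLE")
problem = dtClient.returnSingle("/api/v2/securityProblems/1340823583484240022")
page = dtClient.returnPageination("/api/v2/securityProblems",
                                  {"pageSize": 500}, list_item="securityProblems")
print(len(page.elements))
```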

environment.yaml

@@ -1,30 +1,24 @@
euprod:
- name: "EUprod"
- name: "euprod"
- env-url: "https://xxu26128.live.dynatrace.com"
- env-token-name: "EUPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
eupreprod:
- name: "eupreprod"
- env-url: "https://qqk70169.live.dynatrace.com"
- env-token-name: "EUPREPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
napreprod:
- name: "napreprod"
- env-url: "https://onb44935.live.dynatrace.com"
- env-token-name: "NAPREPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
naprod:
- name: "naprod"
- env-url: "https://wgv50241.live.dynatrace.com"
- env-token-name: "NAPROD_TOKEN_VAR"
- jenkins: "https://jaws.bmwgroup.net/opapm/"
#cnprod:
cnprod:
- name: "cnprod"
- env-url: "https://dynatracemgd-tsp.bmwgroup.net/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b"
- env-url: "https://dynatracemgd-cn.bmwgroup.net/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b"
- env-token-name: "CNPROD_TOKEN_VAR"
- jenkins: "https://jaws-china.bmwgroup.net/opmaas/"
#cnpreprod:
cnpreprod:
- name: "cnpreprod"
- env-url: "https://dynatracemgd-tsp.bmwgroup.net/e/ab88c03b-b7fc-45f0-9115-9e9ecc0ced35"
- env-url: "https://dynatracemgd-cn.bmwgroup.net/e/b921f1b9-c00e-4031-b9d1-f5a0d530757b"
- env-token-name: "CNPREPROD_TOKEN_VAR"
- jenkins: "https://jaws-china.bmwgroup.net/opmaas/"

httpClient.py

@@ -1,116 +0,0 @@
import logging
from typing import Dict, Optional, Any
import time

import requests
import urllib3
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

TOO_MANY_REQUESTS_WAIT = "wait"


##Not sure where/why this is here
class DynatraceRetry(Retry):
    def get_backoff_time(self):
        return self.backoff_factor


class HttpClient:
    def __init__(
        self,
        base_url: str,
        token: str,
        log: logging.Logger = None,
        proxies: Dict = None,
        too_many_requests_strategy=None,
        retries: int = 0,
        retry_delay_ms: int = 0,
        # mc_jsession_id: Optional[str] = None,
        # mc_b925d32c: Optional[str] = None,
        # mc_sso_csrf_cookie: Optional[str] = None,
    ):
        while base_url.endswith("/"):
            base_url = base_url[:-1]
        self.base_url = base_url
        if proxies is None:
            proxies = {}
        self.proxies = proxies
        self.auth_header = {"Authorization": f"Api-Token {token}"}
        self.log = log
        if self.log is None:
            self.log = logging.getLogger(__name__)
            self.log.setLevel(logging.WARNING)
            st = logging.StreamHandler()
            fmt = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(thread)d - %(filename)s:%(lineno)d - %(message)s")
            st.setFormatter(fmt)
            self.log.addHandler(st)
        self.too_many_requests_strategy = too_many_requests_strategy
        retry_delay_s = retry_delay_ms / 1000
        try:
            self.retries = Retry(
                total=retries,
                backoff_factor=retry_delay_s,
                status_forcelist=[400, 401, 403, 404, 413, 429, 500, 502, 503, 504],
                allowed_methods=["TRACE", "PUT", "DELETE", "OPTIONS", "HEAD", "GET", "POST"],
                raise_on_status=False,
            )
        except TypeError:  # Older version of urllib3?
            self.retries = Retry(
                total=retries,
                backoff_factor=retry_delay_s,
                status_forcelist=[400, 401, 403, 404, 413, 429, 500, 502, 503, 504],
                method_whitelist=["TRACE", "PUT", "DELETE", "OPTIONS", "HEAD", "GET", "POST"],
                raise_on_status=False,
            )
        # This is for internal dynatrace usage
        # self.mc_jsession_id = mc_jsession_id
        # self.mc_b925d32c = mc_b925d32c
        # self.mc_sso_csrf_cookie = mc_sso_csrf_cookie

    def make_request(
        self, path: str, params: Optional[Any] = None, headers: Optional[Dict] = None, method="GET", data=None, files=None, query_params=None
    ) -> requests.Response:
        url = f"{self.base_url}{path}"
        body = None
        if method in ["POST", "PUT"]:
            body = params
            params = query_params
        if headers is None:
            headers = {}
        if files is None and "content-type" not in [key.lower() for key in headers.keys()]:
            headers.update({"content-type": "application/json"})
        headers.update(self.auth_header)
        cookies = None
        # if self.mc_b925d32c and self.mc_sso_csrf_cookie and self.mc_jsession_id:
        #     headers.update({"Cookie": f"JSESSIONID={self.mc_jsession_id}; ssoCSRFCookie={self.mc_sso_csrf_cookie}; b925d32c={self.mc_b925d32c}"})
        #     cookies = {"JSESSIONID": self.mc_jsession_id, "ssoCSRFCookie": self.mc_sso_csrf_cookie, "b925d32c": self.mc_b925d32c}
        s = requests.Session()
        s.mount("https://", HTTPAdapter(max_retries=self.retries))
        self.log.debug(f"Making {method} request to '{url}' with params {params} and body: {body}")
        r = s.request(method, url, headers=headers, params=params, json=body, verify=False, proxies=self.proxies, data=data, cookies=cookies, files=files)
        self.log.debug(f"Received response '{r}'")
        while r.status_code == 429 and self.too_many_requests_strategy == TOO_MANY_REQUESTS_WAIT:
            sleep_amount = int(r.headers.get("retry-after", 5))
            self.log.warning(f"Sleeping for {sleep_amount}s because we have received an HTTP 429")
            time.sleep(sleep_amount)
            r = requests.request(method, url, headers=headers, params=params, json=body, verify=False, proxies=self.proxies)
        if r.status_code >= 400:
            raise Exception(f"Error making request to {url}: {r}. Response: {r.text}")
        return r
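The retry and HTTP 429 handling can also be exercised standalone; a minimal sketch (URL and token are placeholders):
```
# Sketch: HttpClient with 3 retries, 1 s flat back-off, and the "wait"
# strategy, which sleeps for the server's retry-after header on HTTP 429
from httpClient import HttpClient, TOO_MANY_REQUESTS_WAIT

client = HttpClient(
    "https://xxu26128.live.dynatrace.com",
    "dt0c01.EXAMPLE",
    too_many_requests_strategy=TOO_MANY_REQUESTS_WAIT,
    retries=3,
    retry_delay_ms=1000,
)
response = client.make_request("/api/v2/entities", params={"pageSize": 100})
print(response.status_code)
```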

pagination.py

@@ -1,69 +0,0 @@
from httpClient import HttpClient


class Pagionation():
    def __init__(self, http_client, target_url, target_params=None, headers=None, list_item="result"):
        # self.__target_class = target_class
        self.__http_client: HttpClient = http_client
        self.__target_url = target_url
        self.__target_params = target_params
        self.__headers = headers
        self.__list_item = list_item
        self._has_next_page = True
        self.__total_count = None
        self.__page_size = None
        # _get_next_page follows all nextPageKey pages, so this is the full list
        self.elements = self._get_next_page()

    def __iter__(self):  # -> Iterator[T]:
        for element in self.elements:
            yield element
        while self._has_next_page:
            new_elements = self._get_next_page()
            for element in new_elements:
                yield element

    def __len__(self):
        return self.__total_count or len(self.elements)

    def _get_next_page(self):
        response = self.__http_client.make_request(self.__target_url, params=self.__target_params, headers=self.__headers)
        json_response = response.json()
        elements = []
        if json_response.get("nextPageKey", None):
            self._has_next_page = True
            self.__target_params = {"nextPageKey": json_response["nextPageKey"]}
        else:
            self._has_next_page = False
        if self.__list_item in json_response:
            elements = json_response[self.__list_item]
            self.__total_count = json_response.get("totalCount") or len(elements)
        while self._has_next_page:
            # Follow-up pages take only the nextPageKey; strip the original query string
            self.__target_url = self.__target_url.split("?")[0]
            elements += self._get_response()
        return elements

    def _get_response(self):
        response = self.__http_client.make_request(self.__target_url, params=self.__target_params, headers=self.__headers)
        json_response = response.json()
        elements = []
        if json_response.get("nextPageKey", None):
            self._has_next_page = True
            self.__target_params = {"nextPageKey": json_response["nextPageKey"]}
        else:
            self._has_next_page = False
        if self.__list_item in json_response:
            elements = json_response[self.__list_item]
            self.__total_count = json_response.get("totalCount") or len(elements)
        return elements