Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Host status test #216

Merged
merged 16 commits into from
Mar 11, 2019
11 changes: 8 additions & 3 deletions usmqe/api/grafanaapi/grafanaapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,13 +68,18 @@ def get_panel(self, panel_title, row_title, dashboard, panel_type=None):
if panel_type:
found_panels = [
panel for panel in panels
if "title" in panel and
panel["title"] == panel_title and panel["type"] == panel_type]
if ("title" in panel and
panel["title"] == panel_title
or "displayName" in panel and
panel["displayName"] == panel_title)
and panel["type"] == panel_type]
else:
found_panels = [
panel for panel in panels
if "title" in panel and
panel["title"] == panel_title]
panel["title"] == panel_title
or "displayName" in panel and
panel["displayName"] == panel_title]
assert len(found_panels) == 1
return found_panels[0]

Expand Down
24 changes: 15 additions & 9 deletions usmqe/api/graphiteapi/graphiteapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,8 @@ def compare_data_mean(
until_date=None,
divergence=10,
sample_rate=1,
operation='sum'):
operation='sum',
issue=None):
"""
Compare expected result with sum of means from graphite data from given targets.

Expand All @@ -67,6 +68,7 @@ def compare_data_mean(
when there are more targets available:
sum - summation of data
diff - subtraction of data
issue (str): known issue, log WAIVE
"""
graphite_data_mean_all = 0
if from_date and not isinstance(from_date, int):
Expand All @@ -78,7 +80,7 @@ def compare_data_mean(
graphite_data = self.get_datapoints(
target, from_date=from_date, until_date=until_date)
# drop empty data points
graphite_data = [x for x in graphite_data if x[0]]
graphite_data = [x for x in graphite_data if x[0] is not None]
# process data from graphite
graphite_data_mean = sum(
[x[0] for x in graphite_data]) / max(
Expand All @@ -91,7 +93,8 @@ def compare_data_mean(
(len(graphite_data) == expected_number_of_datapoints) or
(len(graphite_data) == expected_number_of_datapoints - 1),
"Number of samples of used data should be {}, is {}.".format(
expected_number_of_datapoints, len(graphite_data)))
expected_number_of_datapoints, len(graphite_data)),
issue=issue)
LOGGER.debug("mean of data from `{}` in Graphite: {}".format(
target, graphite_data_mean))
if operation == 'sum' or idx == 0:
Expand All @@ -106,11 +109,14 @@ def compare_data_mean(
LOGGER.info("used operation: {}".format(operation))
minimal_expected_result = expected_result - divergence
maximal_expected_result = expected_result + divergence
msg = "Data mean should be {}, data mean in Graphite is: {}, ".format(
expected_result,
graphite_data_mean_all)
msg = "Data mean for target {}, "\
"should be {}, data mean in Graphite is: {}, ".format(
target,
expected_result,
graphite_data_mean_all)
msg += "applicable divergence is {}".format(divergence)
pytest.check(
minimal_expected_result <
graphite_data_mean_all < maximal_expected_result,
msg)
minimal_expected_result <=
graphite_data_mean_all <= maximal_expected_result,
msg,
issue=issue)
71 changes: 71 additions & 0 deletions usmqe_tests/api/grafana/test_cluster_dashboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
"""

import pytest
import time
from usmqe.api.grafanaapi import grafanaapi
from usmqe.api.graphiteapi import graphiteapi
from usmqe.gluster.gluster import GlusterCommon
Expand Down Expand Up @@ -211,3 +212,73 @@ def test_hosts_panel_status(ansible_playbook, cluster_reuse):
g_down == len(real_down),
"Number of hosts that are down in graphite ({}) should be {}".format(
g_down, len(real_down)))


@pytest.mark.testready
@pytest.mark.ansible_playbook_setup("test_setup.tendrl_services_stopped_on_nodes.yml")
@pytest.mark.ansible_playbook_setup('test_setup.graphite_access.yml')
@pytest.mark.ansible_playbook_teardown("test_teardown.tendrl_services_stopped_on_nodes.yml")
@pytest.mark.author("[email protected]")
def test_hosts(ansible_playbook, workload_stop_nodes, cluster_reuse):
    """
    Check that Grafana panel *Hosts* is showing correct values.
    """
    # Prefer the human-readable short name; fall back to the integration id.
    cluster_identifier = (
        cluster_reuse["short_name"]
        if cluster_reuse["short_name"]
        else cluster_reuse["integration_id"])

    grafana = grafanaapi.GrafanaApi()
    graphite = graphiteapi.GraphiteApi()

    hosts_panel = grafana.get_panel(
        "Hosts",
        row_title="At-a-glance",
        dashboard="cluster-dashboard")

    """
    :step:
      Send **GET** request to ``GRAPHITE/render?target=[target]&format=json``
      where [target] is part of uri obtained from previous GRAFANA call.
      There should be target for statuses of a hosts.
      Compare number of hosts from Graphite with value retrieved from
      ``workload_stop_nodes`` fixture.
    :result:
      JSON structure containing data related to hosts status is similar
      to values set by ``workload_stop_nodes`` fixture in given time.
    """
    # get graphite target pointing at data containing numbers of hosts
    targets = grafana.get_panel_chart_targets(hosts_panel, cluster_identifier)
    targets_used = tuple(targets[index][0] for index in range(3))
    targets_expected = ('nodes_count.total', 'nodes_count.up', 'nodes_count.down')
    for used_target, expected_suffix in zip(targets_used, targets_expected):
        pytest.check(
            used_target.endswith(expected_suffix),
            "There is used target that ends with `{}`".format(
                expected_suffix))
    # make sure that all data in graphite are saved
    time.sleep(3)
    # Expected means per target: *Total* and *Down* should both equal the
    # number of stopped nodes reported by the fixture, *Up* should be zero.
    expected_means = (
        workload_stop_nodes["result"],
        0.0,
        workload_stop_nodes["result"])
    for expected_mean, used_target in zip(expected_means, targets_used):
        graphite.compare_data_mean(
            expected_mean,
            (used_target,),
            workload_stop_nodes["start"],
            workload_stop_nodes["end"],
            divergence=1,
            issue="https://bugzilla.redhat.com/show_bug.cgi?id=1687333")
25 changes: 25 additions & 0 deletions usmqe_tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ def measure_operation(
if additional_time > 0:
time.sleep(additional_time)
end_time = datetime.datetime.now()
LOGGER.info("Wait 10 seconds for graphite to load dataframes")
time.sleep(10)
return {
"start": start_time,
"end": end_time,
Expand Down Expand Up @@ -501,6 +503,29 @@ def fill_memory():
return measure_operation(fill_memory)


@pytest.fixture
def workload_stop_nodes():
    """
    Measure a period during which Tendrl-managed storage nodes are down.

    Tests using this fixture must also use the `ansible_playbook` fixture
    and apply these markers so the playbooks run around the measurement:

    @pytest.mark.ansible_playbook_setup("test_setup.stop_tendrl_nodes.yml")
    @pytest.mark.ansible_playbook_teardown("test_teardown.stop_tendrl_nodes.yml")

    Returns:
        dict: `start` and `end` times of the measured wait procedure; the
        `result` value is the number of gluster servers in the inventory.
    """
    def measured_wait():
        # Callable handed to measure_operation; its runtime is the
        # interval being measured.
        LOGGER.info("Measure time when tendrl notices that nodes are down.")
        time.sleep(120)
        return len(CONF.inventory.get_groups_dict()["gluster_servers"])

    LOGGER.info("Wait for tendrl to notice that nodes are down")
    time.sleep(280)
    return measure_operation(measured_wait)


@pytest.fixture()
def gluster_volume(request):
"""
Expand Down