pcs/add-dtos-and-converting-functions-for-resources-stat.patch


From 713ede0d903f4a66a17ba30b627af921c3dbbb45 Mon Sep 17 00:00:00 2001
From: Peter Romancik <promanci@redhat.com>
Date: Fri, 2 Feb 2024 10:51:27 +0100
Subject: [PATCH] add dtos and converting functions for resources status
---
pcs/Makefile.am | 2 +
pcs/cli/common/lib_wrapper.py | 1 +
pcs/common/const.py | 23 +
pcs/common/reports/codes.py | 22 +
pcs/common/reports/messages.py | 214 ++
pcs/common/status_dto.py | 91 +
pcs/lib/commands/status.py | 13 +
pcs/lib/pacemaker/status.py | 509 +++++
pcs_test/Makefile.am | 2 +
pcs_test/resources/crm_mon.all_resources.xml | 40 +
.../tier0/common/reports/test_messages.py | 142 ++
pcs_test/tier0/lib/commands/test_status.py | 139 ++
pcs_test/tier0/lib/pacemaker/test_status.py | 1741 +++++++++++++++++
13 files changed, 2939 insertions(+)
create mode 100644 pcs/common/status_dto.py
create mode 100644 pcs/lib/pacemaker/status.py
create mode 100644 pcs_test/resources/crm_mon.all_resources.xml
create mode 100644 pcs_test/tier0/lib/pacemaker/test_status.py
diff --git a/pcs/Makefile.am b/pcs/Makefile.am
index ce10b49e..88ee8b7f 100644
--- a/pcs/Makefile.am
+++ b/pcs/Makefile.am
@@ -179,6 +179,7 @@ EXTRA_DIST = \
common/tools.py \
common/types.py \
common/validate.py \
+ common/status_dto.py \
config.py \
constraint.py \
daemon/app/common.py \
@@ -363,6 +364,7 @@ EXTRA_DIST = \
lib/pacemaker/live.py \
lib/pacemaker/simulate.py \
lib/pacemaker/state.py \
+ lib/pacemaker/status.py \
lib/pacemaker/values.py \
lib/permissions/__init__.py \
lib/permissions/checker.py \
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 447cf9d8..2fd5b1b6 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -448,6 +448,7 @@ def load_module(env, middleware_factory, name):
"full_cluster_status_plaintext": (
status.full_cluster_status_plaintext
),
+ "resources_status": status.resources_status,
},
)
diff --git a/pcs/common/const.py b/pcs/common/const.py
index 311f5171..32175677 100644
--- a/pcs/common/const.py
+++ b/pcs/common/const.py
@@ -3,6 +3,7 @@ from typing import NewType
from pcs.common.tools import Version
PcmkRoleType = NewType("PcmkRoleType", str)
+PcmkStatusRoleType = NewType("PcmkStatusRoleType", str)
PcmkOnFailAction = NewType("PcmkOnFailAction", str)
PcmkAction = NewType("PcmkAction", str)
@@ -13,6 +14,14 @@ PCMK_ROLE_PROMOTED = PcmkRoleType("Promoted")
PCMK_ROLE_UNPROMOTED = PcmkRoleType("Unpromoted")
PCMK_ROLE_PROMOTED_LEGACY = PcmkRoleType("Master")
PCMK_ROLE_UNPROMOTED_LEGACY = PcmkRoleType("Slave")
+PCMK_ROLE_UNKNOWN = PcmkRoleType("Unknown")
+PCMK_STATUS_ROLE_STARTED = PcmkStatusRoleType("Started")
+PCMK_STATUS_ROLE_STOPPED = PcmkStatusRoleType("Stopped")
+PCMK_STATUS_ROLE_PROMOTED = PcmkStatusRoleType("Promoted")
+PCMK_STATUS_ROLE_UNPROMOTED = PcmkStatusRoleType("Unpromoted")
+PCMK_STATUS_ROLE_STARTING = PcmkStatusRoleType("Starting")
+PCMK_STATUS_ROLE_STOPPING = PcmkStatusRoleType("Stopping")
+PCMK_STATUS_ROLE_UNKNOWN = PcmkStatusRoleType("Unknown")
PCMK_ON_FAIL_ACTION_IGNORE = PcmkOnFailAction("ignore")
PCMK_ON_FAIL_ACTION_BLOCK = PcmkOnFailAction("block")
PCMK_ON_FAIL_ACTION_DEMOTE = PcmkOnFailAction("demote")
@@ -29,6 +38,20 @@ PCMK_ROLES_RUNNING = (
(PCMK_ROLE_STARTED,) + PCMK_ROLES_PROMOTED + PCMK_ROLES_UNPROMOTED
)
PCMK_ROLES = (PCMK_ROLE_STOPPED,) + PCMK_ROLES_RUNNING
+PCMK_STATUS_ROLES_RUNNING = (
+ PCMK_STATUS_ROLE_STARTED,
+ PCMK_STATUS_ROLE_PROMOTED,
+ PCMK_STATUS_ROLE_UNPROMOTED,
+)
+PCMK_STATUS_ROLES_PENDING = (
+ PCMK_STATUS_ROLE_STARTING,
+ PCMK_STATUS_ROLE_STOPPING,
+)
+PCMK_STATUS_ROLES = (
+ PCMK_STATUS_ROLES_RUNNING
+ + PCMK_STATUS_ROLES_PENDING
+ + (PCMK_STATUS_ROLE_STOPPED,)
+)
PCMK_ACTION_START = PcmkAction("start")
PCMK_ACTION_STOP = PcmkAction("stop")
PCMK_ACTION_PROMOTE = PcmkAction("promote")
diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py
index 188295f2..417a3f4a 100644
--- a/pcs/common/reports/codes.py
+++ b/pcs/common/reports/codes.py
@@ -156,6 +156,28 @@ CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = M(
CLUSTER_SETUP_SUCCESS = M("CLUSTER_SETUP_SUCCESS")
CLUSTER_START_STARTED = M("CLUSTER_START_STARTED")
CLUSTER_START_SUCCESS = M("CLUSTER_START_SUCCESS")
+CLUSTER_STATUS_BUNDLE_DIFFERENT_REPLICAS = M(
+ "CLUSTER_STATUS_BUNDLE_DIFFERENT_REPLICAS"
+)
+CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT = M(
+ "CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT"
+)
+CLUSTER_STATUS_BUNDLE_REPLICA_INVALID_COUNT = M(
+ "CLUSTER_STATUS_BUNDLE_REPLICA_INVALID_COUNT"
+)
+CLUSTER_STATUS_BUNDLE_REPLICA_MISSING_REMOTE = M(
+ "CLUSTER_STATUS_BUNDLE_REPLICA_MISSING_REMOTE"
+)
+CLUSTER_STATUS_BUNDLE_REPLICA_NO_CONTAINER = M(
+ "CLUSTER_STATUS_BUNDLE_REPLICA_NO_CONTAINER"
+)
+CLUSTER_STATUS_CLONE_MEMBERS_DIFFERENT_IDS = M(
+ "CLUSTER_STATUS_CLONE_MEMBERS_DIFFERENT_IDS"
+)
+CLUSTER_STATUS_CLONE_MIXED_MEMBERS = M("CLUSTER_STATUS_CLONE_MIXED_MEMBERS")
+CLUSTER_STATUS_EMPTY_NODE_NAME = M("CLUSTER_STATUS_EMPTY_NODE_NAME")
+CLUSTER_STATUS_UNEXPECTED_MEMBER = M("CLUSTER_STATUS_UNEXPECTED_MEMBER")
+CLUSTER_STATUS_UNKNOWN_PCMK_ROLE = M("CLUSTER_STATUS_UNKNOWN_PCMK_ROLE")
CLUSTER_UUID_ALREADY_SET = M("CLUSTER_UUID_ALREADY_SET")
CLUSTER_WILL_BE_DESTROYED = M("CLUSTER_WILL_BE_DESTROYED")
COMMAND_INVALID_PAYLOAD = M("COMMAND_INVALID_PAYLOAD")
diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py
index e37cdf7c..1e98711c 100644
--- a/pcs/common/reports/messages.py
+++ b/pcs/common/reports/messages.py
@@ -3276,6 +3276,220 @@ class BadClusterStateFormat(ReportItemMessage):
return "cannot load cluster status, xml does not conform to the schema"
+@dataclass(frozen=True)
+class ClusterStatusUnknownPcmkRole(ReportItemMessage):
+ """
+ Value of pcmk role in the status xml is not valid
+
+ role -- value of the role attribute
+ resource_id -- id of the resource
+ """
+
+ role: Optional[str]
+ resource_id: str
+ _code = codes.CLUSTER_STATUS_UNKNOWN_PCMK_ROLE
+
+ @property
+ def message(self) -> str:
+ return (
+ "Attribute of resource with id '{id}' "
+ "contains {invalid} pcmk role{role}."
+ ).format(
+ id=self.resource_id,
+ invalid="empty" if not self.role else "invalid",
+ role=f" '{self.role}'" if self.role else "",
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusEmptyNodeName(ReportItemMessage):
+ """
+ Resource in the status xml contains node with empty name
+
+ resource_id -- id of the resource
+ """
+
+ resource_id: str
+ _code = codes.CLUSTER_STATUS_EMPTY_NODE_NAME
+
+ @property
+ def message(self) -> str:
+ return (
+ f"Resource with id '{self.resource_id}' contains node "
+ "with empty name."
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusUnexpectedMember(ReportItemMessage):
+ """
+ Unexpected resource type is present as a child element
+ in another resource type
+
+ resource_id -- id of the outer resource
+ resource_type -- type of the outer resource
+ member_id -- id of the unexpected member
+ expected_types -- valid types for members
+ """
+
+ resource_id: str
+ resource_type: str
+ member_id: str
+ expected_types: list[str]
+ _code = codes.CLUSTER_STATUS_UNEXPECTED_MEMBER
+
+ @property
+ def message(self) -> str:
+ return (
+ f"Unexpected resource '{self.member_id}' inside of resource "
+ f"'{self.resource_id}' of type '{self.resource_type}'. "
+ f"Only resources of type {format_list(self.expected_types, '|')} "
+ f"can be in {self.resource_type}."
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusCloneMixedMembers(ReportItemMessage):
+ """
+ Members of multiple types are present in a clone in the status xml
+
+ clone_id -- id of the clone
+ """
+
+ clone_id: str
+ _code = codes.CLUSTER_STATUS_CLONE_MIXED_MEMBERS
+
+ @property
+ def message(self) -> str:
+ return f"Primitive and group members mixed in clone '{self.clone_id}'."
+
+
+@dataclass(frozen=True)
+class ClusterStatusCloneMembersDifferentIds(ReportItemMessage):
+ """
+ Clone instances in crm_mon status xml have different ids
+
+ clone_id -- id of the clone
+ """
+
+ clone_id: str
+ _code = codes.CLUSTER_STATUS_CLONE_MEMBERS_DIFFERENT_IDS
+
+ @property
+ def message(self) -> str:
+ return f"Members with different ids in clone '{self.clone_id}'."
+
+
+@dataclass(frozen=True)
+class ClusterStatusBundleReplicaNoContainer(ReportItemMessage):
+ """
+ Bundle replica is missing implicit container resource in the status xml
+
+ bundle_id -- id of the bundle
+ replica_id -- id of the replica
+ """
+
+ bundle_id: str
+ replica_id: str
+ _code = codes.CLUSTER_STATUS_BUNDLE_REPLICA_NO_CONTAINER
+
+ @property
+ def message(self) -> str:
+ return (
+ f"Replica '{self.replica_id}' of bundle '{self.bundle_id}' "
+ "is missing implicit container resource."
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusBundleReplicaMissingRemote(ReportItemMessage):
+ """
+ Bundle replica is missing implicit pacemaker remote resource
+ in the status xml
+
+ bundle_id -- id of the bundle
+ replica_id -- id of the replica
+ """
+
+ bundle_id: str
+ replica_id: str
+ _code = codes.CLUSTER_STATUS_BUNDLE_REPLICA_MISSING_REMOTE
+
+ @property
+ def message(self) -> str:
+ return (
+ f"Replica '{self.replica_id}' of bundle '{self.bundle_id}' is "
+ "missing implicit pacemaker remote resource while it must be "
+ "present."
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusBundleReplicaInvalidCount(ReportItemMessage):
+ """
+ Bundle replica has an invalid number of members in the status xml
+
+ bundle_id -- id of the bundle
+ replica_id -- id of the replica
+ """
+
+ bundle_id: str
+ replica_id: str
+ _code = codes.CLUSTER_STATUS_BUNDLE_REPLICA_INVALID_COUNT
+
+ @property
+ def message(self) -> str:
+ return (
+ f"Replica '{self.replica_id}' of bundle '{self.bundle_id}' has "
+ f"invalid number of members. Expecting 2-4 members."
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusBundleMemberIdAsImplicit(ReportItemMessage):
+ """
+ Member of bundle in cluster status xml has the same id as one of
+ the implicit resources
+
+ bundle_id -- id of the bundle
+ bad_ids -- ids of the bundle members that clash with the implicit resources
+ """
+
+ bundle_id: str
+ bad_ids: list[str]
+ _code = codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT
+
+ @property
+ def message(self) -> str:
+ return (
+ "Skipping bundle '{bundle_id}': {resource_word} "
+ "{bad_ids} {has} the same id as some of the "
+ "implicit bundle resources."
+ ).format(
+ bundle_id=self.bundle_id,
+ resource_word=format_plural(self.bad_ids, "resource"),
+ bad_ids=format_list(self.bad_ids),
+ has=format_plural(self.bad_ids, "has"),
+ )
+
+
+@dataclass(frozen=True)
+class ClusterStatusBundleDifferentReplicas(ReportItemMessage):
+ """
+ Replicas of bundle are different in the cluster status xml
+
+ bundle_id -- id of the bundle
+ """
+
+ bundle_id: str
+ _code = codes.CLUSTER_STATUS_BUNDLE_DIFFERENT_REPLICAS
+
+ @property
+ def message(self) -> str:
+ return f"Replicas of bundle '{self.bundle_id}' are not the same."
+
+
@dataclass(frozen=True)
class WaitForIdleStarted(ReportItemMessage):
"""
diff --git a/pcs/common/status_dto.py b/pcs/common/status_dto.py
new file mode 100644
index 00000000..dcc94eca
--- /dev/null
+++ b/pcs/common/status_dto.py
@@ -0,0 +1,91 @@
+from dataclasses import dataclass
+from typing import (
+ Optional,
+ Sequence,
+ Union,
+)
+
+from pcs.common.const import (
+ PcmkRoleType,
+ PcmkStatusRoleType,
+)
+from pcs.common.interface.dto import DataTransferObject
+
+
+@dataclass(frozen=True)
+class PrimitiveStatusDto(DataTransferObject):
+ # pylint: disable=too-many-instance-attributes
+ resource_id: str
+ resource_agent: str
+ role: PcmkStatusRoleType
+ target_role: Optional[PcmkRoleType]
+ active: bool
+ orphaned: bool
+ blocked: bool
+ maintenance: bool
+ description: Optional[str]
+ failed: bool
+ managed: bool
+ failure_ignored: bool
+ node_names: list[str]
+ pending: Optional[str]
+ locked_to: Optional[str]
+
+
+@dataclass(frozen=True)
+class GroupStatusDto(DataTransferObject):
+ resource_id: str
+ maintenance: bool
+ description: Optional[str]
+ managed: bool
+ disabled: bool
+ members: Sequence[PrimitiveStatusDto]
+
+
+@dataclass(frozen=True)
+class CloneStatusDto(DataTransferObject):
+ # pylint: disable=too-many-instance-attributes
+ resource_id: str
+ multi_state: bool
+ unique: bool
+ maintenance: bool
+ description: Optional[str]
+ managed: bool
+ disabled: bool
+ failed: bool
+ failure_ignored: bool
+ target_role: Optional[PcmkRoleType]
+ instances: Union[Sequence[PrimitiveStatusDto], Sequence[GroupStatusDto]]
+
+
+@dataclass(frozen=True)
+class BundleReplicaStatusDto(DataTransferObject):
+ replica_id: str
+ member: Optional[PrimitiveStatusDto]
+ remote: Optional[PrimitiveStatusDto]
+ container: PrimitiveStatusDto
+ ip_address: Optional[PrimitiveStatusDto]
+
+
+@dataclass(frozen=True)
+class BundleStatusDto(DataTransferObject):
+ # pylint: disable=too-many-instance-attributes
+ resource_id: str
+ type: str
+ image: str
+ unique: bool
+ maintenance: bool
+ description: Optional[str]
+ managed: bool
+ failed: bool
+ replicas: Sequence[BundleReplicaStatusDto]
+
+
+AnyResourceStatusDto = Union[
+ PrimitiveStatusDto, GroupStatusDto, CloneStatusDto, BundleStatusDto
+]
+
+
+@dataclass(frozen=True)
+class ResourcesStatusDto(DataTransferObject):
+ resources: Sequence[AnyResourceStatusDto]
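
Not part of the patch, just a sketch of how the DTOs above compose: the "dummy" primitive that appears in the crm_mon.all_resources.xml test fixture further down would be represented roughly like this:

    from pcs.common import const
    from pcs.common.status_dto import PrimitiveStatusDto, ResourcesStatusDto

    dummy = PrimitiveStatusDto(
        resource_id="dummy",
        resource_agent="ocf:pacemaker:Dummy",
        role=const.PCMK_STATUS_ROLE_STARTED,
        target_role=None,
        active=True,
        orphaned=False,
        blocked=False,
        maintenance=False,
        description=None,
        failed=False,
        managed=True,
        failure_ignored=False,
        node_names=["node1"],
        pending=None,
        locked_to=None,
    )
    # a whole cluster status is just a sequence of such resource DTOs
    resources_dto = ResourcesStatusDto(resources=[dummy])
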
diff --git a/pcs/lib/commands/status.py b/pcs/lib/commands/status.py
index ec7848d1..8b644ac1 100644
--- a/pcs/lib/commands/status.py
+++ b/pcs/lib/commands/status.py
@@ -17,6 +17,7 @@ from pcs.common.node_communicator import Communicator
from pcs.common.reports import ReportProcessor
from pcs.common.reports.item import ReportItem
from pcs.common.services.interfaces import ServiceManagerInterface
+from pcs.common.status_dto import ResourcesStatusDto
from pcs.common.str_tools import (
format_list,
indent,
@@ -48,6 +49,7 @@ from pcs.lib.pacemaker.live import (
get_cluster_status_xml_raw,
get_ticket_status_text,
)
+from pcs.lib.pacemaker.status import status_xml_to_dto
from pcs.lib.resource_agent.const import STONITH_ACTION_REPLACED_BY
from pcs.lib.sbd import get_sbd_service_name
@@ -69,6 +71,17 @@ def pacemaker_status_xml(env: LibraryEnvironment) -> str:
raise LibraryError(output=stdout)
+def resources_status(env: LibraryEnvironment) -> ResourcesStatusDto:
+ """
+ Return pacemaker status of configured resources as DTO
+
+ env -- LibraryEnvironment
+ """
+ status_xml = env.get_cluster_state()
+
+ return status_xml_to_dto(env.report_processor, status_xml)
+
+
def full_cluster_status_plaintext(
env: LibraryEnvironment,
hide_inactive_resources: bool = False,
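
A minimal sketch (not part of the patch) of calling the new library command; env is assumed to be a LibraryEnvironment obtained the usual way, and the command is also reachable through the lib wrapper entry registered above as lib.status.resources_status():

    from pcs.common.status_dto import ResourcesStatusDto
    from pcs.lib.commands import status

    def print_resource_ids(env) -> None:
        # env -- LibraryEnvironment, as in the signature above
        dto: ResourcesStatusDto = status.resources_status(env)
        for resource in dto.resources:
            # every DTO variant (primitive, group, clone, bundle)
            # carries a resource_id
            print(resource.resource_id)
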
diff --git a/pcs/lib/pacemaker/status.py b/pcs/lib/pacemaker/status.py
new file mode 100644
index 00000000..722ce03f
--- /dev/null
+++ b/pcs/lib/pacemaker/status.py
@@ -0,0 +1,509 @@
+from typing import (
+ Optional,
+ Sequence,
+ Union,
+ cast,
+)
+
+from lxml.etree import _Element
+
+from pcs.common import reports
+from pcs.common.const import (
+ PCMK_ROLE_UNKNOWN,
+ PCMK_ROLES,
+ PCMK_STATUS_ROLE_UNKNOWN,
+ PCMK_STATUS_ROLES,
+ PcmkRoleType,
+ PcmkStatusRoleType,
+)
+from pcs.common.reports import ReportProcessor
+from pcs.common.status_dto import (
+ AnyResourceStatusDto,
+ BundleReplicaStatusDto,
+ BundleStatusDto,
+ CloneStatusDto,
+ GroupStatusDto,
+ PrimitiveStatusDto,
+ ResourcesStatusDto,
+)
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker.values import is_true
+
+_PRIMITIVE_TAG = "resource"
+_GROUP_TAG = "group"
+_CLONE_TAG = "clone"
+_BUNDLE_TAG = "bundle"
+_REPLICA_TAG = "replica"
+
+
+def _primitive_to_dto(
+ reporter: ReportProcessor,
+ primitive_el: _Element,
+ remove_clone_suffix: bool = False,
+) -> PrimitiveStatusDto:
+ resource_id = _get_resource_id(reporter, primitive_el)
+ if remove_clone_suffix:
+ resource_id = _remove_clone_suffix(resource_id)
+
+ role = _get_role(reporter, primitive_el, resource_id)
+ target_role = _get_target_role(reporter, primitive_el, resource_id)
+
+ node_names = [
+ str(node.get("name")) for node in primitive_el.iterfind("node")
+ ]
+
+ if node_names and any(not name for name in node_names):
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusEmptyNodeName(resource_id)
+ )
+ )
+
+ if reporter.has_errors:
+ raise LibraryError()
+
+ return PrimitiveStatusDto(
+ resource_id,
+ str(primitive_el.get("resource_agent")),
+ role,
+ target_role,
+ is_true(primitive_el.get("active", "false")),
+ is_true(primitive_el.get("orphaned", "false")),
+ is_true(primitive_el.get("blocked", "false")),
+ is_true(primitive_el.get("maintenance", "false")),
+ primitive_el.get("description"),
+ is_true(primitive_el.get("failed", "false")),
+ is_true(primitive_el.get("managed", "false")),
+ is_true(primitive_el.get("failure_ignored", "false")),
+ [str(node.get("name")) for node in primitive_el.iterfind("node")],
+ primitive_el.get("pending"),
+ primitive_el.get("locked_to"),
+ )
+
+
+def _group_to_dto(
+ reporter: ReportProcessor,
+ group_el: _Element,
+ remove_clone_suffix: bool = False,
+) -> GroupStatusDto:
+ # the clone suffix is added even when the clone is not unique
+ group_id = _remove_clone_suffix(_get_resource_id(reporter, group_el))
+ members = []
+
+ for member in group_el:
+ if member.tag == _PRIMITIVE_TAG:
+ members.append(
+ _primitive_to_dto(reporter, member, remove_clone_suffix)
+ )
+ else:
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusUnexpectedMember(
+ group_id, "group", str(member.get("id")), ["primitive"]
+ )
+ )
+ )
+
+ if reporter.has_errors:
+ raise LibraryError()
+
+ return GroupStatusDto(
+ group_id,
+ is_true(group_el.get("maintenance", "false")),
+ group_el.get("description"),
+ is_true(group_el.get("managed", "false")),
+ is_true(group_el.get("disabled", "false")),
+ members,
+ )
+
+
+def _clone_to_dto(
+ reporter: ReportProcessor,
+ clone_el: _Element,
+ _remove_clone_suffix: bool = False,
+) -> CloneStatusDto:
+ clone_id = _get_resource_id(reporter, clone_el)
+ is_unique = is_true(clone_el.get("unique", "false"))
+
+ target_role = _get_target_role(reporter, clone_el, clone_id)
+
+ primitives = []
+ groups = []
+
+ for member in clone_el:
+ if member.tag == _PRIMITIVE_TAG:
+ primitives.append(_primitive_to_dto(reporter, member, is_unique))
+ elif member.tag == _GROUP_TAG:
+ groups.append(_group_to_dto(reporter, member, is_unique))
+ else:
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusUnexpectedMember(
+ clone_id,
+ "clone",
+ str(member.get("id")),
+ ["primitive", "group"],
+ )
+ )
+ )
+
+ reporter.report_list(
+ _validate_mixed_instance_types(primitives, groups, clone_id)
+ )
+
+ instances: Union[list[PrimitiveStatusDto], list[GroupStatusDto]]
+ if primitives:
+ reporter.report_list(
+ _validate_primitive_instance_ids(primitives, clone_id)
+ )
+ instances = primitives
+ else:
+ reporter.report_list(_validate_group_instance_ids(groups, clone_id))
+ instances = groups
+
+ if reporter.has_errors:
+ raise LibraryError()
+
+ return CloneStatusDto(
+ clone_id,
+ is_true(clone_el.get("multi_state", "false")),
+ is_unique,
+ is_true(clone_el.get("maintenance", "false")),
+ clone_el.get("description"),
+ is_true(clone_el.get("managed", "false")),
+ is_true(clone_el.get("disabled", "false")),
+ is_true(clone_el.get("failed", "false")),
+ is_true(clone_el.get("failure_ignored", "false")),
+ target_role,
+ instances,
+ )
+
+
+def _bundle_to_dto(
+ reporter: ReportProcessor,
+ bundle_el: _Element,
+ _remove_clone_suffix: bool = False,
+) -> Optional[BundleStatusDto]:
+ bundle_id = _get_resource_id(reporter, bundle_el)
+ bundle_type = str(bundle_el.get("type"))
+
+ replicas = []
+ for replica in bundle_el.iterfind(_REPLICA_TAG):
+ replica_dto = _replica_to_dto(reporter, replica, bundle_id, bundle_type)
+ if replica_dto is None:
+ # skip this bundle in status
+ return None
+ replicas.append(replica_dto)
+
+ reporter.report_list(_validate_replicas(replicas, bundle_id))
+
+ if reporter.has_errors:
+ raise LibraryError()
+
+ return BundleStatusDto(
+ bundle_id,
+ bundle_type,
+ str(bundle_el.get("image")),
+ is_true(bundle_el.get("unique", "false")),
+ is_true(bundle_el.get("maintenance", "false")),
+ bundle_el.get("description"),
+ is_true(bundle_el.get("managed", "false")),
+ is_true(bundle_el.get("failed", "false")),
+ replicas,
+ )
+
+
+_TAG_TO_FUNCTION = {
+ _PRIMITIVE_TAG: _primitive_to_dto,
+ _GROUP_TAG: _group_to_dto,
+ _CLONE_TAG: _clone_to_dto,
+ _BUNDLE_TAG: _bundle_to_dto,
+}
+
+
+def status_xml_to_dto(
+ reporter: ReportProcessor, status: _Element
+) -> ResourcesStatusDto:
+ """
+ Return dto containing status of configured resources in the cluster
+
+ reporter -- ReportProcessor
+ status -- status xml document from crm_mon, validated using
+ the appropriate rng schema
+ """
+ resources = cast(list[_Element], status.xpath("resources/*"))
+
+ resource_dtos = [
+ _TAG_TO_FUNCTION[resource.tag](reporter, resource)
+ for resource in resources
+ if resource.tag in _TAG_TO_FUNCTION
+ ]
+
+ if reporter.has_errors:
+ raise LibraryError()
+
+ return ResourcesStatusDto(
+ cast(
+ list[AnyResourceStatusDto],
+ [dto for dto in resource_dtos if dto is not None],
+ )
+ )
+
+
+def _get_resource_id(reporter: ReportProcessor, resource: _Element) -> str:
+ resource_id = resource.get("id")
+ if not resource_id:
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.InvalidIdIsEmpty("resource id")
+ )
+ )
+ return str(resource_id)
+
+
+def _get_role(
+ reporter: ReportProcessor, resource: _Element, resource_id: str
+) -> PcmkStatusRoleType:
+ role = resource.get("role")
+ if role is None or role not in PCMK_STATUS_ROLES:
+ reporter.report(
+ reports.ReportItem.warning(
+ reports.messages.ClusterStatusUnknownPcmkRole(role, resource_id)
+ )
+ )
+ return PCMK_STATUS_ROLE_UNKNOWN
+ return PcmkStatusRoleType(role)
+
+
+def _get_target_role(
+ reporter: ReportProcessor, resource: _Element, resource_id: str
+) -> Optional[PcmkRoleType]:
+ target_role = resource.get("target_role")
+ if target_role is None:
+ return None
+ if target_role not in PCMK_ROLES:
+ reporter.report(
+ reports.ReportItem.warning(
+ reports.messages.ClusterStatusUnknownPcmkRole(
+ target_role, resource_id
+ )
+ )
+ )
+ return PCMK_ROLE_UNKNOWN
+ return PcmkRoleType(target_role)
+
+
+def _remove_clone_suffix(resource_id: str) -> str:
+ if ":" in resource_id:
+ return resource_id.rsplit(":", 1)[0]
+ return resource_id
+
+
+def _validate_mixed_instance_types(
+ primitives: list[PrimitiveStatusDto],
+ groups: list[GroupStatusDto],
+ clone_id: str,
+) -> reports.ReportItemList:
+ if primitives and groups:
+ return [
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusCloneMixedMembers(clone_id)
+ )
+ ]
+ return []
+
+
+def _validate_primitive_instance_ids(
+ instances: list[PrimitiveStatusDto], clone_id: str
+) -> reports.ReportItemList:
+ if len(set(res.resource_id for res in instances)) > 1:
+ return [
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusCloneMembersDifferentIds(clone_id)
+ )
+ ]
+ return []
+
+
+def _validate_group_instance_ids(
+ instances: list[GroupStatusDto], clone_id: str
+) -> reports.ReportItemList:
+ group_ids = set(group.resource_id for group in instances)
+ children_ids = set(
+ tuple(child.resource_id for child in group.members)
+ for group in instances
+ )
+
+ if len(group_ids) > 1 or len(children_ids) > 1:
+ return [
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusCloneMembersDifferentIds(clone_id)
+ )
+ ]
+ return []
+
+
+def _replica_to_dto(
+ reporter: ReportProcessor,
+ replica_el: _Element,
+ bundle_id: str,
+ bundle_type: str,
+) -> Optional[BundleReplicaStatusDto]:
+ replica_id = str(replica_el.get("id"))
+
+ resources = [
+ _primitive_to_dto(reporter, resource)
+ for resource in replica_el.iterfind(_PRIMITIVE_TAG)
+ ]
+
+ duplicate_ids = _find_duplicate_ids(resources)
+ if duplicate_ids:
+ reporter.report(
+ reports.ReportItem.warning(
+ reports.messages.ClusterStatusBundleMemberIdAsImplicit(
+ bundle_id, duplicate_ids
+ )
+ )
+ )
+ return None
+
+ # TODO pacemaker will probably add prefix
+ # "pcmk-internal" to all implicit resources
+
+ container_resource = _get_implicit_resource(
+ resources,
+ f"{bundle_id}-{bundle_type}-{replica_id}",
+ True,
+ f"ocf:heartbeat:{bundle_type}",
+ )
+
+ if container_resource is None:
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusBundleReplicaNoContainer(
+ bundle_id, replica_id
+ )
+ )
+ )
+ raise LibraryError()
+
+ remote_resource = _get_implicit_resource(
+ resources, f"{bundle_id}-{replica_id}", True, "ocf:pacemaker:remote"
+ )
+
+ # implicit ip address resource might be present
+ ip_resource = None
+ if (remote_resource is not None and len(resources) == 2) or (
+ remote_resource is None and len(resources) == 1
+ ):
+ ip_resource = _get_implicit_resource(
+ resources, f"{bundle_id}-ip-", False, "ocf:heartbeat:IPaddr2"
+ )
+
+ if remote_resource is None and resources:
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusBundleReplicaMissingRemote(
+ bundle_id, replica_id
+ )
+ )
+ )
+ raise LibraryError()
+
+ member = None
+ if remote_resource:
+ if len(resources) == 1:
+ member = resources[0]
+ else:
+ reporter.report(
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusBundleReplicaInvalidCount(
+ bundle_id, replica_id
+ )
+ )
+ )
+ raise LibraryError()
+
+ return BundleReplicaStatusDto(
+ replica_id,
+ member,
+ remote_resource,
+ container_resource,
+ ip_resource,
+ )
+
+
+def _find_duplicate_ids(resources: Sequence[AnyResourceStatusDto]) -> list[str]:
+ seen = set()
+ duplicates = []
+ for resource in resources:
+ if resource.resource_id in seen:
+ duplicates.append(resource.resource_id)
+ else:
+ seen.add(resource.resource_id)
+ return duplicates
+
+
+def _get_implicit_resource(
+ primitives: list[PrimitiveStatusDto],
+ expected_id: str,
+ exact_match: bool,
+ resource_agent: str,
+) -> Optional[PrimitiveStatusDto]:
+ for primitive in primitives:
+ matching_id = (
+ exact_match
+ and primitive.resource_id == expected_id
+ or not exact_match
+ and primitive.resource_id.startswith(expected_id)
+ )
+
+ if matching_id and primitive.resource_agent == resource_agent:
+ primitives.remove(primitive)
+ return primitive
+
+ return None
+
+
+def _validate_replicas(
+ replicas: Sequence[BundleReplicaStatusDto], bundle_id: str
+) -> reports.ReportItemList:
+ if not replicas:
+ return []
+
+ member = replicas[0].member
+ ip = replicas[0].ip_address
+ container = replicas[0].container
+
+ for replica in replicas:
+ if (
+ not _cmp_replica_members(member, replica.member, True)
+ or not _cmp_replica_members(ip, replica.ip_address, False)
+ or not _cmp_replica_members(container, replica.container, False)
+ ):
+ return [
+ reports.ReportItem.error(
+ reports.messages.ClusterStatusBundleDifferentReplicas(
+ bundle_id
+ )
+ )
+ ]
+ return []
+
+
+def _cmp_replica_members(
+ left: Optional[PrimitiveStatusDto],
+ right: Optional[PrimitiveStatusDto],
+ compare_ids: bool,
+) -> bool:
+ if left is None and right is None:
+ return True
+ if left is None:
+ return False
+ if right is None:
+ return False
+
+ if left.resource_agent != right.resource_agent:
+ return False
+
+ return not compare_ids or left.resource_id == right.resource_id
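
A sketch of feeding crm_mon output into the converter above (not part of the patch); reporter is any ReportProcessor, e.g. env.report_processor in library code or MockLibraryReportProcessor in the tests below, and the XML is assumed to be already validated against the pacemaker rng schema as the docstring of status_xml_to_dto requires:

    from lxml import etree

    from pcs.common.reports import ReportProcessor
    from pcs.common.status_dto import ResourcesStatusDto
    from pcs.lib.pacemaker.status import status_xml_to_dto

    def load_resources_status(
        reporter: ReportProcessor, crm_mon_xml: str
    ) -> ResourcesStatusDto:
        # crm_mon_xml -- output of "crm_mon --output-as xml", already
        # validated against the api-result.rng schema
        return status_xml_to_dto(reporter, etree.fromstring(crm_mon_xml))
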
diff --git a/pcs_test/Makefile.am b/pcs_test/Makefile.am
index 32ac5eee..f036ded5 100644
--- a/pcs_test/Makefile.am
+++ b/pcs_test/Makefile.am
@@ -32,6 +32,7 @@ EXTRA_DIST = \
resources/corosync-qdevice.conf \
resources/corosync-some-node-names.conf \
resources/crm_mon.minimal.xml \
+ resources/crm_mon.all_resources.xml \
resources/fenced_metadata.xml \
resources/schedulerd_metadata.xml \
resources/pcmk_api_rng/api-result.rng \
@@ -322,6 +323,7 @@ EXTRA_DIST = \
tier0/lib/pacemaker/test_live.py \
tier0/lib/pacemaker/test_simulate.py \
tier0/lib/pacemaker/test_state.py \
+ tier0/lib/pacemaker/test_status.py \
tier0/lib/pacemaker/test_values.py \
tier0/lib/permissions/__init__.py \
tier0/lib/permissions/config/__init__.py \
diff --git a/pcs_test/resources/crm_mon.all_resources.xml b/pcs_test/resources/crm_mon.all_resources.xml
new file mode 100644
index 00000000..e493d308
--- /dev/null
+++ b/pcs_test/resources/crm_mon.all_resources.xml
@@ -0,0 +1,40 @@
+<pacemaker-result api-version="2.30" request="crm_mon --output-as xml">
+ <summary>
+ <stack type="corosync" />
+ <current_dc present="false" />
+ <last_update time="Wed Nov 6 13:45:41 2019" />
+ <last_change time="Wed Nov 6 10:42:54 2019" user="hacluster" client="crmd" origin="node1" />
+ <nodes_configured number="0" />
+ <resources_configured number="0" disabled="0" blocked="0" />
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
+ </summary>
+ <nodes>
+ <node name="node1" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="3.17.4" shutdown="false" expected_up="true" is_dc="false" resources_running="16" type="member"/>
+ </nodes>
+ <resources>
+ <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <group id="group" number_resources="2" maintenance="false" managed="true" disabled="false">
+ <resource id="grouped" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </group>
+ <clone id="clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+ <resource id="cloned" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </clone>
+ <bundle id="bundle" type="podman" image="localhost/pcmktest:http" unique="false" maintenance="false" managed="true" failed="false">
+ <replica id="0">
+ <resource id="bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </replica>
+ </bundle>
+ </resources>
+ <status code="0" message="OK" />
+</pacemaker-result>
diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py
index 58a70a37..b60360e4 100644
--- a/pcs_test/tier0/common/reports/test_messages.py
+++ b/pcs_test/tier0/common/reports/test_messages.py
@@ -5816,3 +5816,145 @@ class CannotCreateDefaultClusterPropertySet(NameBuildTest):
"cib-bootstrap-options"
),
)
+
+
+class ClusterStatusBundleDifferentReplicas(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ "Replicas of bundle 'bundle' are not the same.",
+ reports.ClusterStatusBundleDifferentReplicas("bundle"),
+ )
+
+
+class ClusterStatusBundleMemberIdAsImplicit(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ (
+ "Skipping bundle 'bundle': resource 'test' has "
+ "the same id as some of the implicit bundle resources."
+ ),
+ reports.ClusterStatusBundleMemberIdAsImplicit("bundle", ["test"]),
+ )
+
+ def test_multiple_ids(self):
+ self.assert_message_from_report(
+ (
+ "Skipping bundle 'bundle': resources 'test1', 'test2' have "
+ "the same id as some of the implicit bundle resources."
+ ),
+ reports.ClusterStatusBundleMemberIdAsImplicit(
+ "bundle", ["test1", "test2"]
+ ),
+ )
+
+
+class ClusterStatusBundleReplicaInvalidCount(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ (
+ "Replica '0' of bundle 'bundle' has invalid number of members. "
+ "Expecting 2-4 members."
+ ),
+ reports.ClusterStatusBundleReplicaInvalidCount("bundle", "0"),
+ )
+
+
+class ClusterStatusBundleReplicaMissingRemote(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ (
+ "Replica '0' of bundle 'bundle' is missing implicit pacemaker "
+ "remote resource while it must be present."
+ ),
+ reports.ClusterStatusBundleReplicaMissingRemote("bundle", "0"),
+ )
+
+
+class ClusterStatusBundleReplicaNoContainer(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ (
+ "Replica '0' of bundle 'bundle' is missing implicit container "
+ "resource."
+ ),
+ reports.ClusterStatusBundleReplicaNoContainer("bundle", "0"),
+ )
+
+
+class ClusterStatusCloneMembersDifferentIds(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ "Members with different ids in clone 'clone'.",
+ reports.ClusterStatusCloneMembersDifferentIds("clone"),
+ )
+
+
+class ClusterStatusCloneMixedMembers(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ "Primitive and group members mixed in clone 'clone'.",
+ reports.ClusterStatusCloneMixedMembers("clone"),
+ )
+
+
+class ClusterStatusEmptyNodeName(NameBuildTest):
+ def test_message(self):
+ self.assert_message_from_report(
+ "Resource with id 'resource' contains node with empty name.",
+ reports.ClusterStatusEmptyNodeName("resource"),
+ )
+
+
+class ClusterStatusUnexpectedMember(NameBuildTest):
+ def test_one_expected(self):
+ self.assert_message_from_report(
+ (
+ "Unexpected resource 'member' inside of resource 'resource' of "
+ "type 'group'. Only resources of type 'primitive' "
+ "can be in group."
+ ),
+ reports.ClusterStatusUnexpectedMember(
+ resource_id="resource",
+ resource_type="group",
+ member_id="member",
+ expected_types=["primitive"],
+ ),
+ )
+
+ def test_multiple_expected(self):
+ self.assert_message_from_report(
+ (
+ "Unexpected resource 'member' inside of resource 'resource' of "
+ "type 'clone'. Only resources of type 'group'|'primitive' "
+ "can be in clone."
+ ),
+ reports.ClusterStatusUnexpectedMember(
+ resource_id="resource",
+ resource_type="clone",
+ member_id="member",
+ expected_types=["primitive", "group"],
+ ),
+ )
+
+
+class ClusterStatusUnknownPcmkRole(NameBuildTest):
+ def test_no_role(self):
+ self.assert_message_from_report(
+ "Attribute of resource with id 'resource' contains empty pcmk role.",
+ reports.ClusterStatusUnknownPcmkRole(None, "resource"),
+ )
+
+ def test_empty_role(self):
+ self.assert_message_from_report(
+ "Attribute of resource with id 'resource' contains empty pcmk role.",
+ reports.ClusterStatusUnknownPcmkRole("", "resource"),
+ )
+
+ def test_role(self):
+ self.assert_message_from_report(
+ (
+ "Attribute of resource with id 'resource' contains invalid "
+ "pcmk role 'NotValidRole'."
+ ),
+ reports.ClusterStatusUnknownPcmkRole("NotValidRole", "resource"),
+ )
diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py
index ce98ec63..a5a395b5 100644
--- a/pcs_test/tier0/lib/commands/test_status.py
+++ b/pcs_test/tier0/lib/commands/test_status.py
@@ -7,7 +7,16 @@ from unittest import (
from pcs import settings
from pcs.common import file_type_codes
+from pcs.common.const import PCMK_STATUS_ROLE_STARTED
from pcs.common.reports import codes as report_codes
+from pcs.common.status_dto import (
+ BundleReplicaStatusDto,
+ BundleStatusDto,
+ CloneStatusDto,
+ GroupStatusDto,
+ PrimitiveStatusDto,
+ ResourcesStatusDto,
+)
from pcs.lib.booth import constants
from pcs.lib.commands import status
from pcs.lib.errors import LibraryError
@@ -22,6 +31,7 @@ from pcs_test.tools.command_env.config_runner_pcmk import (
RULE_EXPIRED_RETURNCODE,
RULE_IN_EFFECT_RETURNCODE,
)
+from pcs_test.tools.misc import get_test_resource as rc
from pcs_test.tools.misc import read_test_resource as rc_read
@@ -1254,3 +1264,132 @@ class FullClusterStatusPlaintextBoothWarning(FullClusterStatusPlaintextBase):
).encode("utf-8"),
)
self._assert_status_output()
+
+
+def _fixture_primitive_resource_dto(
+ resource_id: str, resource_agent: str
+) -> PrimitiveStatusDto:
+ return PrimitiveStatusDto(
+ resource_id,
+ resource_agent,
+ PCMK_STATUS_ROLE_STARTED,
+ None,
+ True,
+ False,
+ False,
+ False,
+ None,
+ False,
+ True,
+ False,
+ ["node1"],
+ None,
+ None,
+ )
+
+
+@mock.patch.object(
+ settings,
+ "pacemaker_api_result_schema",
+ rc("pcmk_api_rng/api-result.rng"),
+)
+class ResourcesStatus(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_empty_resources(self):
+ self.config.runner.pcmk.load_state()
+
+ result = status.resources_status(self.env_assist.get_env())
+ self.assertEqual(result, ResourcesStatusDto([]))
+
+ def test_bad_xml(self):
+ self.config.runner.pcmk.load_state(
+ resources="""
+ <resources>
+ <resource />
+ </resources>
+ """,
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: status.resources_status(
+ self.env_assist.get_env(),
+ ),
+ [fixture.error(report_codes.BAD_CLUSTER_STATE_FORMAT)],
+ False,
+ )
+
+ def test_all_resources(self):
+ self.config.runner.pcmk.load_state(
+ filename=rc("crm_mon.all_resources.xml")
+ )
+
+ result = status.resources_status(self.env_assist.get_env())
+
+ self.assertTrue(len(result.resources) == 4)
+ self.assertEqual(
+ result.resources[0],
+ _fixture_primitive_resource_dto("dummy", "ocf:pacemaker:Dummy"),
+ )
+ self.assertEqual(
+ result.resources[1],
+ GroupStatusDto(
+ "group",
+ False,
+ None,
+ True,
+ False,
+ members=[
+ _fixture_primitive_resource_dto(
+ "grouped", "ocf:pacemaker:Dummy"
+ )
+ ],
+ ),
+ )
+ self.assertEqual(
+ result.resources[2],
+ CloneStatusDto(
+ "clone",
+ False,
+ False,
+ False,
+ None,
+ True,
+ False,
+ False,
+ False,
+ None,
+ instances=[
+ _fixture_primitive_resource_dto(
+ "cloned", "ocf:pacemaker:Dummy"
+ )
+ ],
+ ),
+ )
+ self.assertEqual(
+ result.resources[3],
+ BundleStatusDto(
+ "bundle",
+ "podman",
+ "localhost/pcmktest:http",
+ False,
+ False,
+ None,
+ True,
+ False,
+ [
+ BundleReplicaStatusDto(
+ "0",
+ None,
+ None,
+ _fixture_primitive_resource_dto(
+ "bundle-podman-0", "ocf:heartbeat:podman"
+ ),
+ _fixture_primitive_resource_dto(
+ "bundle-ip-192.168.122.250", "ocf:heartbeat:IPaddr2"
+ ),
+ )
+ ],
+ ),
+ )
diff --git a/pcs_test/tier0/lib/pacemaker/test_status.py b/pcs_test/tier0/lib/pacemaker/test_status.py
new file mode 100644
index 00000000..451fb584
--- /dev/null
+++ b/pcs_test/tier0/lib/pacemaker/test_status.py
@@ -0,0 +1,1741 @@
+# pylint: disable=too-many-lines
+from typing import (
+ Optional,
+ Sequence,
+ Union,
+)
+from unittest import TestCase
+
+from lxml import etree
+
+from pcs.common import reports
+from pcs.common.const import (
+ PCMK_ROLE_STARTED,
+ PCMK_ROLE_UNKNOWN,
+ PCMK_ROLES,
+ PCMK_STATUS_ROLE_STARTED,
+ PCMK_STATUS_ROLE_STOPPED,
+ PCMK_STATUS_ROLE_UNKNOWN,
+ PCMK_STATUS_ROLE_UNPROMOTED,
+ PCMK_STATUS_ROLES,
+ PCMK_STATUS_ROLES_PENDING,
+ PCMK_STATUS_ROLES_RUNNING,
+ PcmkStatusRoleType,
+)
+from pcs.common.status_dto import (
+ BundleReplicaStatusDto,
+ BundleStatusDto,
+ CloneStatusDto,
+ GroupStatusDto,
+ PrimitiveStatusDto,
+ ResourcesStatusDto,
+)
+from pcs.lib.pacemaker import status
+
+from pcs_test.tools import fixture
+from pcs_test.tools.assertions import (
+ assert_raise_library_error,
+ assert_report_item_list_equal,
+)
+from pcs_test.tools.custom_mock import MockLibraryReportProcessor
+
+
+def fixture_primitive_xml(
+ resource_id: str = "resource",
+ resource_agent: str = "ocf:heartbeat:Dummy",
+ role: PcmkStatusRoleType = PCMK_STATUS_ROLE_STARTED,
+ target_role: Optional[str] = None,
+ managed: bool = True,
+ node_names: Sequence[str] = ("node1",),
+ add_optional_args: bool = False,
+) -> str:
+ target_role = (
+ f'target_role="{target_role}"' if target_role is not None else ""
+ )
+ active = role in PCMK_STATUS_ROLES_RUNNING
+ description = 'description="Test description"' if add_optional_args else ""
+ pending = 'pending="test"' if add_optional_args else ""
+ locked_to = 'locked_to="test"' if add_optional_args else ""
+
+ nodes = "\n".join(
+ f'<node name="{node}" id="{i}" cached="true"/>'
+ for (i, node) in enumerate(node_names)
+ )
+
+ return f"""
+ <resource
+ id="{resource_id}"
+ resource_agent="{resource_agent}"
+ role="{role}"
+ {target_role}
+ active="{active}"
+ orphaned="false"
+ blocked="false"
+ maintenance="false"
+ {description}
+ managed="{managed}"
+ failed="false"
+ failure_ignored="false"
+ {pending}
+ {locked_to}
+ nodes_running_on="{len(node_names)}"
+ >
+ {nodes}
+ </resource>
+ """
+
+
+def fixture_primitive_dto(
+ resource_id: str = "resource",
+ resource_agent: str = "ocf:heartbeat:Dummy",
+ role: PcmkStatusRoleType = PCMK_STATUS_ROLE_STARTED,
+ target_role: Optional[str] = None,
+ managed: bool = True,
+ node_names: Sequence[str] = ("node1",),
+ add_optional_args: bool = False,
+) -> PrimitiveStatusDto:
+ return PrimitiveStatusDto(
+ resource_id,
+ resource_agent,
+ role,
+ target_role,
+ active=role in PCMK_STATUS_ROLES_RUNNING,
+ orphaned=False,
+ blocked=False,
+ maintenance=False,
+ description="Test description" if add_optional_args else None,
+ managed=managed,
+ failed=False,
+ failure_ignored=False,
+ node_names=list(node_names),
+ pending="test" if add_optional_args else None,
+ locked_to="test" if add_optional_args else None,
+ )
+
+
+def fixture_group_xml(
+ resource_id: str = "resource-group",
+ description: Optional[str] = None,
+ members: Sequence[str] = (),
+) -> str:
+ description = (
+ f'description="{description}"' if description is not None else ""
+ )
+ members = "\n".join(members)
+ return f"""
+ <group
+ id="{resource_id}"
+ number_resources="{len(members)}"
+ maintenance="false"
+ {description}
+ managed="true"
+ disabled="false"
+ >
+ {members}
+ </group>
+ """
+
+
+def fixture_group_dto(
+ resource_id: str = "resource-group",
+ description: Optional[str] = None,
+ members: Sequence[PrimitiveStatusDto] = (),
+) -> GroupStatusDto:
+ return GroupStatusDto(
+ resource_id,
+ maintenance=False,
+ description=description,
+ managed=True,
+ disabled=False,
+ members=list(members),
+ )
+
+
+def fixture_clone_xml(
+ resource_id: str = "resource-clone",
+ multi_state: bool = False,
+ unique: bool = False,
+ description: Optional[str] = None,
+ target_role: Optional[str] = None,
+ instances: Sequence[str] = (),
+) -> str:
+ description = (
+ f'description="{description}"' if description is not None else ""
+ )
+ target_role = (
+ f'target_role="{target_role}"' if target_role is not None else ""
+ )
+ instances = "\n".join(instances)
+ return f"""
+ <clone
+ id="{resource_id}"
+ multi_state="{multi_state}"
+ unique="{unique}"
+ maintenance="false"
+ {description}
+ managed="true"
+ disabled="false"
+ failed="false"
+ failure_ignored="false"
+ {target_role}
+ >
+ {instances}
+ </clone>
+ """
+
+
+def fixture_clone_dto(
+ resource_id: str = "resource-clone",
+ multi_state: bool = False,
+ unique: bool = False,
+ description: Optional[str] = None,
+ target_role: Optional[str] = None,
+ instances: Union[
+ Sequence[PrimitiveStatusDto], Sequence[GroupStatusDto]
+ ] = (),
+) -> CloneStatusDto:
+ return CloneStatusDto(
+ resource_id,
+ multi_state,
+ unique,
+ maintenance=False,
+ description=description,
+ managed=True,
+ disabled=False,
+ failed=False,
+ failure_ignored=False,
+ target_role=target_role,
+ instances=list(instances),
+ )
+
+
+def fixture_replica_xml(
+ bundle_id: str = "resource-bundle",
+ replica_id: str = "0",
+ bundle_type: str = "podman",
+ ip: bool = False,
+ node_name: str = "node1",
+ member: Optional[str] = None,
+) -> str:
+ ip_resource = fixture_primitive_xml(
+ resource_id=f"{bundle_id}-ip-192.168.122.{replica_id}",
+ resource_agent="ocf:heartbeat:IPaddr2",
+ node_names=[node_name],
+ )
+ remote_resource = fixture_primitive_xml(
+ resource_id=f"{bundle_id}-{replica_id}",
+ resource_agent="ocf:pacemaker:remote",
+ node_names=[node_name],
+ )
+ container_resource = fixture_primitive_xml(
+ resource_id=f"{bundle_id}-{bundle_type}-{replica_id}",
+ resource_agent=f"ocf:heartbeat:{bundle_type}",
+ node_names=[node_name],
+ )
+ return f"""
+ <replica id="{replica_id}">
+ {ip_resource if ip else ""}
+ {member if member is not None else ""}
+ {container_resource}
+ {remote_resource if member is not None else ""}
+ </replica>
+ """
+
+
+def fixture_replica_dto(
+ bundle_id: str = "resource-bundle",
+ replica_id: str = "0",
+ bundle_type: str = "podman",
+ ip: bool = False,
+ node_name: str = "node1",
+ member: Optional[PrimitiveStatusDto] = None,
+) -> BundleReplicaStatusDto:
+ ip_resource = fixture_primitive_dto(
+ resource_id=f"{bundle_id}-ip-192.168.122.{replica_id}",
+ resource_agent="ocf:heartbeat:IPaddr2",
+ node_names=[node_name],
+ )
+ remote_resource = fixture_primitive_dto(
+ resource_id=f"{bundle_id}-{replica_id}",
+ resource_agent="ocf:pacemaker:remote",
+ node_names=[node_name],
+ )
+ container_resource = fixture_primitive_dto(
+ resource_id=f"{bundle_id}-{bundle_type}-{replica_id}",
+ resource_agent=f"ocf:heartbeat:{bundle_type}",
+ node_names=[node_name],
+ )
+ return BundleReplicaStatusDto(
+ replica_id,
+ member,
+ remote_resource if member is not None else None,
+ container_resource,
+ ip_resource if ip else None,
+ )
+
+
+def fixture_bundle_xml(
+ resource_id: str = "resource-bundle", replicas: Sequence[str] = ()
+) -> str:
+ replicas = "\n".join(replicas)
+ return f"""
+ <bundle
+ id="{resource_id}"
+ type="podman"
+ image="localhost/pcmktest:http"
+ unique="false"
+ maintenance="false"
+ managed="true"
+ failed="false"
+ >
+ {replicas}
+ </bundle>
+ """
+
+
+def fixture_bundle_dto(
+ resource_id: str = "resource-bundle",
+ replicas: Sequence[BundleReplicaStatusDto] = (),
+) -> BundleStatusDto:
+ return BundleStatusDto(
+ resource_id,
+ "podman",
+ "localhost/pcmktest:http",
+ False,
+ False,
+ None,
+ True,
+ False,
+ list(replicas),
+ )
+
+
+def fixture_crm_mon_xml(resources: list[str]) -> str:
+ # we only care about the resources element,
+ # omitting other parts to make the string shorter
+ resources = "\n".join(resources)
+ return f"""
+ <pacemaker-result
+ api-version="2.30"
+ request="/usr/sbin/crm_mon --one-shot --inactive --output-as xml"
+ >
+ <resources>
+ {resources}
+ </resources>
+ <status code="0" message="OK"/>
+ </pacemaker-result>
+ """
+
+
+class TestPrimitiveStatusToDto(TestCase):
+ # pylint: disable=protected-access
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+
+ def test_simple(self):
+ primitive_xml = etree.fromstring(fixture_primitive_xml())
+
+ result = status._primitive_to_dto(self.report_processor, primitive_xml)
+
+ self.assertEqual(result, fixture_primitive_dto())
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_empty_node_list(self):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(role=PCMK_STATUS_ROLE_STOPPED, node_names=[])
+ )
+ result = status._primitive_to_dto(self.report_processor, primitive_xml)
+
+ self.assertEqual(
+ result,
+ fixture_primitive_dto(role=PCMK_STATUS_ROLE_STOPPED, node_names=[]),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_all_attributes(self):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(
+ target_role=PCMK_STATUS_ROLE_STOPPED, add_optional_args=True
+ )
+ )
+
+ result = status._primitive_to_dto(self.report_processor, primitive_xml)
+
+ self.assertEqual(
+ result,
+ fixture_primitive_dto(
+ target_role=PCMK_STATUS_ROLE_STOPPED, add_optional_args=True
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_remove_clone_suffix(self):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(resource_id="resource:0")
+ )
+
+ result = status._primitive_to_dto(
+ self.report_processor, primitive_xml, True
+ )
+
+ self.assertEqual(result, fixture_primitive_dto())
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_running_on_multiple_nodes(self):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(node_names=["node1", "node2", "node3"])
+ )
+
+ result = status._primitive_to_dto(self.report_processor, primitive_xml)
+
+ self.assertEqual(
+ result,
+ fixture_primitive_dto(node_names=["node1", "node2", "node3"]),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_empty_node_name(self):
+ primitive_xml = etree.fromstring(fixture_primitive_xml(node_names=[""]))
+
+ assert_raise_library_error(
+ lambda: status._primitive_to_dto(
+ self.report_processor, primitive_xml
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_EMPTY_NODE_NAME,
+ resource_id="resource",
+ )
+ ],
+ )
+
+ def test_empty_resource_id(self):
+ primitive_xml = etree.fromstring(fixture_primitive_xml(resource_id=""))
+
+ assert_raise_library_error(
+ lambda: status._primitive_to_dto(
+ self.report_processor, primitive_xml
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.INVALID_ID_IS_EMPTY,
+ id_description="resource id",
+ )
+ ],
+ )
+
+ def test_role(self):
+ for role in PCMK_STATUS_ROLES:
+ with self.subTest(value=role):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(role=role)
+ )
+
+ result = status._primitive_to_dto(
+ self.report_processor, primitive_xml
+ )
+ self.assertEqual(result, fixture_primitive_dto(role=role))
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_invalid_role(self):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(role="NotPcmkRole")
+ )
+
+ result = status._primitive_to_dto(self.report_processor, primitive_xml)
+
+ self.assertEqual(
+ result, fixture_primitive_dto(role=PCMK_STATUS_ROLE_UNKNOWN)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_UNKNOWN_PCMK_ROLE,
+ role="NotPcmkRole",
+ resource_id="resource",
+ )
+ ],
+ )
+
+ def test_target_role(self):
+ for role in PCMK_ROLES:
+ with self.subTest(value=role):
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(target_role=role)
+ )
+
+ result = status._primitive_to_dto(
+ self.report_processor, primitive_xml
+ )
+
+ self.assertEqual(
+ result, fixture_primitive_dto(target_role=role)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_invalid_target_role(self):
+ for value in PCMK_STATUS_ROLES_PENDING + ("NotPcmkRole",):
+ with self.subTest(value=value):
+ self.setUp()
+ primitive_xml = etree.fromstring(
+ fixture_primitive_xml(target_role=value)
+ )
+
+ result = status._primitive_to_dto(
+ self.report_processor, primitive_xml
+ )
+
+ self.assertEqual(
+ result, fixture_primitive_dto(target_role=PCMK_ROLE_UNKNOWN)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_UNKNOWN_PCMK_ROLE,
+ role=value,
+ resource_id="resource",
+ )
+ ],
+ )
+
+
+class TestGroupStatusToDto(TestCase):
+ # pylint: disable=protected-access
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+
+ def test_all_attributes(self):
+ group_xml = etree.fromstring(
+ fixture_group_xml(description="Test description")
+ )
+
+ result = status._group_to_dto(self.report_processor, group_xml)
+
+ self.assertEqual(
+ result, fixture_group_dto(description="Test description")
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_single_member(self):
+ group_xml = etree.fromstring(
+ fixture_group_xml(members=[fixture_primitive_xml()])
+ )
+
+ result = status._group_to_dto(self.report_processor, group_xml)
+
+ self.assertEqual(
+ result, fixture_group_dto(members=[fixture_primitive_dto()])
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_multiple_members(self):
+ group_xml = etree.fromstring(
+ fixture_group_xml(
+ members=[
+ fixture_primitive_xml(resource_id="resource1"),
+ fixture_primitive_xml(resource_id="resource2"),
+ ]
+ )
+ )
+
+ result = status._group_to_dto(self.report_processor, group_xml)
+
+ self.assertEqual(
+ result,
+ fixture_group_dto(
+ members=[
+ fixture_primitive_dto(resource_id="resource1"),
+ fixture_primitive_dto(resource_id="resource2"),
+ ]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_multiple_members_different_state(self):
+ group_xml = etree.fromstring(
+ fixture_group_xml(
+ members=[
+ fixture_primitive_xml(
+ resource_id="resource1",
+ role=PCMK_STATUS_ROLE_STOPPED,
+ managed=False,
+ node_names=[],
+ ),
+ fixture_primitive_xml(resource_id="resource2"),
+ ]
+ )
+ )
+
+ result = status._group_to_dto(self.report_processor, group_xml)
+
+ self.assertEqual(
+ result,
+ fixture_group_dto(
+ members=[
+ fixture_primitive_dto(
+ resource_id="resource1",
+ role=PCMK_STATUS_ROLE_STOPPED,
+ managed=False,
+ node_names=[],
+ ),
+ fixture_primitive_dto(resource_id="resource2"),
+ ]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_invalid_member(self):
+ resources = {
+ "inner-group": '<group id="inner-group" number_resources="0" maintenance="false" managed="true" disabled="false" />',
+ "inner-clone": '<clone id="inner-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false" />',
+ "inner-bundle": '<bundle id="inner-bundle" type="podman" image="localhost/pcmktest:http" unique="false" maintenance="false" managed="true" failed="false" />',
+ }
+
+ for resource_id, member in resources.items():
+ with self.subTest(value=resource_id):
+ self.setUp()
+ group_xml = etree.fromstring(
+ fixture_group_xml(
+ resource_id="outer-group", members=[member]
+ )
+ )
+
+ # pylint: disable=cell-var-from-loop
+ assert_raise_library_error(
+ lambda: status._group_to_dto(
+ self.report_processor, group_xml
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_UNEXPECTED_MEMBER,
+ resource_id="outer-group",
+ resource_type="group",
+ member_id=resource_id,
+ expected_types=["primitive"],
+ )
+ ],
+ )
+
+ def test_remove_clone_suffix(self):
+ group_xml = etree.fromstring(
+ fixture_group_xml(
+ resource_id="resource-group:0",
+ members=[fixture_primitive_xml(resource_id="resource:0")],
+ )
+ )
+
+ result = status._group_to_dto(self.report_processor, group_xml, True)
+ self.assertEqual(
+ result,
+ fixture_group_dto(members=[fixture_primitive_dto()]),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+
+class TestCloneStatusToDto(TestCase):
+ # pylint: disable=protected-access
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+
+ def test_all_attributes(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ description="Test description",
+ target_role=PCMK_STATUS_ROLE_STARTED,
+ )
+ )
+
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result,
+ fixture_clone_dto(
+ description="Test description", target_role=PCMK_ROLE_STARTED
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_primitive_member(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(instances=[fixture_primitive_xml()])
+ )
+
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result, fixture_clone_dto(instances=[fixture_primitive_dto()])
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_primitive_member_multiple(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ instances=[
+ fixture_primitive_xml(),
+ fixture_primitive_xml(node_names=["node2"]),
+ ]
+ )
+ )
+
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result,
+ fixture_clone_dto(
+ instances=[
+ fixture_primitive_dto(),
+ fixture_primitive_dto(node_names=["node2"]),
+ ]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_primitive_member_unique(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ unique=True,
+ instances=[
+ fixture_primitive_xml(resource_id="resource:0"),
+ fixture_primitive_xml(
+ resource_id="resource:1", node_names=["node2"]
+ ),
+ ],
+ )
+ )
+
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result,
+ fixture_clone_dto(
+ unique=True,
+ instances=[
+ fixture_primitive_dto(),
+ fixture_primitive_dto(node_names=["node2"]),
+ ],
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_primitive_member_promotable(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ multi_state=True,
+ instances=[
+ fixture_primitive_xml(role=PCMK_STATUS_ROLE_UNPROMOTED),
+ fixture_primitive_xml(
+ role=PCMK_STATUS_ROLE_UNPROMOTED, node_names=["node2"]
+ ),
+ ],
+ )
+ )
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result,
+ fixture_clone_dto(
+ multi_state=True,
+ instances=[
+ fixture_primitive_dto(role=PCMK_STATUS_ROLE_UNPROMOTED),
+ fixture_primitive_dto(
+ role=PCMK_STATUS_ROLE_UNPROMOTED, node_names=["node2"]
+ ),
+ ],
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_primitive_member_different_ids(self):
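+ # clone instances reporting different resource ids are an error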
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ instances=[
+ fixture_primitive_xml(),
+ fixture_primitive_xml(
+ resource_id="not_the_same_id", node_names=["node2"]
+ ),
+ ]
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: status._clone_to_dto(self.report_processor, clone_xml)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_CLONE_MEMBERS_DIFFERENT_IDS,
+ clone_id="resource-clone",
+ )
+ ],
+ )
+
+ def test_group_member(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ instances=[
+ fixture_group_xml(
+ resource_id="resource-group:0",
+ members=[fixture_primitive_xml()],
+ ),
+ fixture_group_xml(
+ resource_id="resource-group:1",
+ members=[fixture_primitive_xml(node_names=["node2"])],
+ ),
+ ],
+ )
+ )
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result,
+ fixture_clone_dto(
+ instances=[
+ fixture_group_dto(members=[fixture_primitive_dto()]),
+ fixture_group_dto(
+ members=[fixture_primitive_dto(node_names=["node2"])]
+ ),
+ ],
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_group_member_unique(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ unique=True,
+ instances=[
+ fixture_group_xml(
+ resource_id="resource-group:0",
+ members=[
+ fixture_primitive_xml(resource_id="resource:0")
+ ],
+ ),
+ fixture_group_xml(
+ resource_id="resource-group:1",
+ members=[
+ fixture_primitive_xml(
+ resource_id="resource:1", node_names=["node2"]
+ )
+ ],
+ ),
+ ],
+ )
+ )
+ result = status._clone_to_dto(self.report_processor, clone_xml)
+
+ self.assertEqual(
+ result,
+ fixture_clone_dto(
+ unique=True,
+ instances=[
+ fixture_group_dto(members=[fixture_primitive_dto()]),
+ fixture_group_dto(
+ members=[fixture_primitive_dto(node_names=["node2"])]
+ ),
+ ],
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_group_member_different_group_ids(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ instances=[
+ fixture_group_xml(
+ resource_id="resource-group:0",
+ members=[fixture_primitive_xml()],
+ ),
+ fixture_group_xml(
+ resource_id="another-id-:1",
+ members=[fixture_primitive_xml(node_names=["node2"])],
+ ),
+ ],
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: status._clone_to_dto(self.report_processor, clone_xml)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_CLONE_MEMBERS_DIFFERENT_IDS,
+ clone_id="resource-clone",
+ )
+ ],
+ )
+
+ def test_group_member_different_primitive_ids(self):
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ instances=[
+ fixture_group_xml(
+ resource_id="resource-group:0",
+ members=[fixture_primitive_xml()],
+ ),
+ fixture_group_xml(
+ resource_id="resource-group:1",
+ members=[
+ fixture_primitive_xml(
+ resource_id="some-other-id",
+ node_names=["node2"],
+ )
+ ],
+ ),
+ ],
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: status._clone_to_dto(self.report_processor, clone_xml)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_CLONE_MEMBERS_DIFFERENT_IDS,
+ clone_id="resource-clone",
+ )
+ ],
+ )
+
+ def test_primitive_member_types_mixed(self):
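+ # a clone mixing group and primitive instances is an error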
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(
+ instances=[
+ fixture_group_xml(
+ resource_id="resource",
+ members=[
+ fixture_primitive_xml(resource_id="inner-resource")
+ ],
+ ),
+ fixture_primitive_xml(node_names=["node2"]),
+ ],
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: status._clone_to_dto(self.report_processor, clone_xml)
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_CLONE_MIXED_MEMBERS,
+ clone_id="resource-clone",
+ )
+ ],
+ )
+
+ def test_invalid_member(self):
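+ # clone instances other than primitives or groups are reported as
+ # CLUSTER_STATUS_UNEXPECTED_MEMBER and the conversion fails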
+ resources = {
+ "inner-clone": '<clone id="inner-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false" />',
+ "inner-bundle": '<bundle id="inner-bundle" type="podman" image="localhost/pcmktest:http" unique="false" maintenance="false" managed="true" failed="false" />',
+ }
+ for resource_id, element in resources.items():
+ with self.subTest(value=resource_id):
+ self.setUp()
+ clone_xml = etree.fromstring(
+ fixture_clone_xml(instances=[element])
+ )
+
+ # pylint: disable=cell-var-from-loop
+ assert_raise_library_error(
+ lambda: status._clone_to_dto(
+ self.report_processor, clone_xml
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_UNEXPECTED_MEMBER,
+ resource_id="resource-clone",
+ resource_type="clone",
+ member_id=resource_id,
+ expected_types=["primitive", "group"],
+ )
+ ],
+ )
+
+
+class TestBundleReplicaStatusToDto(TestCase):
+ # pylint: disable=protected-access
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+
+ def test_no_member_no_ip(self):
+ replica_xml = etree.fromstring(fixture_replica_xml())
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertEqual(result, fixture_replica_dto())
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_no_member(self):
+ replica_xml = etree.fromstring(fixture_replica_xml(ip=True))
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertEqual(result, fixture_replica_dto(ip=True))
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_member(self):
+ replica_xml = etree.fromstring(
+ fixture_replica_xml(
+ ip=True,
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-0"],
+ ),
+ )
+ )
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertEqual(
+ result,
+ fixture_replica_dto(
+ ip=True,
+ member=fixture_primitive_dto(node_names=["resource-bundle-0"]),
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_member_no_ip(self):
+ replica_xml = etree.fromstring(
+ fixture_replica_xml(
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-0"],
+ ),
+ )
+ )
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertEqual(
+ result,
+ fixture_replica_dto(
+ member=fixture_primitive_dto(node_names=["resource-bundle-0"])
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_no_container(self):
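+ # the replica lacks the implicit container resource (the podman
+ # primitive), which is reported as an error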
+ replica_xml = etree.fromstring(
+ """
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="apa" resource_agent="ocf:heartbeat:apache" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </replica>
+ """
+ )
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ assert_raise_library_error(
+ lambda: status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_BUNDLE_REPLICA_NO_CONTAINER,
+ bundle_id=bundle_id,
+ replica_id="0",
+ )
+ ],
+ )
+
+ def test_empty_replica(self):
+ replica_xml = etree.fromstring('<replica id="0" />')
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ assert_raise_library_error(
+ lambda: status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_BUNDLE_REPLICA_NO_CONTAINER,
+ bundle_id=bundle_id,
+ replica_id="0",
+ )
+ ],
+ )
+
+ def test_member_no_remote(self):
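+ # a replica with a member resource but without the implicit remote
+ # resource is reported as an error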
+ replica_xml = etree.fromstring(
+ """
+ <replica id="0">
+ <resource id="apa" resource_agent="ocf:heartbeat:apache" role="Stopped" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </replica>
+ """
+ )
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ assert_raise_library_error(
+ lambda: status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_BUNDLE_REPLICA_MISSING_REMOTE,
+ bundle_id=bundle_id,
+ replica_id="0",
+ )
+ ],
+ )
+
+ def test_member_same_id_as_container(self):
+ # xml taken from crm_mon output
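+ # the member resource shares its id with the implicit podman
+ # container; a warning is reported and the replica is skipped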
+ replica_xml = etree.fromstring(
+ """
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="r92-1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="4">
+ <node name="r92-1" id="1" cached="true"/>
+ <node name="resource-bundle-1" id="resource-bundle-1" cached="true"/>
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ <node name="resource-bundle-2" id="resource-bundle-2" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="r92-1" id="1" cached="true"/>
+ </resource>
+ </replica>
+ """
+ )
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertIsNone(result)
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT,
+ bundle_id=bundle_id,
+ bad_ids=["resource-bundle-podman-0"],
+ )
+ ],
+ )
+
+ def test_member_same_id_as_remote(self):
+ # xml taken from crm_mon output
+ replica_xml = etree.fromstring(
+ """
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Stopping" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="false"/>
+ </resource>
+ </replica>
+ """
+ )
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertIsNone(result)
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT,
+ bundle_id=bundle_id,
+ bad_ids=["resource-bundle-0"],
+ )
+ ],
+ )
+
+ def test_member_same_id_as_ip(self):
+ # xml taken from crm_mon output
+ replica_xml = etree.fromstring(
+ """
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="4">
+ <node name="node1" id="1" cached="true"/>
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ <node name="resource-bundle-1" id="resource-bundle-1" cached="true"/>
+ <node name="resource-bundle-2" id="resource-bundle-2" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </replica>
+ """
+ )
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+
+ result = status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ self.assertIsNone(result)
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT,
+ bundle_id=bundle_id,
+ bad_ids=["resource-bundle-ip-192.168.122.250"],
+ )
+ ],
+ )
+
+ def test_too_many_members(self):
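+ # a replica with an unexpected number of resources (two apache
+ # members here) is reported as an error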
+ replica_xml = etree.fromstring(
+ """
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="r92-1" id="1" cached="true"/>
+ </resource>
+ <resource id="apa1" resource_agent="ocf:heartbeat:apache" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="r92-1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="r92-1" id="1" cached="true"/>
+ </resource>
+ <resource id="apa2" resource_agent="ocf:heartbeat:apache" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ </resource>
+ </replica>
+ """
+ )
+
+ bundle_id = "resource-bundle"
+ bundle_type = "podman"
+ assert_raise_library_error(
+ lambda: status._replica_to_dto(
+ self.report_processor, replica_xml, bundle_id, bundle_type
+ )
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_BUNDLE_REPLICA_INVALID_COUNT,
+ bundle_id=bundle_id,
+ replica_id="0",
+ )
+ ],
+ )
+
+
+class TestBundleStatusToDto(TestCase):
+ # pylint: disable=protected-access
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+
+ def test_no_member(self):
+ bundle_xml = etree.fromstring(
+ fixture_bundle_xml(replicas=[fixture_replica_xml()])
+ )
+
+ result = status._bundle_to_dto(self.report_processor, bundle_xml, False)
+ self.assertEqual(
+ result, fixture_bundle_dto(replicas=[fixture_replica_dto()])
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_member(self):
+ bundle_xml = etree.fromstring(
+ fixture_bundle_xml(
+ replicas=[
+ fixture_replica_xml(
+ ip=True,
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-0"]
+ ),
+ )
+ ]
+ )
+ )
+ result = status._bundle_to_dto(self.report_processor, bundle_xml, False)
+ self.assertEqual(
+ result,
+ fixture_bundle_dto(
+ replicas=[
+ fixture_replica_dto(
+ ip=True,
+ member=fixture_primitive_dto(
+ node_names=["resource-bundle-0"]
+ ),
+ )
+ ]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_multiple_replicas(self):
+ bundle_xml = etree.fromstring(
+ fixture_bundle_xml(
+ replicas=[
+ fixture_replica_xml(
+ ip=True,
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-0"]
+ ),
+ ),
+ fixture_replica_xml(
+ ip=True,
+ replica_id="1",
+ node_name="node2",
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-1"]
+ ),
+ ),
+ ]
+ )
+ )
+ result = status._bundle_to_dto(self.report_processor, bundle_xml, False)
+ self.assertEqual(
+ result,
+ fixture_bundle_dto(
+ replicas=[
+ fixture_replica_dto(
+ ip=True,
+ member=fixture_primitive_dto(
+ node_names=["resource-bundle-0"]
+ ),
+ ),
+ fixture_replica_dto(
+ replica_id="1",
+ ip=True,
+ node_name="node2",
+ member=fixture_primitive_dto(
+ node_names=["resource-bundle-1"]
+ ),
+ ),
+ ]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_same_id_as_implicit(self):
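+ # a member id colliding with an implicit resource id causes the
+ # whole bundle to be skipped; a warning is reported and None is
+ # returned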
+ bundle_xml = etree.fromstring(
+ """
+ <bundle id="resource-bundle" type="podman" image="localhost/pcmktest:http" unique="false" maintenance="false" managed="true" failed="false">
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="2">
+ <node name="node1" id="1" cached="true"/>
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ </resource>
+ </replica>
+ </bundle>
+ """
+ )
+ result = status._bundle_to_dto(self.report_processor, bundle_xml, False)
+ self.assertIsNone(result)
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT,
+ bundle_id="resource-bundle",
+ bad_ids=["resource-bundle-0"],
+ )
+ ],
+ )
+
+ def test_same_id_as_implicit_multiple_replicas(self):
+ bundle_xml = etree.fromstring(
+ """
+ <bundle id="resource-bundle" type="podman" image="localhost/pcmktest:http" unique="false" maintenance="false" managed="true" failed="false">
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-1" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ </replica>
+ <replica id="1">
+ <resource id="resource-bundle-ip-192.168.122.251" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node2" id="2" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-1" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-1" resource_agent="ocf:heartbeat:podman" role="Stopping" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
+ <node name="node2" id="2" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
+ <node name="resource-bundle-1" id="resource-bundle-1" cached="false"/>
+ </resource>
+ </replica>
+ </bundle>
+ """
+ )
+ result = status._bundle_to_dto(self.report_processor, bundle_xml, False)
+ self.assertIsNone(result)
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT,
+ bundle_id="resource-bundle",
+ bad_ids=["resource-bundle-1"],
+ )
+ ],
+ )
+
+ def test_replicas_different(self):
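+ # all replicas of a bundle must have the same structure; a replica
+ # differing in ip resource, member id or member agent is an error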
+ replicas = {
+ "no-ip": fixture_replica_xml(
+ ip=False, member=fixture_primitive_xml()
+ ),
+ "different-member-id": fixture_replica_xml(
+ ip=True, member=fixture_primitive_xml(resource_id="another-id")
+ ),
+ "no-member": fixture_replica_xml(ip=True, member=None),
+ "different-member-agent": fixture_replica_xml(
+ ip=True,
+ member=fixture_primitive_xml(
+ resource_agent="ocf:heartbeat:apache"
+ ),
+ ),
+ }
+ for name, element in replicas.items():
+ with self.subTest(value=name):
+ self.setUp()
+
+ bundle_xml = etree.fromstring(
+ fixture_bundle_xml(
+ replicas=[
+ element,
+ fixture_replica_xml(
+ ip=True,
+ replica_id="1",
+ member=fixture_primitive_xml(),
+ ),
+ ]
+ )
+ )
+
+ # pylint: disable=cell-var-from-loop
+ assert_raise_library_error(
+ lambda: status._bundle_to_dto(
+ self.report_processor, bundle_xml
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.error(
+ reports.codes.CLUSTER_STATUS_BUNDLE_DIFFERENT_REPLICAS,
+ bundle_id="resource-bundle",
+ )
+ ],
+ )
+
+
+class TestResourcesStatusToDto(TestCase):
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+
+ def test_empty_resources(self):
+ status_xml = etree.fromstring(fixture_crm_mon_xml([]))
+
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+ self.assertEqual(result, ResourcesStatusDto([]))
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_single_primitive(self):
+ status_xml = etree.fromstring(
+ fixture_crm_mon_xml([fixture_primitive_xml()])
+ )
+
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+ self.assertEqual(result, ResourcesStatusDto([fixture_primitive_dto()]))
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_single_group(self):
+ status_xml = etree.fromstring(
+ fixture_crm_mon_xml(
+ [fixture_group_xml(members=[fixture_primitive_xml()])]
+ )
+ )
+
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+ self.assertEqual(
+ result,
+ ResourcesStatusDto(
+ [fixture_group_dto(members=[fixture_primitive_dto()])]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_single_clone(self):
+ status_xml = etree.fromstring(
+ fixture_crm_mon_xml(
+ [fixture_clone_xml(instances=[fixture_primitive_xml()])]
+ )
+ )
+
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+ self.assertEqual(
+ result,
+ ResourcesStatusDto(
+ [fixture_clone_dto(instances=[fixture_primitive_dto()])]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_single_bundle(self):
+ status_xml = etree.fromstring(
+ fixture_crm_mon_xml(
+ [
+ fixture_bundle_xml(
+ replicas=[
+ fixture_replica_xml(
+ ip=True,
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-0"]
+ ),
+ )
+ ]
+ )
+ ]
+ )
+ )
+
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+ self.assertEqual(
+ result,
+ ResourcesStatusDto(
+ [
+ fixture_bundle_dto(
+ replicas=[
+ fixture_replica_dto(
+ ip=True,
+ member=fixture_primitive_dto(
+ node_names=["resource-bundle-0"]
+ ),
+ )
+ ]
+ )
+ ]
+ ),
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_all_resource_types(self):
+ status_xml = etree.fromstring(
+ fixture_crm_mon_xml(
+ [
+ fixture_primitive_xml(),
+ fixture_group_xml(members=[fixture_primitive_xml()]),
+ fixture_clone_xml(instances=[fixture_primitive_xml()]),
+ fixture_bundle_xml(
+ replicas=[
+ fixture_replica_xml(
+ ip=True,
+ member=fixture_primitive_xml(
+ node_names=["resource-bundle-0"]
+ ),
+ )
+ ]
+ ),
+ ]
+ )
+ )
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+
+ self.assertEqual(result.resources[0], fixture_primitive_dto())
+ self.assertEqual(
+ result.resources[1],
+ fixture_group_dto(members=[fixture_primitive_dto()]),
+ )
+ self.assertEqual(
+ result.resources[2],
+ fixture_clone_dto(instances=[fixture_primitive_dto()]),
+ )
+ self.assertEqual(
+ result.resources[3],
+ fixture_bundle_dto(
+ replicas=[
+ fixture_replica_dto(
+ ip=True,
+ member=fixture_primitive_dto(
+ node_names=["resource-bundle-0"]
+ ),
+ )
+ ]
+ ),
+ )
+
+ def test_skip_bundle(self):
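+ # a bundle that is skipped (member id colliding with an implicit
+ # resource id) is left out of the result; other resources are kept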
+ status_xml = etree.fromstring(
+ fixture_crm_mon_xml(
+ [
+ fixture_primitive_xml(),
+ """
+ <bundle id="resource-bundle" type="podman" image="localhost/pcmktest:http" unique="false" maintenance="false" managed="true" failed="false">
+ <replica id="0">
+ <resource id="resource-bundle-ip-192.168.122.250" resource_agent="ocf:heartbeat:IPaddr2" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:heartbeat:apache" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ <resource id="resource-bundle-podman-0" resource_agent="ocf:heartbeat:podman" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="node1" id="1" cached="true"/>
+ </resource>
+ <resource id="resource-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="2">
+ <node name="node1" id="1" cached="true"/>
+ <node name="resource-bundle-0" id="resource-bundle-0" cached="true"/>
+ </resource>
+ </replica>
+ </bundle>
+ """,
+ ]
+ )
+ )
+
+ result = status.status_xml_to_dto(self.report_processor, status_xml)
+
+ self.assertEqual(result, ResourcesStatusDto([fixture_primitive_dto()]))
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ fixture.warn(
+ reports.codes.CLUSTER_STATUS_BUNDLE_MEMBER_ID_AS_IMPLICIT,
+ bundle_id="resource-bundle",
+ bad_ids=["resource-bundle-0"],
+ )
+ ],
+ )
--
2.25.1