From 82ea9944f4b30b0f40a4dfbc0bf7f4590ea3a5ca Mon Sep 17 00:00:00 2001 From: Gowtham Shanmugasundaram Date: Mon, 20 Nov 2023 23:11:44 +0530 Subject: [PATCH] Changes to support multiple ODF clusters from the same OCP cluster Signed-off-by: Gowtham Shanmugasundaram --- .../mco/components/mco-dashboard/queries.ts | 12 +- .../status-card/status-card.spec.tsx | 339 ++++++++++++++++++ .../status-card/status-card.tsx | 22 +- .../system-capacity-card/capacity-card.tsx | 18 +- 4 files changed, 374 insertions(+), 17 deletions(-) create mode 100644 packages/mco/components/mco-dashboard/storage-system/status-card/status-card.spec.tsx diff --git a/packages/mco/components/mco-dashboard/queries.ts b/packages/mco/components/mco-dashboard/queries.ts index a19f5072b..60093b9a5 100644 --- a/packages/mco/components/mco-dashboard/queries.ts +++ b/packages/mco/components/mco-dashboard/queries.ts @@ -1,3 +1,4 @@ +import { ODF_OPERATOR } from '@odf/shared/constants'; import { TOTAL_CAPACITY_FILE_BLOCK_METRIC, USED_CAPACITY_FILE_BLOCK_METRIC, @@ -28,8 +29,9 @@ export const getLastSyncPerClusterQuery = () => // ToDo (epic 4422): Need to update as per updates in the metrics export const CAPACITY_QUERIES = { - [StorageDashboard.TOTAL_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map{target_namespace="openshift-storage"} , "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind) ${TOTAL_CAPACITY_FILE_BLOCK_METRIC}`, - [StorageDashboard.USED_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map{target_namespace="openshift-storage"} , "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind) ${USED_CAPACITY_FILE_BLOCK_METRIC}`, + // ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem; "target_namespace" needs to be added as another key. + [StorageDashboard.TOTAL_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${TOTAL_CAPACITY_FILE_BLOCK_METRIC}`, + [StorageDashboard.USED_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${USED_CAPACITY_FILE_BLOCK_METRIC}`, }; export const getRBDSnapshotUtilizationQuery = ( @@ -46,9 +48,9 @@ export const getRBDSnapshotUtilizationQuery = ( // ToDo (epic 4422): Need to update as per updates in the metrics export const STATUS_QUERIES = { - [StorageDashboard.SYSTEM_HEALTH]: `(label_replace(odf_system_map{target_namespace="openshift-storage"} , "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind) ${SYSTEM_HEALTH_METRIC}`, + // ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem; "target_namespace" needs to be added as another key.
+ [StorageDashboard.SYSTEM_HEALTH]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${SYSTEM_HEALTH_METRIC}`, [StorageDashboard.HEALTH]: SYSTEM_HEALTH_METRIC, - [StorageDashboard.CSV_STATUS]: - 'csv_succeeded{exported_namespace="openshift-storage"}', + [StorageDashboard.CSV_STATUS]: `csv_succeeded{name=~"${ODF_OPERATOR}.*"}`, [StorageDashboard.CSV_STATUS_ALL_WHITELISTED]: 'csv_succeeded', }; diff --git a/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.spec.tsx b/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.spec.tsx new file mode 100644 index 000000000..30a3f766c --- /dev/null +++ b/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.spec.tsx @@ -0,0 +1,339 @@ +import * as React from 'react'; +import { render, screen, fireEvent, waitFor } from '@testing-library/react'; +import { STATUS_QUERIES, StorageDashboard } from '../../queries'; +import { StatusCard } from './status-card'; + +let testCaseId = 1; + +const healthStatus = { + status: 'success', + data: { + resultType: 'vector', + result: [ + { + metric: { + __name__: `${STATUS_QUERIES[StorageDashboard.HEALTH]}`, + cluster: 'cluster-1', + clusterID: '23b41e13-5668-4fe9-83ab-ce109efb0634', + container: 'core', + endpoint: 'mgmt', + instance: '10.131.0.32:8080', + job: 'noobaa-mgmt', + target_namespace: 'namespace-1', + namespace: 'namespace-1', + pod: 'noobaa-core-0', + receive: 'true', + service: 'noobaa-mgmt', + system_type: 'OCS', + system_vendor: 'Red Hat', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + }, + value: [1700478807.019, '0'], + }, + { + metric: { + __name__: `${STATUS_QUERIES[StorageDashboard.HEALTH]}`, + cluster: 'cluster-1', + clusterID: '23b41e13-5668-4fe9-83ab-ce109efb0634', + container: 'mgr', + endpoint: 'http-metrics', + instance: '10.131.0.27:9283', + job: 'rook-ceph-mgr', + managedBy: 'ocs-storagecluster', + target_namespace: 'namespace-1', + namespace: 'namespace-1', + pod: 'rook-ceph-mgr-a-d4d78778d-2zxmh', + receive: 'true', + service: 'rook-ceph-mgr', + system_type: 'OCS', + system_vendor: 'Red Hat', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + }, + value: [1700478807.019, '0'], + }, + { + metric: { + __name__: `${STATUS_QUERIES[StorageDashboard.HEALTH]}`, + cluster: 'cluster-2', + clusterID: 'b3e8ac99-3ebf-4c39-b3af-7902b14669fe', + container: 'core', + endpoint: 'mgmt', + instance: '10.133.2.39:8080', + job: 'noobaa-mgmt', + target_namespace: 'namespace-1', + namespace: 'namespace-1', + pod: 'noobaa-core-0', + receive: 'true', + service: 'noobaa-mgmt', + system_type: 'OCS', + system_vendor: 'Red Hat', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + }, + value: [1700478807.019, '0'], + }, + { + metric: { + __name__: `${STATUS_QUERIES[StorageDashboard.HEALTH]}`, + cluster: 'cluster-2', + clusterID: 'b3e8ac99-3ebf-4c39-b3af-7902b14669fe', + container: 'mgr', + endpoint: 'http-metrics', + instance: '10.132.2.22:9283', + job: 'rook-ceph-mgr', + managedBy: 'ocs-storagecluster', + target_namespace: 'namespace-1', + namespace: 'namespace-1', + pod: 'rook-ceph-mgr-a-7488547d8d-9dx8f', + receive: 'true', + service: 'rook-ceph-mgr', + system_type: 'OCS', + system_vendor: 'Red Hat', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + }, + value: [1700478807.019, '0'], + }, + ], + }, +}; + +const storageSystemStatus = { + status: 'success', + data: { + resultType: 'vector', + result: [ + { 
+ metric: { + cluster: 'cluster-1', + clusterID: '23b41e13-5668-4fe9-83ab-ce109efb0634', + container: 'mgr', + endpoint: 'http-metrics', + instance: '10.131.0.27:9283', + job: 'rook-ceph-mgr', + managedBy: 'ocs-storagecluster', + target_namespace: 'namespace-1', + namespace: 'namespace-1', + pod: 'rook-ceph-mgr-a-d4d78778d-2zxmh', + receive: 'true', + service: 'rook-ceph-mgr', + system_type: 'OCS', + system_vendor: 'Red Hat', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + storage_system: 'storagesystem-1', + target_kind: 'storagecluster.ocs.openshift.io/v1', + }, + value: [1700478807.019, '0'], + }, + { + metric: { + cluster: 'cluster-2', + clusterID: 'b3e8ac99-3ebf-4c39-b3af-7902b14669fe', + container: 'mgr', + endpoint: 'http-metrics', + instance: '10.132.2.22:9283', + job: 'rook-ceph-mgr', + managedBy: 'ocs-storagecluster', + target_namespace: 'namespace-1', + namespace: 'namespace-1', + pod: 'rook-ceph-mgr-a-7488547d8d-9dx8f', + receive: 'true', + service: 'rook-ceph-mgr', + system_type: 'OCS', + system_vendor: 'Red Hat', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + storage_system: 'storagesystem-2', + target_kind: 'storagecluster.ocs.openshift.io/v1', + }, + value: [1700478807.019, '0'], + }, + ], + }, +}; + +const csvStatus = { + status: 'success', + data: { + resultType: 'vector', + result: [ + { + metric: { + __name__: 'csv_succeeded', + cluster: 'cluster-1', + clusterID: '23b41e13-5668-4fe9-83ab-ce109efb0634', + container: 'olm-operator', + endpoint: 'https-metrics', + exported_namespace: 'namespace-1', + instance: '10.130.0.33:8443', + job: 'olm-operator-metrics', + name: 'odf-operator.v4.14.0-rhodf', + namespace: 'openshift-operator-lifecycle-manager', + pod: 'olm-operator-56d5ff6b6b-lhkdv', + receive: 'true', + service: 'olm-operator-metrics', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + version: '4.14.0-rhodf', + }, + value: [1700482361.796, '1'], + }, + { + metric: { + __name__: 'csv_succeeded', + cluster: 'cluster-2', + clusterID: 'b3e8ac99-3ebf-4c39-b3af-7902b14669fe', + container: 'olm-operator', + endpoint: 'https-metrics', + exported_namespace: 'namespace-2', + instance: '10.134.0.31:8443', + job: 'olm-operator-metrics', + name: 'odf-operator.v4.14.0-rhodf', + namespace: 'openshift-operator-lifecycle-manager', + pod: 'olm-operator-56d5ff6b6b-vmw7c', + receive: 'true', + service: 'olm-operator-metrics', + tenant_id: '0a4c8828-9b16-42fc-9195-056d96cbe66f', + version: '4.14.0-rhodf', + }, + value: [1700482361.796, '1'], + }, + ], + }, +}; + +const MockHealthBody: React.FC = ({ children }) => ( +
<div>{children}</div>
+); + +jest.mock( + '@odf/shared/hooks/custom-prometheus-poll/custom-prometheus-poll-hook', + () => ({ + useCustomPrometheusPoll: jest.fn((props: { query: string }) => { + if (props.query === STATUS_QUERIES[StorageDashboard.SYSTEM_HEALTH]) { + return [storageSystemStatus, undefined, false]; + } else if (props.query === STATUS_QUERIES[StorageDashboard.HEALTH]) { + if (testCaseId === 2) { + healthStatus.data.result[0].value[1] = '1'; + } + return [healthStatus, undefined, false]; + } else if (props.query === STATUS_QUERIES[StorageDashboard.CSV_STATUS]) { + if (testCaseId === 2) { + csvStatus.data.result[0].value[1] = '0'; + } + return [csvStatus, undefined, false]; + } + }), + }) +); + +jest.mock('@openshift-console/dynamic-plugin-sdk-internal', () => ({ + // The real HealthBody import causes an error, so replace it with MockHealthBody + HealthBody: (props) => <MockHealthBody {...props} />, +})); + +describe('Test ODF cluster status from different clusters and namespaces', () => { + test('All healthy case testing', async () => { + testCaseId = 1; + render(<StatusCard />); + // Title + expect(screen.getByText('Status')).toBeInTheDocument(); + expect(screen.getAllByText('Healthy').length === 2).toBeTruthy(); + + // Operator health + expect(screen.getByText('Data Foundation')).toBeInTheDocument(); + await waitFor(() => { + fireEvent.click(screen.getByText('Data Foundation')); + // Popover + expect(screen.getByText('Data Foundation status')).toBeInTheDocument(); + expect( + screen.getByText( + 'The Data Foundation operator is the primary operator of Data Foundation' + ) + ).toBeInTheDocument(); + expect(screen.getByText('Operator status')).toBeInTheDocument(); + expect(screen.getByText('Running')).toBeInTheDocument(); + expect(screen.getByText('Degraded')).toBeInTheDocument(); + // Running operator count + expect(screen.getByText('2')).toBeInTheDocument(); + // Degraded operator count + expect(screen.getByText('0')).toBeInTheDocument(); + + // Close popover + fireEvent.click(screen.getByLabelText('Close')); + }); + + expect(screen.getByText('Systems')).toBeInTheDocument(); + fireEvent.click(screen.getByText('Systems')); + // Storage system health + await waitFor(() => { + // Popover + expect(screen.getByText('Storage System status')).toBeInTheDocument(); + expect( + screen.getByText( + 'StorageSystem is responsible for ensuring different types of file and block storage availability, storage capacity management and generic operations on storage.'
+ ) + ).toBeInTheDocument(); + expect(screen.getByText('Storage System (2)')).toBeInTheDocument(); + // Operator status + expect(screen.getAllByText('Warning').length === 2).toBeTruthy(); + expect(screen.getByText('Critical')).toBeInTheDocument(); + expect(screen.getByText('Error')).toBeInTheDocument(); + expect(screen.getByText('Critical')).toBeInTheDocument(); + expect(screen.getAllByText('Healthy').length === 3).toBeTruthy(); + expect(screen.getByText('Normal')).toBeInTheDocument(); + expect(screen.getAllByText(`(0)`).length === 2).toBeTruthy(); + expect(screen.getByText(`(2)`)).toBeInTheDocument(); + }); + }); + + test('Partially healthy case testing', async () => { + testCaseId = 2; + render(<StatusCard />); + // Title + expect(screen.getByText('Status')).toBeInTheDocument(); + expect(screen.getByText('Error')).toBeInTheDocument(); + expect(screen.getByText('Warning')).toBeInTheDocument(); + expect(screen.getAllByText('Degraded').length === 2).toBeTruthy(); + + // Operator health + expect(screen.getByText('Data Foundation')).toBeInTheDocument(); + await waitFor(() => { + fireEvent.click(screen.getByText('Data Foundation')); + // Popover + expect(screen.getByText('Data Foundation status')).toBeInTheDocument(); + expect( + screen.getByText( + 'The Data Foundation operator is the primary operator of Data Foundation' + ) + ).toBeInTheDocument(); + expect(screen.getByText('Operator status')).toBeInTheDocument(); + expect(screen.getByText('Running')).toBeInTheDocument(); + expect(screen.getAllByText('Degraded').length === 3).toBeTruthy(); + // Running and degraded operator count + expect(screen.getAllByText('1').length === 2).toBeTruthy(); + + // Close popover + fireEvent.click(screen.getByLabelText('Close')); + }); + + expect(screen.getByText('Systems')).toBeInTheDocument(); + fireEvent.click(screen.getByText('Systems')); + // Storage system health + await waitFor(() => { + // Popover + expect(screen.getByText('Storage System status')).toBeInTheDocument(); + expect( + screen.getByText( + 'StorageSystem is responsible for ensuring different types of file and block storage availability, storage capacity management and generic operations on storage.'
+ ) + ).toBeInTheDocument(); + expect(screen.getByText('Storage System (2)')).toBeInTheDocument(); + // Operator status + expect(screen.getAllByText('Warning').length === 3).toBeTruthy(); + expect(screen.getByText('Critical')).toBeInTheDocument(); + expect(screen.getAllByText('Error').length === 2).toBeTruthy(); + expect(screen.getByText('Critical')).toBeInTheDocument(); + expect(screen.getByText('Healthy')).toBeInTheDocument(); + expect(screen.getByText('Normal')).toBeInTheDocument(); + expect(screen.getAllByText(`(1)`).length === 2).toBeTruthy(); + expect(screen.getByText(`(0)`)).toBeInTheDocument(); + }); + }); +}); diff --git a/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.tsx b/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.tsx index fb43e9a8c..5fb31c7ea 100644 --- a/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.tsx +++ b/packages/mco/components/mco-dashboard/storage-system/status-card/status-card.tsx @@ -1,5 +1,4 @@ import * as React from 'react'; -import { ODF_OPERATOR } from '@odf/shared/constants'; import HealthItem from '@odf/shared/dashboards/status-card/HealthItem'; import { healthStateMap, @@ -36,6 +35,9 @@ type SubSystemMap = { [key: string]: string; }; +const getUniqueKey = (namespace: string, clusterName: string) => + `${namespace}-${clusterName}`; + const getWorstHealth = (healthData: SystemHealthMap[]) => healthData.reduce( (acc: string, item: SystemHealthMap) => @@ -58,7 +60,9 @@ const setSubSystemMap = ( (item: PrometheusResult) => !item?.metric.managedBy && item?.metric.system_type === 'OCS' && - (subSystemMap[item?.metric.cluster] = item?.value[1]) + (subSystemMap[ + getUniqueKey(item?.metric.target_namespace, item?.metric.cluster) + ] = item?.value[1]) ); const setHealthData = ( @@ -71,7 +75,9 @@ const setHealthData = ( const healthVal = item?.value[1]; const unifiedHealthVal = getUnifiedHealthValue( healthVal, - subSystemMap[item?.metric.cluster] + subSystemMap[ + getUniqueKey(item?.metric.target_namespace, item?.metric.cluster) + ] ); healthData.push({ systemName: item?.metric?.storage_system, @@ -147,12 +153,10 @@ const setCSVStatusData = ( csvData: PrometheusResponse, csvStatusData: CSVStatusMap[] ) => - csvData?.data?.result?.forEach( - (item: PrometheusResult) => - item?.metric.name.startsWith(ODF_OPERATOR) && - csvStatusData.push({ - rawCSVData: item?.value[1], - }) + csvData?.data?.result?.forEach((item: PrometheusResult) => + csvStatusData.push({ + rawCSVData: item?.value[1], + }) ); const CSVStatusHealthItem: React.FC = () => { diff --git a/packages/mco/components/mco-dashboard/storage-system/system-capacity-card/capacity-card.tsx b/packages/mco/components/mco-dashboard/storage-system/system-capacity-card/capacity-card.tsx index 1b457c351..551ce076b 100644 --- a/packages/mco/components/mco-dashboard/storage-system/system-capacity-card/capacity-card.tsx +++ b/packages/mco/components/mco-dashboard/storage-system/system-capacity-card/capacity-card.tsx @@ -53,6 +53,7 @@ import './capacity-card.scss'; type CapacityMetricDatum = { systemName: string; + namespace: string; targetKind: string; clusterName: string; totalValue: HumanizeResult; @@ -71,6 +72,12 @@ type ManagedClusterLinkMap = { string: string }; type ClusterClaimObject = { name: string; value: string }; +const getUniqueKey = ( + systemName: string, + namespace: string, + clusterName: string +) => `${systemName}-${namespace}-${clusterName}`; + const getClusterURL = (clusterClaimsList: ClusterClaimObject[]) => 
clusterClaimsList?.find( (claimObj: ClusterClaimObject) => claimObj?.name === CLUSTER_CLAIM_URL_NAME @@ -274,13 +281,15 @@ const SystemCapacityCard: React.FC = () => { ? usedCapacity?.data?.result?.reduce( (acc: CapacityMetricDatumMap, usedMetric: PrometheusResult) => { const systemName = usedMetric?.metric?.storage_system; + const namespace = usedMetric?.metric?.target_namespace; const targetKind = usedMetric?.metric?.target_kind; const clusterName = usedMetric?.metric?.cluster; const clusterURL = ManagedClusterLink.hasOwnProperty(clusterName) ? ManagedClusterLink[clusterName] : undefined; - acc[systemName + clusterName] = { + acc[getUniqueKey(systemName, namespace, clusterName)] = { systemName, + namespace, targetKind, clusterName, usedValue: humanizeBinaryBytes(usedMetric?.value?.[1]), @@ -296,8 +305,11 @@ const SystemCapacityCard: React.FC = () => { !loadingTotalCapacity && !errorTotalCapacity && totalCapacity?.data?.result?.forEach((totalMetric: PrometheusResult) => { - const dataMapKey = - totalMetric?.metric?.storage_system + totalMetric?.metric?.cluster; + const dataMapKey = getUniqueKey( + totalMetric?.metric?.storage_system, + totalMetric?.metric?.target_namespace, + totalMetric?.metric?.cluster + ); dataMap.hasOwnProperty(dataMapKey) && (dataMap[dataMapKey].totalValue = !!totalMetric?.value?.[1] ? humanizeBinaryBytes(totalMetric?.value?.[1])
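
Note (illustrative sketch, not part of the patch): the changes above stop assuming a single "openshift-storage" namespace and instead key each metric by its target_namespace together with the cluster name (plus storage_system for capacity data), so two ODF StorageSystems reporting from different namespaces of the same managed cluster no longer collapse into one entry. A minimal TypeScript sketch of that keying idea follows; the MetricLabels type and the sample objects are assumptions for illustration only, with label values borrowed from the test fixtures above.

  // Composite key mirroring the getUniqueKey helpers added in this patch.
  type MetricLabels = {
    storage_system: string;
    target_namespace: string;
    cluster: string;
  };

  const getUniqueKey = (m: MetricLabels): string =>
    `${m.storage_system}-${m.target_namespace}-${m.cluster}`;

  // Same StorageSystem name and cluster, different namespaces: the keys
  // differ, so the dashboard keeps a separate entry for each system.
  const systemA: MetricLabels = {
    storage_system: 'storagesystem-1',
    target_namespace: 'namespace-1',
    cluster: 'cluster-1',
  };
  const systemB: MetricLabels = { ...systemA, target_namespace: 'namespace-2' };

  console.log(getUniqueKey(systemA) !== getUniqueKey(systemB)); // true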