Skip to content

Commit

Permalink
refactoring (support multiple StorageSystems per ODF)
Browse files Browse the repository at this point in the history
  • Loading branch information
SanjalKatiyar committed Nov 24, 2023
1 parent 4ece3b0 commit 70d7ccc
Show file tree
Hide file tree
Showing 31 changed files with 357 additions and 196 deletions.
5 changes: 4 additions & 1 deletion locales/en/plugin__odf-console.json
Original file line number Diff line number Diff line change
Expand Up @@ -911,8 +911,11 @@
"Search...": "Search...",
"Expand to fullscreen": "Expand to fullscreen",
"Exit fullscreen": "Exit fullscreen",
"This view is only supported for Internal mode cluster.": "This view is only supported for Internal mode cluster.",
"Show message": "Show message",
"Hide message": "Hide message",
"Back to main view": "Back to main view",
"Topology view is not supported for External Mode": "Topology view is not supported for External Mode",
"Topology view is not supported for External mode": "Topology view is not supported for External mode",
"No StorageCluster found": "No StorageCluster found",
"Set up a storage cluster to view the topology": "Set up a storage cluster to view the topology",
"A minimal cluster deployment will be performed.": "A minimal cluster deployment will be performed.",
Expand Down
6 changes: 2 additions & 4 deletions packages/mco/components/mco-dashboard/queries.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,8 @@ export const LAST_SYNC_TIME_QUERY = 'ramen_sync_duration_seconds';
export const getLastSyncPerClusterQuery = () =>
`${LAST_SYNC_TIME_QUERY}{${DRPC_OBJECT_TYPE}, ${RAMEN_HUB_OPERATOR_METRICS_SERVICE}}`;

// ToDo (epic 4422): Need to update as per updates in the metrics
export const CAPACITY_QUERIES = {
// ToDo (epic 4422): For 4.15, Assuming "managedBy" is unique for each StorageSystem. Need to add "target_namesapce" as an another key.
// ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem (works for now). Need to add "target_namespace" as another key to capacity metrics.
[StorageDashboard.TOTAL_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${TOTAL_CAPACITY_FILE_BLOCK_METRIC}`,
[StorageDashboard.USED_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${USED_CAPACITY_FILE_BLOCK_METRIC}`,
};
Expand All @@ -46,9 +45,8 @@ export const getRBDSnapshotUtilizationQuery = (
return queries[queryName];
};

// ToDo (epic 4422): Need to update as per updates in the metrics
export const STATUS_QUERIES = {
// ToDo (epic 4422): For 4.15, Assuming "managedBy" is unique for each StorageSystem. Need to add "target_namesapce" as an another key.
// ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem (works for now). Need to add "target_namespace" as another key to health metrics.
[StorageDashboard.SYSTEM_HEALTH]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${SYSTEM_HEALTH_METRIC}`,
[StorageDashboard.HEALTH]: SYSTEM_HEALTH_METRIC,
[StorageDashboard.CSV_STATUS]: `csv_succeeded{name=~"${ODF_OPERATOR}.*"}`,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,8 @@ const setSubSystemMap = (
subSysHealthData: PrometheusResponse,
subSystemMap: SubSystemMap
) =>
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
subSysHealthData?.data?.result?.forEach(
(item: PrometheusResult) =>
!item?.metric.managedBy &&
Expand All @@ -70,6 +72,8 @@ const setHealthData = (
healthData: SystemHealthMap[],
subSystemMap: SubSystemMap
) =>
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
sysHealthData?.data?.result?.forEach((item: PrometheusResult) => {
const { apiGroup } = getGVK(item?.metric.target_kind);
const healthVal = item?.value[1];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,12 +151,20 @@ const headerColumns = (t: TFunction) => [
];

const getRow: GetRow = (
{ systemName, targetKind, clusterName, totalValue, usedValue, clusterURL },
{
systemName,
namespace: systemNamespace,
targetKind,
clusterName,
totalValue,
usedValue,
clusterURL,
},
index
) => {
const { apiGroup, apiVersion, kind } = getGVK(targetKind);
const systemKind = referenceForGroupVersionKind(apiGroup)(apiVersion)(kind);
const systemPath = getDashboardLink(systemKind, systemName);
const systemPath = getDashboardLink(systemKind, systemName, systemNamespace);
const isPercentage = !!totalValue;
const progress = isPercentage ? getPercentage(usedValue, totalValue) : 100;
const value = isPercentage
Expand Down Expand Up @@ -280,6 +288,8 @@ const SystemCapacityCard: React.FC = () => {
!loadingUsedCapacity && !errorUsedCapacity
? usedCapacity?.data?.result?.reduce(
(acc: CapacityMetricDatumMap, usedMetric: PrometheusResult) => {
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
const systemName = usedMetric?.metric?.storage_system;
const namespace = usedMetric?.metric?.target_namespace;
const targetKind = usedMetric?.metric?.target_kind;
Expand All @@ -305,6 +315,8 @@ const SystemCapacityCard: React.FC = () => {
!loadingTotalCapacity &&
!errorTotalCapacity &&
totalCapacity?.data?.result?.forEach((totalMetric: PrometheusResult) => {
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
const dataMapKey = getUniqueKey(
totalMetric?.metric?.storage_system,
totalMetric?.metric?.target_namespace,
Expand Down
4 changes: 2 additions & 2 deletions packages/ocs/dashboards/persistent-external/status-card.tsx
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import * as React from 'react';
import { cephClusterResource } from '@odf/core/resources';
import { getCephHealthState } from '@odf/ocs/utils';
import { K8sResourceKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
Expand All @@ -14,15 +15,14 @@ import {
CardHeader,
CardTitle,
} from '@patternfly/react-core';
import { getCephHealthState } from '../persistent-internal/status-card/utils';

export const StatusCard: React.FC = () => {
const { t } = useCustomTranslation();
const [data, loaded, loadError] =
useK8sWatchResource<K8sResourceKind[]>(cephClusterResource);

const cephHealth = getCephHealthState(
{ ceph: { data, loaded, loadError } },
{ ceph: { data: data?.[0], loaded, loadError } },
t
);

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import * as React from 'react';
import { getCephHealthState } from '@odf/ocs/utils';
import { healthStateMapping } from '@odf/shared/dashboards/status-card/states';
import {
useCustomPrometheusPoll,
Expand Down Expand Up @@ -33,7 +34,7 @@ import {
} from '@patternfly/react-core';
import { CephClusterModel } from '../../../models';
import { DATA_RESILIENCY_QUERY, StorageDashboardQuery } from '../../../queries';
import { getCephHealthState, getDataResiliencyState } from './utils';
import { getDataResiliencyState } from './utils';
import { whitelistedHealthChecksRef } from './whitelisted-health-checks';
import './healthchecks.scss';

Expand Down Expand Up @@ -129,7 +130,7 @@ export const StatusCard: React.FC = () => {
);

const cephHealthState = getCephHealthState(
{ ceph: { data, loaded, loadError } },
{ ceph: { data: data?.[0], loaded, loadError } },
t
);
const dataResiliencyState = getDataResiliencyState(
Expand Down
48 changes: 1 addition & 47 deletions packages/ocs/dashboards/persistent-internal/status-card/utils.ts
Original file line number Diff line number Diff line change
@@ -1,52 +1,6 @@
import { getResiliencyProgress } from '@odf/shared/utils';
import { HealthState } from '@openshift-console/dynamic-plugin-sdk';
import {
PrometheusHealthHandler,
ResourceHealthHandler,
SubsystemHealth,
} from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';
import { TFunction } from 'i18next';
import { WatchCephResource } from '../../../utils';

// Translate a raw Ceph health string (e.g. from CephCluster .status.ceph.health)
// into the dashboard's SubsystemHealth representation. Any unrecognized value
// maps to UNKNOWN. Messages are translated lazily, only for the matching state.
const CephHealthStatus = (status: string, t: TFunction): SubsystemHealth => {
  if (status === 'HEALTH_OK') {
    return { state: HealthState.OK };
  }
  if (status === 'HEALTH_WARN') {
    return { state: HealthState.WARNING, message: t('Warning') };
  }
  if (status === 'HEALTH_ERR') {
    return { state: HealthState.ERROR, message: t('Error') };
  }
  // Anything else (including undefined) is reported as UNKNOWN.
  return { state: HealthState.UNKNOWN };
};

// Compute the overall Ceph health for the watched CephCluster resource.
// Guard order matters for the reported state: a watch error wins over a
// pending load, and a pending load wins over an empty result set.
export const getCephHealthState: ResourceHealthHandler<WatchCephResource> = (
  { ceph },
  t
) => {
  if (ceph.loadError) {
    return { state: HealthState.NOT_AVAILABLE };
  }
  if (!ceph.loaded) {
    return { state: HealthState.LOADING };
  }
  if (ceph.data.length === 0) {
    return { state: HealthState.NOT_AVAILABLE };
  }
  // The first CephCluster returned by the watch drives the health status.
  return CephHealthStatus(ceph.data?.[0]?.status?.ceph?.health, t);
};
import { PrometheusHealthHandler } from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';

export const getDataResiliencyState: PrometheusHealthHandler = (
responses,
Expand Down
Loading

0 comments on commit 70d7ccc

Please sign in to comment.