diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java
index 534c036dcf6..b1dafc6c474 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java
@@ -17,24 +17,16 @@
 package org.apache.hadoop.ozone.recon.api;
 
-import java.io.BufferedWriter;
-import java.io.OutputStreamWriter;
-import java.nio.charset.StandardCharsets;
 import java.util.Map;
 import javax.inject.Inject;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
 import javax.ws.rs.QueryParam;
-import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.StreamingOutput;
-import org.apache.commons.csv.CSVFormat;
-import org.apache.commons.csv.CSVPrinter;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse;
-import org.apache.hadoop.ozone.recon.api.types.DatanodePendingDeletionMetrics;
 import org.apache.hadoop.ozone.recon.api.types.ScmPendingDeletion;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,51 +81,6 @@ public Response getPendingDeletionByComponent(
     }
   }
 
-  @GET
-  @Path("/download")
-  public Response downloadPendingDeleteData() {
-    DataNodeMetricsServiceResponse dnMetricsResponse = dataNodeMetricsService.getCollectedMetrics(null);
-
-    if (dnMetricsResponse.getStatus() != DataNodeMetricsService.MetricCollectionStatus.FINISHED) {
-      return Response.status(Response.Status.ACCEPTED)
-          .entity(dnMetricsResponse)
-          .type("application/json")
-          .build();
-    }
-
-    if (null == dnMetricsResponse.getPendingDeletionPerDataNode()) {
-      return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
-          .entity("Metrics data is missing despite FINISHED status.")
-          .type("text/plain")
-          .build();
-    }
-
-    StreamingOutput stream = output -> {
-      CSVFormat format = CSVFormat.DEFAULT.builder()
-          .setHeader("HostName", "Datanode UUID", "Pending Block Size (bytes)").build();
-      try (CSVPrinter csvPrinter = new CSVPrinter(
-          new BufferedWriter(new OutputStreamWriter(output, StandardCharsets.UTF_8)), format)) {
-        for (DatanodePendingDeletionMetrics metric : dnMetricsResponse.getPendingDeletionPerDataNode()) {
-          csvPrinter.printRecord(
-              metric.getHostName(),
-              metric.getDatanodeUuid(),
-              metric.getPendingBlockSize()
-          );
-        }
-        csvPrinter.flush();
-      } catch (Exception e) {
-        LOG.error("Failed to stream CSV", e);
-        throw new WebApplicationException("Failed to generate CSV", e);
-      }
-    };
-
-    return Response.status(Response.Status.ACCEPTED)
-        .entity(stream)
-        .type("text/csv")
-        .header("Content-Disposition", "attachment; filename=\"pending_deletion_all_datanode_stats.csv\"")
-        .build();
-  }
-
   private Response handleDataNodeMetrics(Integer limit) {
     if (null != limit && limit < 1) {
       return Response.status(Response.Status.BAD_REQUEST)
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java
index e5ae2550475..25ed673bfa6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java
@@ -20,23 +20,34 @@
 import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
 import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
 
+import java.io.BufferedWriter;
 import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 import javax.inject.Inject;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import javax.ws.rs.core.StreamingOutput;
+import org.apache.commons.csv.CSVFormat;
+import org.apache.commons.csv.CSVPrinter;
 import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodePendingDeletionMetrics;
 import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
 import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
 import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
@@ -71,16 +82,19 @@ public class StorageDistributionEndpoint {
   private static final Logger LOG = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
   private final ReconGlobalStatsManager reconGlobalStatsManager;
   private final ReconGlobalMetricsService reconGlobalMetricsService;
+  private final DataNodeMetricsService dataNodeMetricsService;
 
   @Inject
   public StorageDistributionEndpoint(OzoneStorageContainerManager reconSCM,
       NSSummaryEndpoint nsSummaryEndpoint,
       ReconGlobalStatsManager reconGlobalStatsManager,
-      ReconGlobalMetricsService reconGlobalMetricsService) {
+      ReconGlobalMetricsService reconGlobalMetricsService,
+      DataNodeMetricsService dataNodeMetricsService) {
     this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager();
     this.nsSummaryEndpoint = nsSummaryEndpoint;
     this.reconGlobalStatsManager = reconGlobalStatsManager;
     this.reconGlobalMetricsService = reconGlobalMetricsService;
+    this.dataNodeMetricsService = dataNodeMetricsService;
   }
 
   @GET
@@ -114,6 +128,85 @@ public Response getStorageDistribution() {
     }
   }
 
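+  /**
+   * Streams a CSV report that joins each datanode's storage usage with its
+   * pending-deletion total. While metric collection is still in progress,
+   * responds with 202 and the collection status as JSON instead of the CSV.
+   */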
+  @GET
+  @Path("/download")
+  public Response downloadDataNodeDistribution() {
+    DataNodeMetricsServiceResponse metricsResponse =
+        dataNodeMetricsService.getCollectedMetrics(null);
+
+    if (metricsResponse.getStatus() != DataNodeMetricsService.MetricCollectionStatus.FINISHED) {
+      return Response.status(Response.Status.ACCEPTED)
+          .entity(metricsResponse)
+          .type(MediaType.APPLICATION_JSON)
+          .build();
+    }
+
+    List<DatanodePendingDeletionMetrics> pendingDeletionMetrics =
+        metricsResponse.getPendingDeletionPerDataNode();
+
+    if (pendingDeletionMetrics == null) {
+      return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
+          .entity("Metrics data is missing despite FINISHED status.")
+          .type(MediaType.TEXT_PLAIN)
+          .build();
+    }
+
+    Map<String, DatanodeStorageReport> reportByUuid =
+        collectDatanodeReports().stream()
+            .collect(Collectors.toMap(
+                DatanodeStorageReport::getDatanodeUuid,
+                Function.identity()));
+
+    StreamingOutput stream = output -> {
+      CSVFormat format = CSVFormat.DEFAULT.builder()
+          .setHeader(
+              "HostName",
+              "Datanode UUID",
+              "Capacity",
+              "Used Space",
+              "Remaining Space",
+              "Committed Space",
+              "Reserved Space",
+              "Minimum Free Space",
+              "Pending Block Size")
+          .build();
+
+      try (CSVPrinter printer = new CSVPrinter(
+          new BufferedWriter(new OutputStreamWriter(output, StandardCharsets.UTF_8)),
+          format)) {
+
+        for (DatanodePendingDeletionMetrics metric : pendingDeletionMetrics) {
+          DatanodeStorageReport report = reportByUuid.get(metric.getDatanodeUuid());
+          if (report == null) {
+            continue; // skip datanodes that have no storage report
+          }
+
+          printer.printRecord(
+              metric.getHostName(),
+              metric.getDatanodeUuid(),
+              report.getCapacity(),
+              report.getUsed(),
+              report.getRemaining(),
+              report.getCommitted(),
+              report.getReserved(),
+              report.getMinimumFreeSpace(),
+              metric.getPendingBlockSize()
+          );
+        }
+        printer.flush();
+      } catch (Exception e) {
+        LOG.error("Failed to stream CSV", e);
+        throw new WebApplicationException("Failed to generate CSV", e);
+      }
+    };
+
+    return Response.status(Response.Status.ACCEPTED)
+        .entity(stream)
+        .type("text/csv")
+        .header("Content-Disposition", "attachment; filename=\"datanode_storage_and_pending_deletion_stats.csv\"")
+        .build();
+  }
+
   private GlobalStorageReport calculateGlobalStorageReport() {
     try {
       SCMNodeStat stats = nodeManager.getStats();
@@ -189,7 +282,7 @@ private StorageCapacityDistributionResponse buildStorageDistributionResponse(
         .build();
   }
 
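+  /**
+   * Collects storage reports for all datanodes known to the node manager;
+   * public so the CSV download path can be stubbed in unit tests.
+   */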
-  private List<DatanodeStorageReport> collectDatanodeReports() {
+  public List<DatanodeStorageReport> collectDatanodeReports() {
     return nodeManager.getAllNodes().stream()
         .map(this::getStorageReport)
         .filter(Objects::nonNull) // Filter out null reports
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/capacity/capacity.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/capacity/capacity.tsx
index ec9ad436e59..0fb9d0baed7 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/capacity/capacity.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/capacity/capacity.tsx
@@ -47,7 +47,7 @@ type CapacityState = {
 
 const Capacity: React.FC = () => {
   const PENDING_POLL_INTERVAL = 5 * 1000;
-  const DN_CSV_DOWNLOAD_URL = '/api/v1/pendingDeletion/download';
+  const DN_CSV_DOWNLOAD_URL = '/api/v1/storageDistribution/download';
   const DN_STATUS_URL = '/api/v1/pendingDeletion?component=dn';
   const DOWNLOAD_POLL_TIMEOUT_MS = 10 * 60 * 1000;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestPendingDeletionEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestPendingDeletionEndpoint.java
index 3c9af15d4dc..7951f9b2b3e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestPendingDeletionEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestPendingDeletionEndpoint.java
@@ -18,20 +18,14 @@
 package org.apache.hadoop.ozone.recon.api;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-import java.io.ByteArrayOutputStream;
-import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.StreamingOutput;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse;
@@ -212,58 +206,4 @@ public void testOmComponentReturnsPendingDeletionSizes() {
     assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
     assertEquals(pendingSizes, response.getEntity());
   }
-
-  @Test
-  public void testDownloadReturnsAcceptedWhenCollectionInProgress() {
-    DataNodeMetricsServiceResponse metricsResponse = DataNodeMetricsServiceResponse.newBuilder()
-        .setStatus(DataNodeMetricsService.MetricCollectionStatus.IN_PROGRESS)
-        .build();
-    when(dataNodeMetricsService.getCollectedMetrics(null)).thenReturn(metricsResponse);
-
-    Response response = pendingDeletionEndpoint.downloadPendingDeleteData();
-
-    assertEquals(Response.Status.ACCEPTED.getStatusCode(), response.getStatus());
-    assertEquals("application/json", response.getMediaType().toString());
-    assertEquals(metricsResponse, response.getEntity());
-  }
-
-  @Test
-  public void testDownloadReturnsServerErrorWhenMetricsMissing() {
-    DataNodeMetricsServiceResponse metricsResponse = DataNodeMetricsServiceResponse.newBuilder()
-        .setStatus(DataNodeMetricsService.MetricCollectionStatus.FINISHED)
-        .build();
-    when(dataNodeMetricsService.getCollectedMetrics(null)).thenReturn(metricsResponse);
-
-    Response response = pendingDeletionEndpoint.downloadPendingDeleteData();
-
-    assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus());
-    assertEquals("Metrics data is missing despite FINISHED status.", response.getEntity());
-    assertEquals("text/plain", response.getMediaType().toString());
-  }
-
-  @Test
-  public void testDownloadReturnsCsvWithMetrics() throws Exception {
-    List<DatanodePendingDeletionMetrics> pendingDeletionMetrics = Arrays.asList(
-        new DatanodePendingDeletionMetrics("dn1", "uuid-1", 10L),
-        new DatanodePendingDeletionMetrics("dn2", "uuid-2", 20L));
-    DataNodeMetricsServiceResponse metricsResponse = DataNodeMetricsServiceResponse.newBuilder()
-        .setStatus(DataNodeMetricsService.MetricCollectionStatus.FINISHED)
-        .setPendingDeletion(pendingDeletionMetrics)
-        .build();
-    when(dataNodeMetricsService.getCollectedMetrics(null)).thenReturn(metricsResponse);
-
-    Response response = pendingDeletionEndpoint.downloadPendingDeleteData();
-
-    assertEquals(Response.Status.ACCEPTED.getStatusCode(), response.getStatus());
-    assertEquals("text/csv", response.getMediaType().toString());
-    assertEquals("attachment; filename=\"pending_deletion_all_datanode_stats.csv\"",
-        response.getHeaderString("Content-Disposition"));
-    StreamingOutput streamingOutput = assertInstanceOf(StreamingOutput.class, response.getEntity());
-    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-    streamingOutput.write(outputStream);
-    String csv = new String(outputStream.toByteArray(), StandardCharsets.UTF_8);
-    assertTrue(csv.contains("HostName,Datanode UUID,Pending Block Size (bytes)"));
-    assertTrue(csv.contains("dn1,uuid-1,10"));
-    assertTrue(csv.contains("dn2,uuid-2,20"));
-  }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestStorageDistributionEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestStorageDistributionEndpoint.java
new file mode 100644
index 00000000000..6786985b77f
--- /dev/null
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestStorageDistributionEndpoint.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.StreamingOutput;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodePendingDeletionMetrics;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.spi.ReconGlobalStatsManager;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Unit tests for {@link StorageDistributionEndpoint}.
+ */
+public class TestStorageDistributionEndpoint {
+  private DataNodeMetricsService dataNodeMetricsService;
+  private StorageDistributionEndpoint storageDistributionEndpoint;
+  private ReconNodeManager nodeManager = mock(ReconNodeManager.class);
+
+  @BeforeEach
+  public void setup() {
+    ReconGlobalMetricsService reconGlobalMetricsService = mock(ReconGlobalMetricsService.class);
+    dataNodeMetricsService = mock(DataNodeMetricsService.class);
+    NSSummaryEndpoint nsSummaryEndpoint = mock(NSSummaryEndpoint.class);
+    OzoneStorageContainerManager reconSCM = mock(OzoneStorageContainerManager.class);
+    when(reconSCM.getScmNodeManager()).thenReturn(nodeManager);
+    ReconGlobalStatsManager reconGlobalStatsManager = mock(ReconGlobalStatsManager.class);
+    // Spy the endpoint so collectDatanodeReports() can be stubbed with
+    // doReturn(); calling when() on a plain instance is not valid Mockito usage.
+    storageDistributionEndpoint = spy(new StorageDistributionEndpoint(reconSCM,
+        nsSummaryEndpoint,
+        reconGlobalStatsManager,
+        reconGlobalMetricsService,
+        dataNodeMetricsService));
+  }
+
+  @Test
+  public void testDownloadReturnsAcceptedWhenCollectionInProgress() {
+    DataNodeMetricsServiceResponse metricsResponse = DataNodeMetricsServiceResponse.newBuilder()
+        .setStatus(DataNodeMetricsService.MetricCollectionStatus.IN_PROGRESS)
+        .build();
+    when(dataNodeMetricsService.getCollectedMetrics(null)).thenReturn(metricsResponse);
+
+    Response response = storageDistributionEndpoint.downloadDataNodeDistribution();
+
+    assertEquals(Response.Status.ACCEPTED.getStatusCode(), response.getStatus());
+    assertEquals("application/json", response.getMediaType().toString());
+    assertEquals(metricsResponse, response.getEntity());
+  }
+
+  @Test
+  public void testDownloadReturnsServerErrorWhenMetricsMissing() {
+    DataNodeMetricsServiceResponse metricsResponse = DataNodeMetricsServiceResponse.newBuilder()
+        .setStatus(DataNodeMetricsService.MetricCollectionStatus.FINISHED)
+        .build();
+    when(dataNodeMetricsService.getCollectedMetrics(null)).thenReturn(metricsResponse);
+
+    Response response = storageDistributionEndpoint.downloadDataNodeDistribution();
+
+    assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus());
+    assertEquals("Metrics data is missing despite FINISHED status.", response.getEntity());
+    assertEquals("text/plain", response.getMediaType().toString());
+  }
+
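+  // The CSV test below is driven entirely by stubbed data: the metrics service
+  // mock supplies the pending-deletion totals and the spied endpoint supplies
+  // matching storage reports, so no real SCM state is consulted.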
+  @Test
+  public void testDownloadReturnsCsvWithMetrics() throws Exception {
+    // given
+    UUID uuid1 = UUID.randomUUID();
+    UUID uuid2 = UUID.randomUUID();
+
+    String dataNode1 = "dn1";
+    String dataNode2 = "dn2";
+
+    List<DatanodePendingDeletionMetrics> pendingDeletionMetrics = Arrays.asList(
+        new DatanodePendingDeletionMetrics(dataNode1, uuid1.toString(), 10L),
+        new DatanodePendingDeletionMetrics(dataNode2, uuid2.toString(), 20L)
+    );
+
+    DataNodeMetricsServiceResponse metricsResponse =
+        DataNodeMetricsServiceResponse.newBuilder()
+            .setStatus(DataNodeMetricsService.MetricCollectionStatus.FINISHED)
+            .setPendingDeletion(pendingDeletionMetrics)
+            .build();
+
+    when(dataNodeMetricsService.getCollectedMetrics(null))
+        .thenReturn(metricsResponse);
+
+    mockDatanodeStorageReports(pendingDeletionMetrics);
+    mockNodeManagerStats(uuid1, uuid2);
+
+    Response response = storageDistributionEndpoint.downloadDataNodeDistribution();
+
+    // then
+    assertEquals(Response.Status.ACCEPTED.getStatusCode(), response.getStatus());
+    assertEquals("text/csv", response.getMediaType().toString());
+    assertEquals(
+        "attachment; filename=\"datanode_storage_and_pending_deletion_stats.csv\"",
+        response.getHeaderString("Content-Disposition")
+    );
+
+    String csv = readCsv(response);
+
+    assertTrue(csv.contains(
+        "HostName,Datanode UUID,Capacity,Used Space,Remaining Space,"
+            + "Committed Space,Reserved Space,Minimum Free Space,Pending Block Size"
+    ));
+    assertTrue(csv.contains(dataNode1 + "," + uuid1 + ",100,10,10,10,5,5,10"));
+    assertTrue(csv.contains(dataNode2 + "," + uuid2 + ",100,10,10,10,5,5,20"));
+  }
+
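+  // Builds one storage report per pending-deletion metric and stubs the spied
+  // endpoint to return them. The builder setters for capacity, used, remaining,
+  // committed, reserved, and minimum free space are assumed to mirror the
+  // getters read by the CSV writer; the values match the row assertions above.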
+  private void mockDatanodeStorageReports(
+      List<DatanodePendingDeletionMetrics> metrics) {
+
+    List<DatanodeStorageReport> reports = metrics.stream()
+        .map(m -> DatanodeStorageReport.newBuilder()
+            .setDatanodeUuid(m.getDatanodeUuid())
+            .setHostName(m.getHostName())
+            .setCapacity(100)
+            .setUsed(10)
+            .setRemaining(10)
+            .setCommitted(10)
+            .setReserved(5)
+            .setMinimumFreeSpace(5)
+            .build())
+        .collect(Collectors.toList());
+
+    doReturn(reports)
+        .when(storageDistributionEndpoint)
+        .collectDatanodeReports();
+  }
+
+  // Not consulted once collectDatanodeReports() is stubbed, but kept to mirror
+  // the production wiring of the node manager.
+  private void mockNodeManagerStats(UUID uuid1, UUID uuid2) {
+    DatanodeDetails dn1 = DatanodeDetails.newBuilder()
+        .setUuid(uuid1)
+        .setHostName("dn1")
+        .build();
+
+    DatanodeDetails dn2 = DatanodeDetails.newBuilder()
+        .setUuid(uuid2)
+        .setHostName("dn2")
+        .build();
+
+    List<DatanodeInfo> datanodes = Arrays.asList(
+        new DatanodeInfo(dn1, null, null),
+        new DatanodeInfo(dn2, null, null)
+    );
+
+    when(nodeManager.getAllNodes()).thenReturn(datanodes);
+    when(nodeManager.getNodeStat(dn1))
+        .thenReturn(new SCMNodeMetric(100, 10, 10, 10, 5, 5));
+    when(nodeManager.getNodeStat(dn2))
+        .thenReturn(new SCMNodeMetric(100, 10, 10, 10, 5, 5));
+  }
+
+  private String readCsv(Response response) throws Exception {
+    StreamingOutput output = assertInstanceOf(StreamingOutput.class, response.getEntity());
+    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+    output.write(outputStream);
+    return new String(outputStream.toByteArray(), StandardCharsets.UTF_8);
+  }
+}