diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index 37aaa30d2b2e..7ededa08bc13 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -178,12 +178,12 @@ public void execute() throws IOException {
       Map<TableName, Map<String, Long>> newTableSetTimestampMap =
         backupManager.readLogTimestampMap();
 
-      backupManager
-        .deleteBulkLoadedRows(bulkLoadsToDelete.stream().map(BulkLoad::getRowKey).toList());
-
       // backup complete
       backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
       completeBackup(conn, backupInfo, BackupType.FULL, conf);
+
+      backupManager
+        .deleteBulkLoadedRows(bulkLoadsToDelete.stream().map(BulkLoad::getRowKey).toList());
     } catch (Exception e) {
       failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
         BackupType.FULL, conf);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 481f8d755e75..0a6a01b9c3e2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -19,6 +19,7 @@
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
@@ -27,12 +28,16 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.impl.BulkLoad;
+import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -40,6 +45,7 @@
 import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.junit.jupiter.api.Tag;
@@ -237,6 +243,60 @@ public void testUpdateFileListsMissingArchivedFile() throws Exception {
     }
   }
 
+  @Test
+  public void testFailedFullBackupKeepsTrackedBulkLoads() throws Exception {
+    TableName tableName = TableName.valueOf("bulk-load-full-failure-" + System.nanoTime());
+    TEST_UTIL.createTable(tableName, famName).close();
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      String backupId = backupTables(BackupType.FULL, List.of(tableName), BACKUP_ROOT_DIR);
+      assertTrue(checkSucceeded(backupId));
+
+      String keyPrefix = "full-backup-failure";
+      FileSystem fs = TEST_UTIL.getTestFileSystem();
+      Path baseDirectory = new Path(TEST_UTIL.getDataTestDirOnTestFS(TEST_NAME),
+        tableName.getNameWithNamespaceInclAsString().replace(':', '_') + "_" + keyPrefix);
+      Path hfilePath =
+        new Path(baseDirectory, Bytes.toString(famName) + Path.SEPARATOR + "hfile_" + keyPrefix);
+
+      HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), fs, hfilePath, famName, qualName,
+        Bytes.toBytes(keyPrefix), Bytes.toBytes(keyPrefix + "z"), ROWS_IN_BULK_LOAD);
+
+      Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> bulkLoadResult =
+        BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tableName, baseDirectory);
+      assertFalse(bulkLoadResult.isEmpty());
+      assertEquals(1, systemTable.readBulkloadRows(List.of(tableName)).size());
+
+      BackupRequest request = createBackupRequest(BackupType.FULL, List.of(tableName),
+        BACKUP_ROOT_DIR);
+      long failingBackupTs = EnvironmentEdgeManager.currentTime();
+      String failingBackupId = BackupRestoreConstants.BACKUPID_PREFIX + failingBackupTs;
+      while (failingBackupId.equals(backupId)) {
+        failingBackupId = BackupRestoreConstants.BACKUPID_PREFIX + (++failingBackupTs);
+      }
+      Path targetTableBackupDir =
+        new Path(BackupUtils.getTableBackupDir(BACKUP_ROOT_DIR, failingBackupId, tableName));
+      FileSystem.get(targetTableBackupDir.toUri(), conf1).mkdirs(targetTableBackupDir);
+
+      try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+        FullTableBackupClient failingClient =
+          new FullTableBackupClient(conn, failingBackupId, request) {
+            @Override
+            protected void completeBackup(final Connection conn, BackupInfo backupInfo,
+              BackupType type, Configuration conf) throws IOException {
+              throw new IOException("Injected failure before completing backup");
+            }
+          };
+
+        assertThrows(IOException.class, failingClient::execute);
+      }
+      assertTrue(checkFailed(failingBackupId));
+      assertEquals(1, systemTable.readBulkloadRows(List.of(tableName)).size());
+    } finally {
+      TEST_UTIL.deleteTable(tableName);
+    }
+  }
+
   private void performBulkLoad(String keyPrefix) throws IOException {
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     Path baseDirectory = TEST_UTIL.getDataTestDirOnTestFS(TEST_NAME);