HBASE-29240: Backup ancestry trees should have the ability to be invalidated #6891
base: master
@@ -18,15 +18,24 @@
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@InterfaceAudience.Private
public final class BackupClientFactory {
  private static final Logger LOG = LoggerFactory.getLogger(BackupClientFactory.class);

  private BackupClientFactory() {
  }

@@ -49,8 +58,45 @@ public static TableBackupClient create(Connection conn, String backupId, BackupRequest request)
    BackupType type = request.getBackupType();
    if (type == BackupType.FULL) {
      return new FullTableBackupClient(conn, backupId, request);
-   } else {
-     return new IncrementalTableBackupClient(conn, backupId, request);
    }

    String latestFullBackup = getLatestFullBackupId(conn, request);

    try (BackupAdmin admin = new BackupAdminImpl(conn)) {
      boolean disallowFurtherIncrementals =
        admin.getBackupInfo(latestFullBackup).isDisallowFurtherIncrementals();

      if (!disallowFurtherIncrementals) {
        return new IncrementalTableBackupClient(conn, backupId, request);
      }

      if (request.getFailOnDisallowedIncrementals()) {
        throw new IncrementalBackupsDisallowedException(request);
      }

      LOG.info("Incremental backups disallowed for backupId {}, creating a full backup",
        latestFullBackup);
      return new FullTableBackupClient(conn, backupId, request);
    }
  }

  private static String getLatestFullBackupId(Connection conn, BackupRequest request)
Contributor:
The log line above made me realize this approach is wrong. An incremental backup might build on top of multiple FULL backups: it includes all tables for which a FULL backup exists in the backup root. For example, given tables T1, T2, and T3, suppose a FULL backup of T1 and T2 is taken at time 0 and a FULL backup of T3 at time 1. If we were to truncate T2, the full backup at time 0 would get the disallowedIncremental marker, but this method would only check the full backup at time 1. Correct behavior would be to check all FULL backups (from newest to oldest) until all tables present in the backup root are covered. Realizing the complexity here, we do have a couple of options.
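Whichever option is taken, the core coverage check could look like this minimal sketch (a hypothetical helper, not part of this PR; it assumes a list of completed FULL backups for the relevant backup root, sorted newest-first, and the BackupInfo accessors used elsewhere in this diff):

```java
// Walk FULL backups newest to oldest. A backup "covers" the tables it
// introduces, i.e. tables not present in any newer FULL backup. If any
// covering backup carries the disallow marker, the incremental must not run.
private static boolean incrementalsAllowed(List<BackupInfo> fullBackupsNewestFirst) {
  Set<TableName> covered = new HashSet<>();
  for (BackupInfo info : fullBackupsNewestFirst) {
    boolean introducesNewTable = false;
    for (TableName table : info.getTables()) {
      introducesNewTable |= covered.add(table);
    }
    if (introducesNewTable && info.isDisallowFurtherIncrementals()) {
      return false; // an ancestor this incremental would build on was invalidated
    }
  }
  return true; // every table in the root is covered by a valid FULL backup
}
```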
    throws IOException {
    try (BackupManager backupManager = new BackupManager(conn, conn.getConfiguration())) {
      // Sorted in desc order by time
      List<BackupInfo> backups = backupManager.getBackupHistory(true);

      for (BackupInfo info : backups) {
        if (
          info.getType() == BackupType.FULL
            && Objects.equals(info.getBackupRootDir(), request.getTargetRootDir())
        ) {
          return info.getBackupId();
        }
      }
    }
    throw new RuntimeException(
      "Could not find a valid full backup for incremental request for tables "
        + request.getTableList());
  }
}
@@ -67,6 +67,17 @@ public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    deleteBulkLoads(cfg, tableName, (ignored) -> true);
  }

  @Override
  public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
Contributor:
Should add a check whether the backup system is enabled or not.
    TableName tableName) throws IOException {
    Configuration cfg = ctx.getEnvironment().getConfiguration();
    if (!BackupManager.isBackupEnabled(cfg)) {
      LOG.debug("Skipping preTruncateTable hook since backup is disabled");
      return;
    }
    disallowIncrementalBackups(ctx.getEnvironment(), tableName);
  }

  @Override
  public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName) throws IOException {

@@ -76,6 +87,7 @@ public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      return;
    }
    deleteBulkLoads(cfg, tableName, (ignored) -> true);
    disallowIncrementalBackups(ctx.getEnvironment(), tableName);
  }

  @Override
@@ -113,4 +125,20 @@ private void deleteBulkLoads(Configuration config, TableName tableName,
      tbl.deleteBulkLoadedRows(rowsToDelete);
    }
  }

  private static void disallowIncrementalBackups(MasterCoprocessorEnvironment env,
    TableName tableName) throws IOException {
    Configuration conf = env.getConfiguration();
    if (tableName.equals(BackupSystemTable.getTableName(conf))) {
      return;
    }

    BackupSystemTable table = new BackupSystemTable(env.getConnection());
    try {
      table.startBackupExclusiveOperation();
Contributor (author):
Does it make more sense to include lock acquisition/release inside the disallowFurtherIncrementals method itself?
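For illustration, a sketch of that shape (hypothetical; disallowFurtherIncrementalsInternal is an invented name for the current method body):

```java
// Inside BackupSystemTable: the method owns the lock itself, so coprocessor
// hooks cannot forget the acquire/release pairing.
public void disallowFurtherIncrementals(TableName toDisallow) throws IOException {
  startBackupExclusiveOperation();
  try {
    disallowFurtherIncrementalsInternal(toDisallow); // current method body, unchanged
  } finally {
    finishBackupExclusiveOperation();
  }
}
```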
Contributor:
Hmm, do we actually need to use the lock here? Thinking out loud...

Yes: this prevents a truncation happening while a backup is running, which might cause incomplete data to be stored. In that case, it makes sense to take the lock.

No: the lock system seems intended to avoid multiple backup operations from overlapping. Using it here would mean it also impacts a non-backup-related system. Is it really bad if a truncate happens during a backup? How is it different from a bulk load or regular mutations happening during a backup (which are allowed)?

Currently hovering towards that we don't need a lock here.
      table.disallowFurtherIncrementals(tableName);
    } finally {
      table.finishBackupExclusiveOperation();
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Any exceptions will bubble up to the caller. If modern backups are enabled and we fail to invalidate the table ancestry, it will effectively block the truncation. This sounds correct, otherwise we're stuck in a situation where the table has been truncated, and the operator creating a new backup has no way of knowing at backup creation time.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Sounds good. And to add to that: if the truncation were to fail at some other point (after we already invalidated the backup), there's no harm done. Only effect is that a full backup will be done where one could do an incremental one. |
    }
  }
}
@@ -277,6 +277,41 @@ public void close() {
    // do nothing
  }

  /**
   * @param toDisallow Any most recent full back containing this table will be marked as disallowing
Contributor:
This line makes little sense. Typos?
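One possible rewording, matching what the method below actually does (a suggestion, not from the PR):

```java
/**
 * For each backup root, marks the most recent completed FULL backup whose table set
 * contains the given table as disallowing further incremental backups.
 *
 * @param toDisallow the table whose backup ancestry should be invalidated
 */
```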
   * further incrementals
   */
  public void disallowFurtherIncrementals(TableName toDisallow) throws IOException {
    List<BackupInfo> fullTableBackups = getCompletedFullBackupsSortedByHistoryDesc();
    List<Put> invalidatePuts = new ArrayList<>(fullTableBackups.size());
    Set<String> backupRootDirsSeen = new HashSet<>(fullTableBackups.size());

    for (BackupInfo backupInfo : fullTableBackups) {
      // to minimize the amount of mutations against the backup system table, we only
Contributor:
We also only need to consider the most recent FULL backup per backup root.

Contributor (author):
Makes sense; added an additional check to make sure we aren't adding Puts for older full backups.
      // need to update the most recent full backups that allow incremental backups
      if (
        backupInfo.getTables().contains(toDisallow) && backupInfo.getType() == BackupType.FULL
          && !backupInfo.isDisallowFurtherIncrementals()
Contributor:
This check can be moved to an inner check. In case we truncate the same table multiple times in a row, the current behavior will keep adjusting older and older backupInfos.
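A sketch of that restructuring (hypothetical; it leans on Set.add returning false for backup roots already handled on this newest-first pass):

```java
for (BackupInfo backupInfo : fullTableBackups) {
  if (
    backupInfo.getTables().contains(toDisallow) && backupInfo.getType() == BackupType.FULL
      && backupRootDirsSeen.add(backupInfo.getBackupRootDir())
  ) {
    // Inner check: if the newest FULL backup for this root is already
    // disallowed (e.g. the table was truncated before), do nothing instead
    // of walking on to older backups for the same root.
    if (!backupInfo.isDisallowFurtherIncrementals()) {
      backupInfo.setDisallowFurtherIncrementals(true);
      invalidatePuts.add(createPutForBackupInfo(backupInfo));
      LOG.info("Disallowing incremental backups for backup {} due to table {}",
        backupInfo.getBackupId(), toDisallow);
    }
  }
}
```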
          && !backupRootDirsSeen.contains(backupInfo.getBackupRootDir())
      ) {
        backupInfo.setDisallowFurtherIncrementals(true);
        backupRootDirsSeen.add(backupInfo.getBackupRootDir());
        invalidatePuts.add(createPutForBackupInfo(backupInfo));
        LOG.info("Disallowing incremental backups for backup {} due to table {}",
          backupInfo.getBackupId(), toDisallow);
      }
    }
    try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) {
      mutator.mutate(invalidatePuts);
    }

    // Clean up bulkloaded HFiles associated with the table
    List<byte[]> bulkloadedRows =
      readBulkloadRows(List.of(toDisallow)).stream().map(BulkLoad::getRowKey).toList();
    deleteBulkLoadedRows(bulkloadedRows);
  }

  /**
   * Updates status (state) of a backup session in the backup system table
   * @param info backup info

@@ -840,6 +875,24 @@ public ArrayList<BackupInfo> getBackupInfos(BackupState state) throws IOException
    }
  }

  private List<BackupInfo> getCompletedFullBackupsSortedByHistoryDesc() throws IOException {
Contributor:
Better to re-use an existing helper here. Note that the backupInfos are sorted naturally due to the scanning order.
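A sketch of that reuse, assuming the reviewer means the getBackupInfos(BackupState) method visible in the hunk above (an assumption; the comment does not name the helper):

```java
private List<BackupInfo> getCompletedFullBackupsSortedByHistoryDesc() throws IOException {
  // Reuse the existing state-filtered scan; results arrive in scan order,
  // which for the backup system table is already newest-first.
  return getBackupInfos(BackupState.COMPLETE).stream()
    .filter(info -> info.getType() == BackupType.FULL)
    .toList();
}
```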
    Scan scan = createScanForBackupHistory();
    List<BackupInfo> backups = new ArrayList<>();

    try (Table table = connection.getTable(tableName)) {
      ResultScanner scanner = table.getScanner(scan);
      Result res;
      while ((res = scanner.next()) != null) {
        res.advance();
        BackupInfo context = cellToBackupInfo(res.current());
        if (context.getState() == BackupState.COMPLETE && context.getType() == BackupType.FULL) {
          backups.add(context);
        }
      }
    }
    return BackupUtils.sortHistoryListDesc(backups);
  }

  /**
   * Write the current timestamps for each regionserver to backup system table after a successful
   * full or incremental backup. The saved timestamp is of the last log file that was backed up
@@ -0,0 +1,33 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

@InterfaceAudience.Public
@InterfaceStability.Evolving
public class IncrementalBackupsDisallowedException extends HBaseIOException {
  public IncrementalBackupsDisallowedException(BackupRequest request) {
Contributor:
For helping API users deal with this exception, I'd put the table list as an accessible field in the exception. So a user could do:
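Something along these lines (a sketch: getTables() is the suggested accessor and does not exist yet; BackupAdmin#backupTables and BackupRequest.Builder are the existing client API):

```java
try {
  backupAdmin.backupTables(request);
} catch (IncrementalBackupsDisallowedException e) {
  // Fall back to a FULL backup of the affected tables.
  BackupRequest fullRequest = new BackupRequest.Builder()
    .withBackupType(BackupType.FULL)
    .withTableList(e.getTables()) // hypothetical accessor
    .withTargetRootDir(request.getTargetRootDir())
    .build();
  backupAdmin.backupTables(fullRequest);
}
```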
    super("Could not take incremental backup for tables "
      + "%s because it is disallowed, please take a full backup instead"
        .formatted(request.getTableList()));
  }
}
@@ -496,7 +496,7 @@ public static String getTableBackupDir(String backupRootDir, String backupId,
   * @param historyList history list
   * @return sorted list of BackupCompleteData
   */
- public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
+ public static ArrayList<BackupInfo> sortHistoryListDesc(List<BackupInfo> historyList) {
Contributor:
I think this method can just be removed. The list coming from the scan is already sorted in descending order.
    ArrayList<BackupInfo> list = new ArrayList<>();
    TreeMap<String, BackupInfo> map = new TreeMap<>();
    for (BackupInfo h : historyList) {
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

@@ -53,6 +54,8 @@
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Test cases for backup system table API
 */

@@ -479,6 +482,49 @@ public void testBackupSetList() throws IOException {
    }
  }

  @Test
  public void testDisallowFurtherIncrementals() throws Exception {
    try (BackupSystemTable table = new BackupSystemTable(conn)) {
      TableName toInvalidate = TableName.valueOf("t1");
      List<TableName> t1 = Lists.newArrayList(toInvalidate, TableName.valueOf("t2"));
Contributor:
Your naming of the lists is a bit confusing: t1, t2, and t3 are lists of tables, not single tables.
      List<TableName> t2 = Lists.newArrayList(toInvalidate, TableName.valueOf("t3"));
      List<TableName> t3 = Lists.newArrayList(TableName.valueOf("t2"), TableName.valueOf("t3"));

      BackupInfo backup = createBackupInfo();
      backup.setState(BackupState.COMPLETE);

      backup.setTables(t1);
      backup.setBackupId("backup1");
      backup.setBackupRootDir("backup1");
      backup.setStartTs(0L);
      table.updateBackupInfo(backup);

      backup.setTables(t2);
      backup.setBackupId("backup2");
      backup.setBackupRootDir("backup2");
      backup.setStartTs(1L);
      table.updateBackupInfo(backup);

      backup.setTables(t3);
      backup.setBackupId("backup3");
      backup.setBackupRootDir("backup2");
      backup.setStartTs(2L);
      table.updateBackupInfo(backup);

      table.disallowFurtherIncrementals(toInvalidate);
      BackupInfo result = table.readBackupInfo("backup1");
      assertTrue(result.isDisallowFurtherIncrementals());

      table.disallowFurtherIncrementals(toInvalidate);
      result = table.readBackupInfo("backup2");
      assertTrue(result.isDisallowFurtherIncrementals());

      table.disallowFurtherIncrementals(toInvalidate);
      result = table.readBackupInfo("backup3");
      assertFalse(result.isDisallowFurtherIncrementals());
    }
  }

  private boolean compare(BackupInfo one, BackupInfo two) {
    return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
      && one.getBackupRootDir().equals(two.getBackupRootDir())
Contributor:
Nit: it might be unclear what the backupId refers to.