@@ -18,15 +18,24 @@
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@InterfaceAudience.Private
public final class BackupClientFactory {
private static final Logger LOG = LoggerFactory.getLogger(BackupClientFactory.class);

private BackupClientFactory() {
}

@@ -49,8 +58,45 @@ public static TableBackupClient create(Connection conn, String backupId, BackupR
BackupType type = request.getBackupType();
if (type == BackupType.FULL) {
return new FullTableBackupClient(conn, backupId, request);
} else {
return new IncrementalTableBackupClient(conn, backupId, request);
}

String latestFullBackup = getLatestFullBackupId(conn, request);

try (BackupAdmin admin = new BackupAdminImpl(conn)) {
boolean disallowFurtherIncrementals =
admin.getBackupInfo(latestFullBackup).isDisallowFurtherIncrementals();

if (!disallowFurtherIncrementals) {
return new IncrementalTableBackupClient(conn, backupId, request);
}

if (request.getFailOnDisallowedIncrementals()) {
throw new IncrementalBackupsDisallowedException(request);
}

LOG.info("Incremental backups disallowed for backupId {}, creating a full backup",
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: it might be unclear what the backupId refers to.

latestFullBackup);
return new FullTableBackupClient(conn, backupId, request);
}
}

[Contributor] (on getLatestFullBackupId)
The log line above made me realize this approach is wrong.

An incremental backup might build on top of multiple FULL backups: it includes all tables for which a FULL backup exists in the backup root. For example, given tables T1, T2, T3:

  • Timestamp 0: Full backup: T1, T2
  • Timestamp 1: Full backup: T1, T3
  • Timestamp 2: Incr backup: T1, T2, T3

If we were to truncate T2, the full backup at time 0 would get the disallowedIncremental marker, but this method would only check the full backup at time 1. Correct behavior would be to check all FULL backups (from newest to oldest) until all tables present in the backup root are covered. TableBackupClient#getAncestors might be useful here.

Realizing the complexity here, we have two options:

  • Current approach, meaning that creating a single new FULL backup of T2 would re-enable incremental backups.
  • A somewhat simpler approach, though with more code changes: move the "disallow incrementals" marker to the backup-root-level data (similar to e.g. backupStartCode). This would mean only a single GET has to be done to find out whether or not incrementals are allowed.

private static String getLatestFullBackupId(Connection conn, BackupRequest request)
throws IOException {
try (BackupManager backupManager = new BackupManager(conn, conn.getConfiguration())) {
// Sorted in desc order by time
List<BackupInfo> backups = backupManager.getBackupHistory(true);

for (BackupInfo info : backups) {
if (
info.getType() == BackupType.FULL
&& Objects.equals(info.getBackupRootDir(), request.getTargetRootDir())
) {
return info.getBackupId();
}
}
}
throw new RuntimeException(
"Could not find a valid full backup for incremental request for tables"
+ request.getTableList());
}
}
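As an aside, here is a minimal sketch of the reviewer's first idea above: walk FULL backups from newest to oldest until every requested table is covered. The helper name getCoveringFullBackupIds is hypothetical and not part of this PR; extra imports (ArrayList, HashSet, Set, TableName) are omitted, while the BackupManager and BackupInfo calls mirror the code above.

    // Hypothetical helper, not part of this change: walk FULL backups newest to
    // oldest, keeping each backup that covers at least one still-uncovered table,
    // until every requested table is covered by some FULL backup.
    private static List<String> getCoveringFullBackupIds(Connection conn, BackupRequest request)
      throws IOException {
      List<String> covering = new ArrayList<>();
      Set<TableName> uncovered = new HashSet<>(request.getTableList());
      try (BackupManager backupManager = new BackupManager(conn, conn.getConfiguration())) {
        // Sorted in desc order by time, as in getLatestFullBackupId above
        for (BackupInfo info : backupManager.getBackupHistory(true)) {
          if (
            info.getType() != BackupType.FULL
              || !Objects.equals(info.getBackupRootDir(), request.getTargetRootDir())
          ) {
            continue;
          }
          // removeAll returns true if this backup covered at least one new table
          if (uncovered.removeAll(info.getTables())) {
            covering.add(info.getBackupId());
          }
          if (uncovered.isEmpty()) {
            return covering;
          }
        }
      }
      throw new IOException("Could not find FULL backups covering tables " + uncovered);
    }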
----------------------------------------------------------------------
@@ -170,6 +170,8 @@ public enum BackupPhase {
*/
private boolean noChecksumVerify;

private boolean disallowFurtherIncrementals = false;

public BackupInfo() {
backupTableInfoMap = new HashMap<>();
}
@@ -203,6 +205,14 @@ public void setBandwidth(long bandwidth) {
this.bandwidth = bandwidth;
}

public void setDisallowFurtherIncrementals(boolean disallowFurtherIncrementals) {
this.disallowFurtherIncrementals = disallowFurtherIncrementals;
}

public boolean isDisallowFurtherIncrementals() {
return disallowFurtherIncrementals;
}

public void setNoChecksumVerify(boolean noChecksumVerify) {
this.noChecksumVerify = noChecksumVerify;
}
@@ -423,6 +433,7 @@ public BackupProtos.BackupInfo toProtosBackupInfo() {
builder.setBackupType(BackupProtos.BackupType.valueOf(getType().name()));
builder.setWorkersNumber(workers);
builder.setBandwidth(bandwidth);
builder.setDisallowFurtherIncrementals(disallowFurtherIncrementals);
return builder.build();
}

@@ -518,6 +529,7 @@ public static BackupInfo fromProto(BackupProtos.BackupInfo proto) {
context.setType(BackupType.valueOf(proto.getBackupType().name()));
context.setWorkers(proto.getWorkersNumber());
context.setBandwidth(proto.getBandwidth());
context.setDisallowFurtherIncrementals(proto.getDisallowFurtherIncrementals());
return context;
}

----------------------------------------------------------------------
@@ -67,6 +67,17 @@ public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
deleteBulkLoads(cfg, tableName, (ignored) -> true);
}

[Contributor] (on preTruncateTable)
Should add a check whether the backup system is enabled or not:

    if (!BackupManager.isBackupEnabled(cfg)) {
      LOG.debug("Skipping preTruncateTable hook since backup is disabled");
      return;
    }

@Override
public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
Configuration cfg = ctx.getEnvironment().getConfiguration();
if (!BackupManager.isBackupEnabled(cfg)) {
LOG.debug("Skipping preTruncateTable hook since backup is disabled");
return;
}
disallowIncrementalBackups(ctx.getEnvironment(), tableName);
}

@Override
public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
@@ -76,6 +87,7 @@ public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
return;
}
deleteBulkLoads(cfg, tableName, (ignored) -> true);
disallowIncrementalBackups(ctx.getEnvironment(), tableName);
}

@Override
@@ -113,4 +125,20 @@ private void deleteBulkLoads(Configuration config, TableName tableName,
tbl.deleteBulkLoadedRows(rowsToDelete);
}
}

private static void disallowIncrementalBackups(MasterCoprocessorEnvironment env,
TableName tableName) throws IOException {
Configuration conf = env.getConfiguration();
if (tableName.equals(BackupSystemTable.getTableName(conf))) {
return;
}

BackupSystemTable table = new BackupSystemTable(env.getConnection());
try {
table.startBackupExclusiveOperation();
[Contributor Author] (on startBackupExclusiveOperation)
Does it make more sense to include lock acquisition/release inside the disallowFurtherIncrementals method? That's not the pattern that we follow now, but I wonder if we should, to make sure each caller isn't required to take out the lock.

[Contributor]
Hmm, do we actually need to use the lock here? Thinking out loud...

Yes: this prevents a truncation happening while a backup is running, which might cause incomplete data to be stored. In that case, it makes sense that deleteBulkLoads is also included in this lock. So the lock should stay outside of disallowFurtherIncrementals.

No: the lock system seems intended to keep multiple backup operations from overlapping. Using it here would mean it also impacts a non-backup-related system. Is it really bad if a truncate happens during a backup? How is it different from a bulk load or regular mutations happening during a backup (which are allowed)?

Currently leaning towards not needing a lock here.

table.disallowFurtherIncrementals(tableName);
} finally {
table.finishBackupExclusiveOperation();
[Contributor Author (@hgromer, Apr 7, 2025)] (on finishBackupExclusiveOperation)
Any exceptions will bubble up to the caller. If modern backups are enabled and we fail to invalidate the table ancestry, it will effectively block the truncation. This sounds correct; otherwise we're stuck in a situation where the table has been truncated, and the operator creating a new backup has no way of knowing at backup creation time.

[Contributor]
Sounds good. And to add to that: if the truncation were to fail at some other point (after we already invalidated the backup), there's no harm done. The only effect is that a full backup will be done where an incremental one would have sufficed.

}
}
}
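To make the author's alternative from the lock discussion above concrete, here is a hedged sketch that folds lock acquisition and release into the BackupSystemTable call itself, so callers need not manage the lock. The wrapper method name is hypothetical; startBackupExclusiveOperation, finishBackupExclusiveOperation, and disallowFurtherIncrementals are the methods used in this diff.

    // Hypothetical wrapper on BackupSystemTable: manage the exclusive-operation
    // lock internally so each caller doesn't have to take it out.
    public void disallowFurtherIncrementalsExclusively(TableName toDisallow) throws IOException {
      startBackupExclusiveOperation();
      try {
        disallowFurtherIncrementals(toDisallow);
      } finally {
        finishBackupExclusiveOperation();
      }
    }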
----------------------------------------------------------------------
@@ -35,6 +35,11 @@ public Builder() {
request = new BackupRequest();
}

public Builder withFailOnDisallowedIncrementals(boolean failOnDisallowedIncrementals) {
request.failOnDisallowedIncrementals = failOnDisallowedIncrementals;
return this;
}

public Builder withBackupType(BackupType type) {
request.setBackupType(type);
return this;
@@ -89,10 +94,15 @@ public BackupRequest build() {
private boolean noChecksumVerify = false;
private String backupSetName;
private String yarnPoolName;
private boolean failOnDisallowedIncrementals = false;

private BackupRequest() {
}

public boolean getFailOnDisallowedIncrementals() {
return failOnDisallowedIncrementals;
}

private BackupRequest setBackupType(BackupType type) {
this.type = type;
return this;
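For illustration, a hedged sketch of how a caller might opt in to the new flag. withBackupType and withFailOnDisallowedIncrementals appear in this diff, while withTableList and withTargetRootDir are assumed from the existing Builder API; tables and backupRoot are placeholder variables.

    // Sketch: request an incremental backup that fails fast instead of silently
    // falling back to a full backup when incrementals are disallowed.
    BackupRequest request = new BackupRequest.Builder()
      .withBackupType(BackupType.INCREMENTAL)
      .withTableList(tables)          // assumed existing builder method
      .withTargetRootDir(backupRoot)  // assumed existing builder method
      .withFailOnDisallowedIncrementals(true)
      .build();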
----------------------------------------------------------------------
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.backup.BackupHFileCleaner;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.BackupMasterObserver;
import org.apache.hadoop.hbase.backup.BackupObserver;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.BackupType;
@@ -131,6 +132,10 @@ public static void decorateMasterConfiguration(Configuration conf) {
+ " Added master procedure manager: {}. Added master observer: {}",
cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName(), observerClass);
}

String observers = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
(observers == null ? "" : observers + ",") + BackupMasterObserver.class.getName());
}

----------------------------------------------------------------------
@@ -277,6 +277,41 @@ public void close() {
// do nothing
}

[Contributor] (on the @param line below)
This line makes little sense. Typos?

/**
* @param toDisallow any most recent full backup containing this table will be marked as disallowing
* further incrementals
*/
public void disallowFurtherIncrementals(TableName toDisallow) throws IOException {
List<BackupInfo> fullTableBackups = getCompletedFullBackupsSortedByHistoryDesc();
List<Put> invalidatePuts = new ArrayList<>(fullTableBackups.size());
Set<String> backupRootDirsSeen = new HashSet<>(fullTableBackups.size());

for (BackupInfo backupInfo : fullTableBackups) {
// to minimize the amount of mutations against the backup system table, we only
// need to update the most recent full backups that allow incremental backups
if (
backupInfo.getTables().contains(toDisallow) && backupInfo.getType() == BackupType.FULL
&& !backupInfo.isDisallowFurtherIncrementals()
&& !backupRootDirsSeen.contains(backupInfo.getBackupRootDir())
) {
backupInfo.setDisallowFurtherIncrementals(true);
backupRootDirsSeen.add(backupInfo.getBackupRootDir());
invalidatePuts.add(createPutForBackupInfo(backupInfo));
LOG.info("Disallowing incremental backups for backup {} due to table {}",
backupInfo.getBackupId(), toDisallow);
}
}

[Contributor] (on the filtering in the loop above)
We also only need to consider the most recent FULL backup per backup root.

[Contributor Author (@hgromer, Apr 15, 2025)]
Makes sense, added an additional check to make sure we aren't adding Puts for older full backups.

[Contributor] (on the isDisallowFurtherIncrementals() check)
This check can be moved to an inner check. In case we truncate the same table multiple times in a row, the current behavior will keep adjusting older and older backupInfos.

try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) {
mutator.mutate(invalidatePuts);
}

// Clean up bulkloaded HFiles associated with the table
List<byte[]> bulkloadedRows =
readBulkloadRows(List.of(toDisallow)).stream().map(BulkLoad::getRowKey).toList();
deleteBulkLoadedRows(bulkloadedRows);
}

/**
* Updates status (state) of a backup session in the backup system table
* @param info backup info
@@ -840,6 +875,24 @@ public ArrayList<BackupInfo> getBackupInfos(BackupState state) throws IOExceptio
}
}

[Contributor] (on getCompletedFullBackupsSortedByHistoryDesc)
Better to re-use BackupSystemTable#getBackupInfos. Note that the backupInfos are sorted naturally due to the scanning order.

private List<BackupInfo> getCompletedFullBackupsSortedByHistoryDesc() throws IOException {

Scan scan = createScanForBackupHistory();
List<BackupInfo> backups = new ArrayList<>();

try (Table table = connection.getTable(tableName);
ResultScanner scanner = table.getScanner(scan)) {
Result res;
while ((res = scanner.next()) != null) {
res.advance();
BackupInfo context = cellToBackupInfo(res.current());
if (context.getState() == BackupState.COMPLETE && context.getType() == BackupType.FULL) {
backups.add(context);
}
}
}
return BackupUtils.sortHistoryListDesc(backups);
}
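A minimal sketch of the reviewer's suggested rewrite, assuming getBackupInfos(BackupState) (whose signature appears in the hunk header above) returns results in scan order, newest first; this is an illustration, not part of the PR.

    // Hypothetical rewrite reusing getBackupInfos; relies on the scan order being
    // newest-first, so no explicit re-sort is needed.
    private List<BackupInfo> getCompletedFullBackupsSortedByHistoryDesc() throws IOException {
      List<BackupInfo> fullBackups = new ArrayList<>();
      for (BackupInfo info : getBackupInfos(BackupState.COMPLETE)) {
        if (info.getType() == BackupType.FULL) {
          fullBackups.add(info);
        }
      }
      return fullBackups;
    }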

/**
* Write the current timestamps for each regionserver to backup system table after a successful
* full or incremental backup. The saved timestamp is of the last log file that was backed up
----------------------------------------------------------------------
@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.backup.impl;

import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

@InterfaceAudience.Public
@InterfaceStability.Evolving
public class IncrementalBackupsDisallowedException extends HBaseIOException {
[Contributor] (on the constructor)
For helping API users deal with this exception, I'd put the table list as an accessible field in the exception, so a user could do:

    try {
      makeHbaseIncrBackup();
    } catch (IncrementalBackupsDisallowedException e) {
      makeHbaseFullBackup(e.tables());
      makeHbaseIncrBackup();
    }

public IncrementalBackupsDisallowedException(BackupRequest request) {
super(("Could not take incremental backup for tables %s because it is disallowed, "
+ "please take a full backup instead").formatted(request.getTableList()));
}
}
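A hedged sketch of the reviewer's suggestion above: expose the affected tables on the exception. The tables() accessor and backing field are illustrative only; imports (java.util.List, TableName) are omitted.

    // Hypothetical variant carrying the table list, per the review suggestion above.
    @InterfaceAudience.Public
    @InterfaceStability.Evolving
    public class IncrementalBackupsDisallowedException extends HBaseIOException {
      private final List<TableName> tables;

      public IncrementalBackupsDisallowedException(BackupRequest request) {
        super(("Could not take incremental backup for tables %s because it is disallowed, "
          + "please take a full backup instead").formatted(request.getTableList()));
        this.tables = List.copyOf(request.getTableList());
      }

      /** Tables for which the incremental backup was requested. */
      public List<TableName> tables() {
        return tables;
      }
    }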
----------------------------------------------------------------------
@@ -496,7 +496,7 @@ public static String getTableBackupDir(String backupRootDir, String backupId,
* @param historyList history list
* @return sorted list of BackupCompleteData
*/
public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
public static ArrayList<BackupInfo> sortHistoryListDesc(List<BackupInfo> historyList) {
[Contributor] (on sortHistoryListDesc)
I think this method can just be removed. The list coming from BackupSystemTable#getBackupInfos is already sorted due to the scanning order. (If I'm mistaken here, the javadoc for getBackupInfos should be updated.)

ArrayList<BackupInfo> list = new ArrayList<>();
TreeMap<String, BackupInfo> map = new TreeMap<>();
for (BackupInfo h : historyList) {
----------------------------------------------------------------------
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@@ -53,6 +54,8 @@
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
* Test cases for backup system table API
*/
@@ -479,6 +482,49 @@ public void testBackupSetList() throws IOException {
}
}

@Test
public void testDisallowFurtherIncrementals() throws Exception {
try (BackupSystemTable table = new BackupSystemTable(conn)) {
TableName toInvalidate = TableName.valueOf("t1");
List<TableName> t1 = Lists.newArrayList(toInvalidate, TableName.valueOf("t2"));
[Contributor] (on the list names)
Your naming of the lists is a bit confusing. Suggestion: t1_t2, t1_t3, ...

List<TableName> t2 = Lists.newArrayList(toInvalidate, TableName.valueOf("t3"));
List<TableName> t3 = Lists.newArrayList(TableName.valueOf("t2"), TableName.valueOf("t3"));

BackupInfo backup = createBackupInfo();
backup.setState(BackupState.COMPLETE);

backup.setTables(t1);
backup.setBackupId("backup1");
backup.setBackupRootDir("backup1");
backup.setStartTs(0L);
table.updateBackupInfo(backup);

backup.setTables(t2);
backup.setBackupId("backup2");
backup.setBackupRootDir("backup2");
backup.setStartTs(1L);
table.updateBackupInfo(backup);

backup.setTables(t3);
backup.setBackupId("backup3");
backup.setBackupRootDir("backup2");
backup.setStartTs(2L);
table.updateBackupInfo(backup);

table.disallowFurtherIncrementals(toInvalidate);
BackupInfo result = table.readBackupInfo("backup1");
assertTrue(result.isDisallowFurtherIncrementals());

table.disallowFurtherIncrementals(toInvalidate);
result = table.readBackupInfo("backup2");
assertTrue(result.isDisallowFurtherIncrementals());

table.disallowFurtherIncrementals(toInvalidate);
result = table.readBackupInfo("backup3");
assertFalse(result.isDisallowFurtherIncrementals());
}
}

private boolean compare(BackupInfo one, BackupInfo two) {
return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
&& one.getBackupRootDir().equals(two.getBackupRootDir())