
Commit a9d1c2a

Author: Hernan Gelaf-Romer (committed)
HBASE-29240: Backup ancestry trees should have the ability to be invalidated
Parent: 65a6d8a

File tree

11 files changed (+344, -4 lines)


hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java

Lines changed: 48 additions & 2 deletions
@@ -18,15 +18,24 @@
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
+import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException;
 import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
 import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public final class BackupClientFactory {
+  private static final Logger LOG = LoggerFactory.getLogger(BackupClientFactory.class);
+
   private BackupClientFactory() {
   }
 
@@ -49,8 +58,45 @@ public static TableBackupClient create(Connection conn, String backupId, BackupR
     BackupType type = request.getBackupType();
     if (type == BackupType.FULL) {
       return new FullTableBackupClient(conn, backupId, request);
-    } else {
-      return new IncrementalTableBackupClient(conn, backupId, request);
     }
+
+    String latestFullBackup = getLatestFullBackupId(conn, request);
+
+    try (BackupAdmin admin = new BackupAdminImpl(conn)) {
+      boolean disallowFurtherIncrementals =
+        admin.getBackupInfo(latestFullBackup).isDisallowFurtherIncrementals();
+
+      if (!disallowFurtherIncrementals) {
+        return new IncrementalTableBackupClient(conn, backupId, request);
+      }
+
+      if (request.getFailOnDisallowedIncrementals()) {
+        throw new IncrementalBackupsDisallowedException(request);
+      }
+
+      LOG.info("Incremental backups disallowed for backupId {}, creating a full backup",
+        latestFullBackup);
+      return new FullTableBackupClient(conn, backupId, request);
+    }
+  }
+
+  private static String getLatestFullBackupId(Connection conn, BackupRequest request)
+    throws IOException {
+    try (BackupManager backupManager = new BackupManager(conn, conn.getConfiguration())) {
+      // Sorted in desc order by time
+      List<BackupInfo> backups = backupManager.getBackupHistory(true);
+
+      for (BackupInfo info : backups) {
+        if (
+          info.getType() == BackupType.FULL
+            && Objects.equals(info.getBackupRootDir(), request.getTargetRootDir())
+        ) {
+          return info.getBackupId();
+        }
+      }
+    }
+    throw new RuntimeException(
+      "Could not find a valid full backup for incremental request for tables"
+        + request.getTableList());
   }
 }

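With this change the factory no longer hands out an IncrementalTableBackupClient unconditionally: it first checks whether the latest full backup for the request's target root directory has been invalidated, and then either falls back to a full backup or fails fast when the new flag is set. Below is a minimal, hypothetical sketch of how a caller might exercise that flag; the table name and backup root are placeholders, and it assumes the existing BackupAdmin.backupTables(...) entry point, which creates its table backup client through this factory.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class IncrementalBackupRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      BackupAdmin admin = new BackupAdminImpl(conn)) {
      // Placeholder table and backup root dir.
      BackupRequest request = new BackupRequest.Builder()
        .withBackupType(BackupType.INCREMENTAL)
        .withTableList(List.of(TableName.valueOf("t1")))
        .withTargetRootDir("hdfs:///backup")
        // New in this change: fail instead of silently falling back to a full backup.
        .withFailOnDisallowedIncrementals(true)
        .build();
      try {
        String backupId = admin.backupTables(request);
        System.out.println("Backup finished: " + backupId);
      } catch (IncrementalBackupsDisallowedException e) {
        // The backup ancestry was invalidated (e.g. a covered table was truncated);
        // a fresh full backup is required before incrementals can resume.
        System.err.println(e.getMessage());
      }
    }
  }
}

With the flag left at its default of false, existing callers keep the silent fall-back-to-full behaviour that the factory logs.
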
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java

Lines changed: 12 additions & 0 deletions
@@ -170,6 +170,8 @@ public enum BackupPhase {
    */
   private boolean noChecksumVerify;
 
+  private boolean disallowFurtherIncrementals = false;
+
   public BackupInfo() {
     backupTableInfoMap = new HashMap<>();
   }
@@ -203,6 +205,14 @@ public void setBandwidth(long bandwidth) {
     this.bandwidth = bandwidth;
   }
 
+  public void setDisallowFurtherIncrementals(boolean disallowFurtherIncrementals) {
+    this.disallowFurtherIncrementals = disallowFurtherIncrementals;
+  }
+
+  public boolean isDisallowFurtherIncrementals() {
+    return disallowFurtherIncrementals;
+  }
+
   public void setNoChecksumVerify(boolean noChecksumVerify) {
     this.noChecksumVerify = noChecksumVerify;
   }
@@ -423,6 +433,7 @@ public BackupProtos.BackupInfo toProtosBackupInfo() {
     builder.setBackupType(BackupProtos.BackupType.valueOf(getType().name()));
     builder.setWorkersNumber(workers);
     builder.setBandwidth(bandwidth);
+    builder.setDisallowFurtherIncrementals(disallowFurtherIncrementals);
     return builder.build();
   }
 
@@ -518,6 +529,7 @@ public static BackupInfo fromProto(BackupProtos.BackupInfo proto) {
     context.setType(BackupType.valueOf(proto.getBackupType().name()));
     context.setWorkers(proto.getWorkersNumber());
     context.setBandwidth(proto.getBandwidth());
+    context.setDisallowFurtherIncrementals(proto.getDisallowFurtherIncrementals());
     return context;
   }
 
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMasterObserver.java

Lines changed: 28 additions & 1 deletion
@@ -41,7 +41,6 @@
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 /**
@@ -67,6 +66,17 @@ public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
     deleteBulkLoads(cfg, tableName, (ignored) -> true);
   }
 
+  @Override
+  public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (!BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("Skipping preTruncateTable hook since backup is disabled");
+      return;
+    }
+    disallowIncrementalBackups(ctx.getEnvironment(), tableName);
+  }
+
   @Override
   public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
     TableName tableName) throws IOException {
@@ -76,6 +86,7 @@ public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
       return;
     }
     deleteBulkLoads(cfg, tableName, (ignored) -> true);
+    disallowIncrementalBackups(ctx.getEnvironment(), tableName);
   }
 
   @Override
@@ -113,4 +124,20 @@ private void deleteBulkLoads(Configuration config, TableName tableName,
       tbl.deleteBulkLoadedRows(rowsToDelete);
     }
   }
+
+  private static void disallowIncrementalBackups(MasterCoprocessorEnvironment env,
+    TableName tableName) throws IOException {
+    Configuration conf = env.getConfiguration();
+    if (tableName.equals(BackupSystemTable.getTableName(conf))) {
+      return;
+    }
+
+    BackupSystemTable table = new BackupSystemTable(env.getConnection());
+    try {
+      table.startBackupExclusiveOperation();
+      table.disallowFurtherIncrementals(tableName);
+    } finally {
+      table.finishBackupExclusiveOperation();
+    }
+  }
 }

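With these hooks in place, truncating any table covered by a completed full backup marks that backup as no longer accepting incrementals. The following is a hedged, end-to-end sketch of the expected effect, assuming backup is enabled on the cluster; the table name "t1" and backup id "backup1" are placeholders for a table covered by an existing full backup.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TruncateInvalidationExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = conn.getAdmin();
      BackupSystemTable systemTable = new BackupSystemTable(conn)) {
      TableName table = TableName.valueOf("t1"); // placeholder table covered by "backup1"
      admin.disableTable(table);
      // Fires the pre/postTruncateTable hooks above; the table is re-enabled when done.
      admin.truncateTable(table, true);

      // "backup1" is a hypothetical id of the latest full backup containing "t1".
      BackupInfo latestFull = systemTable.readBackupInfo("backup1");
      System.out.println(latestFull.isDisallowFurtherIncrementals()); // expected: true
    }
  }
}
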
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java

Lines changed: 10 additions & 0 deletions
@@ -35,6 +35,11 @@ public Builder() {
       request = new BackupRequest();
     }
 
+    public Builder withFailOnDisallowedIncrementals(boolean failOnDisallowedIncrementals) {
+      request.failOnDisallowedIncrementals = failOnDisallowedIncrementals;
+      return this;
+    }
+
     public Builder withBackupType(BackupType type) {
       request.setBackupType(type);
       return this;
@@ -89,10 +94,15 @@ public BackupRequest build() {
   private boolean noChecksumVerify = false;
   private String backupSetName;
   private String yarnPoolName;
+  private boolean failOnDisallowedIncrementals = false;
 
   private BackupRequest() {
   }
 
+  public boolean getFailOnDisallowedIncrementals() {
+    return failOnDisallowedIncrementals;
+  }
+
   private BackupRequest setBackupType(BackupType type) {
     this.type = type;
     return this;
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java

Lines changed: 5 additions & 0 deletions
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hbase.backup.BackupHFileCleaner;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupMasterObserver;
 import org.apache.hadoop.hbase.backup.BackupObserver;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.BackupType;
@@ -131,6 +132,10 @@ public static void decorateMasterConfiguration(Configuration conf) {
         + " Added master procedure manager: {}. Added master observer: {}",
         cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName(), observerClass);
     }
+
+    String observers = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
+      (observers == null ? "" : observers + ",") + BackupMasterObserver.class.getName());
   }
 
   /**

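The decorateMasterConfiguration change appends BackupMasterObserver to hbase.coprocessor.master.classes so the truncate hooks above are actually loaded by the master. Here is a small sketch of that append behaviour in isolation; com.example.MyMasterObserver stands in for any observer an operator may already have configured.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupMasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class MasterObserverWiringExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A pre-existing, hypothetical master observer configured by the operator.
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, "com.example.MyMasterObserver");

    // Mirror of the appended snippet: keep existing observers, add the backup one.
    String observers = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      (observers == null ? "" : observers + ",") + BackupMasterObserver.class.getName());

    // Prints: com.example.MyMasterObserver,org.apache.hadoop.hbase.backup.BackupMasterObserver
    System.out.println(conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY));
  }
}
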
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java

Lines changed: 53 additions & 0 deletions
@@ -277,6 +277,41 @@ public void close() {
     // do nothing
   }
 
+  /**
+   * @param toDisallow Any most recent full back containing this table will be marked as disallowing
+   *                   further incrementals
+   */
+  public void disallowFurtherIncrementals(TableName toDisallow) throws IOException {
+    List<BackupInfo> fullTableBackups = getCompletedFullBackupsSortedByHistoryDesc();
+    List<Put> invalidatePuts = new ArrayList<>(fullTableBackups.size());
+    Set<String> backupRootDirsSeen = new HashSet<>(fullTableBackups.size());
+
+    for (BackupInfo backupInfo : fullTableBackups) {
+      // to minimize the amount of mutations against the backup system table, we only
+      // need to update the most recent full backups that allow incremental backups
+      if (
+        backupInfo.getTables().contains(toDisallow) && backupInfo.getType() == BackupType.FULL
+          && !backupInfo.isDisallowFurtherIncrementals()
+          && !backupRootDirsSeen.contains(backupInfo.getBackupRootDir())
+      ) {
+        backupInfo.setDisallowFurtherIncrementals(true);
+        backupRootDirsSeen.add(backupInfo.getBackupRootDir());
+        invalidatePuts.add(createPutForBackupInfo(backupInfo));
+        LOG.info("Disallowing incremental backups for backup {} due to table {}",
+          backupInfo.getBackupId(), toDisallow);
+      }
+    }
+
+    try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) {
+      mutator.mutate(invalidatePuts);
+    }
+
+    // Clean up bulkloaded HFiles associated with the table
+    List<byte[]> bulkloadedRows =
+      readBulkloadRows(List.of(toDisallow)).stream().map(BulkLoad::getRowKey).toList();
+    deleteBulkLoadedRows(bulkloadedRows);
+  }
+
   /**
    * Updates status (state) of a backup session in backup system table table
    * @param info backup info
@@ -840,6 +875,24 @@ public ArrayList<BackupInfo> getBackupInfos(BackupState state) throws IOExceptio
     }
   }
 
+  private List<BackupInfo> getCompletedFullBackupsSortedByHistoryDesc() throws IOException {
+    Scan scan = createScanForBackupHistory();
+    List<BackupInfo> backups = new ArrayList<>();
+
+    try (Table table = connection.getTable(tableName)) {
+      ResultScanner scanner = table.getScanner(scan);
+      Result res;
+      while ((res = scanner.next()) != null) {
+        res.advance();
+        BackupInfo context = cellToBackupInfo(res.current());
+        if (context.getState() == BackupState.COMPLETE && context.getType() == BackupType.FULL) {
+          backups.add(context);
+        }
+      }
+    }
+    return BackupUtils.sortHistoryListDesc(backups);
+  }
+
   /**
    * Write the current timestamps for each regionserver to backup system table after a successful
    * full or incremental backup. The saved timestamp is of the last log file that was backed up
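
Because only the most recent non-invalidated full backup per backup root directory is rewritten, the flag can be inspected afterwards through the ordinary backup history. A hedged sketch, assuming the pre-existing BackupAdmin.getHistory(int) API; the history limit of 10 is arbitrary.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class InspectInvalidatedBackupsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      BackupAdmin admin = new BackupAdminImpl(conn)) {
      // List recent backups and report which full backups have been invalidated.
      for (BackupInfo info : admin.getHistory(10)) {
        if (info.getType() == BackupType.FULL) {
          System.out.printf("%s (root=%s) disallowFurtherIncrementals=%s%n",
            info.getBackupId(), info.getBackupRootDir(), info.isDisallowFurtherIncrementals());
        }
      }
    }
  }
}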

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupsDisallowedException.java

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class IncrementalBackupsDisallowedException extends HBaseIOException {
+  public IncrementalBackupsDisallowedException(BackupRequest request) {
+    super("Could not take incremental backup for tables "
+      + "%s because is disallowed, please take a full backup instead"
+        .formatted(request.getTableList()));
+  }
+}

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java

Lines changed: 1 addition & 1 deletion
@@ -496,7 +496,7 @@ public static String getTableBackupDir(String backupRootDir, String backupId,
    * @param historyList history list
    * @return sorted list of BackupCompleteData
    */
-  public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
+  public static ArrayList<BackupInfo> sortHistoryListDesc(List<BackupInfo> historyList) {
     ArrayList<BackupInfo> list = new ArrayList<>();
     TreeMap<String, BackupInfo> map = new TreeMap<>();
     for (BackupInfo h : historyList) {

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java

Lines changed: 46 additions & 0 deletions
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -53,6 +54,8 @@
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
 /**
  * Test cases for backup system table API
  */
@@ -479,6 +482,49 @@ public void testBackupSetList() throws IOException {
     }
   }
 
+  @Test
+  public void testDisallowFurtherIncrementals() throws Exception {
+    try (BackupSystemTable table = new BackupSystemTable(conn)) {
+      TableName toInvalidate = TableName.valueOf("t1");
+      List<TableName> t1 = Lists.newArrayList(toInvalidate, TableName.valueOf("t2"));
+      List<TableName> t2 = Lists.newArrayList(toInvalidate, TableName.valueOf("t3"));
+      List<TableName> t3 = Lists.newArrayList(TableName.valueOf("t2"), TableName.valueOf("t3"));
+
+      BackupInfo backup = createBackupInfo();
+      backup.setState(BackupState.COMPLETE);
+
+      backup.setTables(t1);
+      backup.setBackupId("backup1");
+      backup.setBackupRootDir("backup1");
+      backup.setStartTs(0L);
+      table.updateBackupInfo(backup);
+
+      backup.setTables(t2);
+      backup.setBackupId("backup2");
+      backup.setBackupRootDir("backup2");
+      backup.setStartTs(1L);
+      table.updateBackupInfo(backup);
+
+      backup.setTables(t3);
+      backup.setBackupId("backup3");
+      backup.setBackupRootDir("backup2");
+      backup.setStartTs(2L);
+      table.updateBackupInfo(backup);
+
+      table.disallowFurtherIncrementals(toInvalidate);
+      BackupInfo result = table.readBackupInfo("backup1");
+      assertTrue(result.isDisallowFurtherIncrementals());
+
+      table.disallowFurtherIncrementals(toInvalidate);
+      result = table.readBackupInfo("backup2");
+      assertTrue(result.isDisallowFurtherIncrementals());
+
+      table.disallowFurtherIncrementals(toInvalidate);
+      result = table.readBackupInfo("backup3");
+      assertFalse(result.isDisallowFurtherIncrementals());
+    }
+  }
+
   private boolean compare(BackupInfo one, BackupInfo two) {
     return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
       && one.getBackupRootDir().equals(two.getBackupRootDir())
