Skip to content

Commit 6ce9fd1

Browse files
author
Hernan Gelaf-Romer
committed
HBASE-29240: Backup ancestry trees should have the ability to be invalidated
1 parent 78f0101 commit 6ce9fd1

File tree

11 files changed

+371
-3
lines changed

11 files changed

+371
-3
lines changed

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java

Lines changed: 48 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,15 +18,24 @@
1818
package org.apache.hadoop.hbase.backup;
1919

2020
import java.io.IOException;
21+
import java.util.List;
22+
import java.util.Objects;
2123
import org.apache.hadoop.conf.Configuration;
24+
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
25+
import org.apache.hadoop.hbase.backup.impl.BackupManager;
2226
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
27+
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupsDisallowedException;
2328
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
2429
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
2530
import org.apache.hadoop.hbase.client.Connection;
2631
import org.apache.yetus.audience.InterfaceAudience;
32+
import org.slf4j.Logger;
33+
import org.slf4j.LoggerFactory;
2734

2835
@InterfaceAudience.Private
2936
public final class BackupClientFactory {
37+
private static final Logger LOG = LoggerFactory.getLogger(BackupClientFactory.class);
38+
3039
private BackupClientFactory() {
3140
}
3241

@@ -49,8 +58,45 @@ public static TableBackupClient create(Connection conn, String backupId, BackupR
4958
BackupType type = request.getBackupType();
5059
if (type == BackupType.FULL) {
5160
return new FullTableBackupClient(conn, backupId, request);
52-
} else {
53-
return new IncrementalTableBackupClient(conn, backupId, request);
5461
}
62+
63+
String latestFullBackup = getLatestFullBackupId(conn, request);
64+
65+
try (BackupAdmin admin = new BackupAdminImpl(conn)) {
66+
boolean disallowFurtherIncrementals =
67+
admin.getBackupInfo(latestFullBackup).isDisallowFurtherIncrementals();
68+
69+
if (!disallowFurtherIncrementals) {
70+
return new IncrementalTableBackupClient(conn, backupId, request);
71+
}
72+
73+
if (request.getFailOnDisallowedIncrementals()) {
74+
throw new IncrementalBackupsDisallowedException(request);
75+
}
76+
77+
LOG.info("Incremental backups disallowed for backupId {}, creating a full backup",
78+
latestFullBackup);
79+
return new FullTableBackupClient(conn, backupId, request);
80+
}
81+
}
82+
83+
private static String getLatestFullBackupId(Connection conn, BackupRequest request)
84+
throws IOException {
85+
try (BackupManager backupManager = new BackupManager(conn, conn.getConfiguration())) {
86+
// Sorted in desc order by time
87+
List<BackupInfo> backups = backupManager.getBackupHistory(true);
88+
89+
for (BackupInfo info : backups) {
90+
if (
91+
info.getType() == BackupType.FULL
92+
&& Objects.equals(info.getBackupRootDir(), request.getTargetRootDir())
93+
) {
94+
return info.getBackupId();
95+
}
96+
}
97+
}
98+
throw new RuntimeException(
99+
"Could not find a valid full backup for incremental request for tables"
100+
+ request.getTableList());
55101
}
56102
}

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -170,6 +170,8 @@ public enum BackupPhase {
170170
*/
171171
private boolean noChecksumVerify;
172172

173+
private boolean disallowFurtherIncrementals = false;
174+
173175
public BackupInfo() {
174176
backupTableInfoMap = new HashMap<>();
175177
}
@@ -203,6 +205,14 @@ public void setBandwidth(long bandwidth) {
203205
this.bandwidth = bandwidth;
204206
}
205207

208+
public void setDisallowFurtherIncrementals(boolean disallowFurtherIncrementals) {
209+
this.disallowFurtherIncrementals = disallowFurtherIncrementals;
210+
}
211+
212+
public boolean isDisallowFurtherIncrementals() {
213+
return disallowFurtherIncrementals;
214+
}
215+
206216
public void setNoChecksumVerify(boolean noChecksumVerify) {
207217
this.noChecksumVerify = noChecksumVerify;
208218
}
@@ -423,6 +433,7 @@ public BackupProtos.BackupInfo toProtosBackupInfo() {
423433
builder.setBackupType(BackupProtos.BackupType.valueOf(getType().name()));
424434
builder.setWorkersNumber(workers);
425435
builder.setBandwidth(bandwidth);
436+
builder.setDisallowFurtherIncrementals(disallowFurtherIncrementals);
426437
return builder.build();
427438
}
428439

@@ -518,6 +529,7 @@ public static BackupInfo fromProto(BackupProtos.BackupInfo proto) {
518529
context.setType(BackupType.valueOf(proto.getBackupType().name()));
519530
context.setWorkers(proto.getWorkersNumber());
520531
context.setBandwidth(proto.getBandwidth());
532+
context.setDisallowFurtherIncrementals(proto.getDisallowFurtherIncrementals());
521533
return context;
522534
}
523535

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Master observer that invalidates backup ancestry on destructive table operations. When a table
 * is truncated, any incremental backup chained off an earlier full backup of that table would be
 * inconsistent, so the newest full backup covering the table is marked as disallowing further
 * incrementals.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class BackupMasterObserver implements MasterObserver, MasterCoprocessor {

  @Override
  public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName) throws IOException {
    disallowIncrementalBackups(ctx.getEnvironment(), tableName);
  }

  @Override
  public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName) throws IOException {
    // Also runs post-truncate so a backup taken between pre and post hooks is still invalidated.
    disallowIncrementalBackups(ctx.getEnvironment(), tableName);
  }

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  /**
   * Marks the relevant full backups for {@code tableName} as disallowing further incrementals.
   * No-op for the backup system table itself, or when backups are disabled.
   */
  private static void disallowIncrementalBackups(MasterCoprocessorEnvironment env,
    TableName tableName) throws IOException {
    Configuration conf = env.getConfiguration();
    if (
      tableName.equals(BackupSystemTable.getTableName(conf)) || !BackupManager.isBackupEnabled(conf)
    ) {
      return;
    }

    // BackupSystemTable is AutoCloseable; close it deterministically instead of leaking it.
    try (BackupSystemTable table = new BackupSystemTable(env.getConnection())) {
      table.startBackupExclusiveOperation();
      try {
        table.disallowFurtherIncrementals(tableName);
      } finally {
        // Release the exclusive lock even if the invalidation itself fails.
        table.finishBackupExclusiveOperation();
      }
    }
  }
}

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,11 @@ public Builder() {
3535
request = new BackupRequest();
3636
}
3737

38+
public Builder withFailOnDisallowedIncrementals(boolean failOnDisallowedIncrementals) {
39+
request.failOnDisallowedIncrementals = failOnDisallowedIncrementals;
40+
return this;
41+
}
42+
3843
public Builder withBackupType(BackupType type) {
3944
request.setBackupType(type);
4045
return this;
@@ -89,10 +94,15 @@ public BackupRequest build() {
8994
private boolean noChecksumVerify = false;
9095
private String backupSetName;
9196
private String yarnPoolName;
97+
private boolean failOnDisallowedIncrementals = false;
9298

9399
private BackupRequest() {
94100
}
95101

102+
public boolean getFailOnDisallowedIncrementals() {
103+
return failOnDisallowedIncrementals;
104+
}
105+
96106
private BackupRequest setBackupType(BackupType type) {
97107
this.type = type;
98108
return this;

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030
import org.apache.hadoop.hbase.backup.BackupHFileCleaner;
3131
import org.apache.hadoop.hbase.backup.BackupInfo;
3232
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
33+
import org.apache.hadoop.hbase.backup.BackupMasterObserver;
3334
import org.apache.hadoop.hbase.backup.BackupObserver;
3435
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
3536
import org.apache.hadoop.hbase.backup.BackupType;
@@ -125,6 +126,10 @@ public static void decorateMasterConfiguration(Configuration conf) {
125126
+ "Added master procedure manager: {}",
126127
cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
127128
}
129+
130+
String observers = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
131+
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
132+
(observers == null ? "" : observers + ",") + BackupMasterObserver.class.getName());
128133
}
129134

130135
/**

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -277,6 +277,32 @@ public void close() {
277277
// do nothing
278278
}
279279

280+
public void disallowFurtherIncrementals(TableName toDisallow) throws IOException {
281+
List<BackupInfo> fullTableBackups = getCompletedFullBackupsSortedByHistoryDesc();
282+
List<Put> invalidatePuts = new ArrayList<>(fullTableBackups.size());
283+
Set<String> backupRootDirsSeen = new HashSet<>(fullTableBackups.size());
284+
285+
for (BackupInfo backupInfo : fullTableBackups) {
286+
// to minimize the amount of mutations against the backup system table, we only
287+
// need to update the most recent full backups that allow incremental backups
288+
if (
289+
backupInfo.getTables().contains(toDisallow) && backupInfo.getType() == BackupType.FULL
290+
&& !backupInfo.isDisallowFurtherIncrementals()
291+
&& !backupRootDirsSeen.contains(backupInfo.getBackupRootDir())
292+
) {
293+
backupInfo.setDisallowFurtherIncrementals(true);
294+
backupRootDirsSeen.add(backupInfo.getBackupRootDir());
295+
invalidatePuts.add(createPutForBackupInfo(backupInfo));
296+
LOG.info("Disallowing incremental backups for backup {} due to table {}",
297+
backupInfo.getBackupId(), toDisallow);
298+
}
299+
}
300+
301+
try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) {
302+
mutator.mutate(invalidatePuts);
303+
}
304+
}
305+
280306
/**
281307
* Updates status (state) of a backup session in backup system table table
282308
* @param info backup info
@@ -841,6 +867,24 @@ public ArrayList<BackupInfo> getBackupInfos(BackupState state) throws IOExceptio
841867
}
842868
}
843869

870+
private List<BackupInfo> getCompletedFullBackupsSortedByHistoryDesc() throws IOException {
871+
Scan scan = createScanForBackupHistory();
872+
List<BackupInfo> backups = new ArrayList<>();
873+
874+
try (Table table = connection.getTable(tableName)) {
875+
ResultScanner scanner = table.getScanner(scan);
876+
Result res;
877+
while ((res = scanner.next()) != null) {
878+
res.advance();
879+
BackupInfo context = cellToBackupInfo(res.current());
880+
if (context.getState() == BackupState.COMPLETE && context.getType() == BackupType.FULL) {
881+
backups.add(context);
882+
}
883+
}
884+
}
885+
return BackupUtils.sortHistoryListDesc(backups);
886+
}
887+
844888
/**
845889
* Write the current timestamps for each regionserver to backup system table after a successful
846890
* full or incremental backup. The saved timestamp is of the last log file that was backed up
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
/*
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hbase.backup.impl;
19+
20+
import org.apache.hadoop.hbase.HBaseIOException;
21+
import org.apache.hadoop.hbase.backup.BackupRequest;
22+
import org.apache.yetus.audience.InterfaceAudience;
23+
import org.apache.yetus.audience.InterfaceStability;
24+
25+
@InterfaceAudience.Public
26+
@InterfaceStability.Evolving
27+
public class IncrementalBackupsDisallowedException extends HBaseIOException {
28+
public IncrementalBackupsDisallowedException(BackupRequest request) {
29+
super("Could not take incremental backup for tables "
30+
+ "%s because is disallowed, please take a full backup instead"
31+
.formatted(request.getTableList()));
32+
}
33+
}

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,7 @@ public static String getTableBackupDir(String backupRootDir, String backupId,
496496
* @param historyList history list
497497
* @return sorted list of BackupCompleteData
498498
*/
499-
public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
499+
public static ArrayList<BackupInfo> sortHistoryListDesc(List<BackupInfo> historyList) {
500500
ArrayList<BackupInfo> list = new ArrayList<>();
501501
TreeMap<String, BackupInfo> map = new TreeMap<>();
502502
for (BackupInfo h : historyList) {

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
package org.apache.hadoop.hbase.backup;
1919

2020
import static org.junit.Assert.assertEquals;
21+
import static org.junit.Assert.assertFalse;
2122
import static org.junit.Assert.assertNotNull;
2223
import static org.junit.Assert.assertNull;
2324
import static org.junit.Assert.assertTrue;
@@ -53,6 +54,8 @@
5354
import org.junit.Test;
5455
import org.junit.experimental.categories.Category;
5556

57+
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
58+
5659
/**
5760
* Test cases for backup system table API
5861
*/
@@ -479,6 +482,43 @@ public void testBackupSetList() throws IOException {
479482
}
480483
}
481484

485+
@Test
486+
public void testDisallowFurtherIncrementals() throws Exception {
487+
try (BackupSystemTable table = new BackupSystemTable(conn)) {
488+
TableName toInvalidate = TableName.valueOf("t1");
489+
List<TableName> t1 = Lists.newArrayList(toInvalidate, TableName.valueOf("t2"));
490+
List<TableName> t2 = Lists.newArrayList(toInvalidate, TableName.valueOf("t3"));
491+
List<TableName> t3 = Lists.newArrayList(TableName.valueOf("t2"), TableName.valueOf("t3"));
492+
493+
BackupInfo backup = createBackupInfo();
494+
backup.setState(BackupState.COMPLETE);
495+
496+
backup.setTables(t1);
497+
backup.setBackupId("backup1");
498+
table.updateBackupInfo(backup);
499+
500+
backup.setTables(t2);
501+
backup.setBackupId("backup2");
502+
table.updateBackupInfo(backup);
503+
504+
backup.setTables(t3);
505+
backup.setBackupId("backup3");
506+
table.updateBackupInfo(backup);
507+
508+
table.disallowFurtherIncrementals(toInvalidate);
509+
BackupInfo result = table.readBackupInfo("backup1");
510+
assertTrue(result.isDisallowFurtherIncrementals());
511+
512+
table.disallowFurtherIncrementals(toInvalidate);
513+
result = table.readBackupInfo("backup2");
514+
assertTrue(result.isDisallowFurtherIncrementals());
515+
516+
table.disallowFurtherIncrementals(toInvalidate);
517+
result = table.readBackupInfo("backup3");
518+
assertFalse(result.isDisallowFurtherIncrementals());
519+
}
520+
}
521+
482522
private boolean compare(BackupInfo one, BackupInfo two) {
483523
return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
484524
&& one.getBackupRootDir().equals(two.getBackupRootDir())

0 commit comments

Comments
 (0)