Skip to content

Commit 29bc490

Browse files
chenxu14 authored and chandrasekhar-188k committed
HBASE-22335 do add hfile ref only when replication_scope is 1
1 parent 99bd5b5 commit 29bc490

File tree

2 files changed

+101
-1
lines changed

2 files changed

+101
-1
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
import org.apache.hadoop.conf.Configuration;
2424
import org.apache.hadoop.fs.Path;
2525
import org.apache.hadoop.hbase.HConstants;
26+
import org.apache.hadoop.hbase.TableName;
2627
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
2728
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
2829
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -31,6 +32,7 @@
3132
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
3233
import org.apache.hadoop.hbase.regionserver.HRegionServer;
3334
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
35+
import org.apache.hadoop.hbase.util.Bytes;
3436
import org.apache.hadoop.hbase.util.Pair;
3537
import org.apache.yetus.audience.InterfaceAudience;
3638
import org.slf4j.Logger;
@@ -65,11 +67,23 @@ public void preCommitStoreFile(final ObserverContext<? extends RegionCoprocessor
6567
+ "data replication.");
6668
return;
6769
}
70+
TableName tableName = env.getRegionInfo().getTable();
71+
if (
72+
env.getRegion().getTableDescriptor().getColumnFamily(family).getScope()
73+
!= HConstants.REPLICATION_SCOPE_GLOBAL
74+
) {
75+
LOG.debug(
76+
"Skipping recording bulk load entries in preCommitStoreFile for table:{}, family:{}, Because the replication is not enabled",
77+
tableName, Bytes.toString(family));
78+
return;
79+
}
80+
6881
// This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is
6982
// just going to break. This is all private. Not allowed. Regions shouldn't assume they are
7083
// hosted in a RegionServer. TODO: fix.
7184
RegionServerServices rss = ((HasRegionServerServices) env).getRegionServerServices();
7285
Replication rep = (Replication) ((HRegionServer) rss).getReplicationSourceService();
73-
rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs);
86+
87+
rep.addHFileRefsToQueue(tableName, family, pairs);
7488
}
7589
}

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,12 +20,15 @@
2020
import static org.apache.hadoop.hbase.HConstants.REPLICATION_CLUSTER_ID;
2121
import static org.apache.hadoop.hbase.HConstants.REPLICATION_CONF_DIR;
2222
import static org.junit.Assert.assertEquals;
23+
import static org.junit.Assert.assertNotEquals;
2324
import static org.junit.Assert.assertTrue;
2425

2526
import java.io.File;
2627
import java.io.FileOutputStream;
2728
import java.io.IOException;
2829
import java.net.UnknownHostException;
30+
import java.util.ArrayList;
31+
import java.util.HashMap;
2932
import java.util.List;
3033
import java.util.Map;
3134
import java.util.Optional;
@@ -60,6 +63,8 @@
6063
import org.apache.hadoop.hbase.io.hfile.HFile;
6164
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
6265
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
66+
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
67+
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
6368
import org.apache.hadoop.hbase.replication.TestReplicationBase;
6469
import org.apache.hadoop.hbase.testclassification.MediumTests;
6570
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -121,6 +126,8 @@ public class TestBulkLoadReplication extends TestReplicationBase {
121126
@ClassRule
122127
public static TemporaryFolder testFolder = new TemporaryFolder();
123128

129+
private static ReplicationQueueStorage queueStorage;
130+
124131
@BeforeClass
125132
public static void setUpBeforeClass() throws Exception {
126133
setupBulkLoadConfigsForCluster(CONF1, PEER1_CLUSTER_ID);
@@ -129,6 +136,8 @@ public static void setUpBeforeClass() throws Exception {
129136
setupConfig(UTIL3, "/3");
130137
TestReplicationBase.setUpBeforeClass();
131138
startThirdCluster();
139+
queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(UTIL1.getConnection(),
140+
UTIL1.getConfiguration());
132141
}
133142

134143
private static void startThirdCluster() throws Exception {
@@ -322,4 +331,81 @@ public void postBulkLoadHFile(ObserverContext<? extends RegionCoprocessorEnviron
322331
});
323332
}
324333
}
334+
335+
@Test
336+
public void testBulkloadReplicationActiveActiveForNoRepFamily() throws Exception {
337+
Table peer1TestTable = UTIL1.getConnection().getTable(TestReplicationBase.tableName);
338+
Table peer2TestTable = UTIL2.getConnection().getTable(TestReplicationBase.tableName);
339+
Table peer3TestTable = UTIL3.getConnection().getTable(TestReplicationBase.tableName);
340+
byte[] row = Bytes.toBytes("004");
341+
byte[] value = Bytes.toBytes("v4");
342+
assertBulkLoadConditionsForNoRepFamily(row, value, UTIL1, peer1TestTable, peer2TestTable,
343+
peer3TestTable);
344+
// additional wait to make sure no extra bulk load happens
345+
Thread.sleep(400);
346+
assertEquals(1, BULK_LOADS_COUNT.get());
347+
assertEquals(0, queueStorage.getAllHFileRefs().size());
348+
}
349+
350+
private void assertBulkLoadConditionsForNoRepFamily(byte[] row, byte[] value,
351+
HBaseTestingUtil utility, Table... tables) throws Exception {
352+
BULK_LOAD_LATCH = new CountDownLatch(1);
353+
bulkLoadOnClusterForNoRepFamily(row, value, utility);
354+
assertTrue(BULK_LOAD_LATCH.await(1, TimeUnit.MINUTES));
355+
assertTableHasValue(tables[0], row, value);
356+
assertTableNotHasValue(tables[1], row, value);
357+
assertTableNotHasValue(tables[2], row, value);
358+
}
359+
360+
private void bulkLoadOnClusterForNoRepFamily(byte[] row, byte[] value, HBaseTestingUtil cluster)
361+
throws Exception {
362+
String bulkloadFile = createHFileForNoRepFamilies(row, value, cluster.getConfiguration());
363+
Path bulkLoadFilePath = new Path(bulkloadFile);
364+
copyToHdfsForNoRepFamily(bulkloadFile, cluster.getDFSCluster());
365+
BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(cluster.getConfiguration());
366+
Map<byte[], List<Path>> family2Files = new HashMap<>();
367+
List<Path> files = new ArrayList<>();
368+
files.add(new Path(
369+
BULK_LOAD_BASE_DIR + "/" + Bytes.toString(noRepfamName) + "/" + bulkLoadFilePath.getName()));
370+
family2Files.put(noRepfamName, files);
371+
bulkLoadHFilesTool.bulkLoad(tableName, family2Files);
372+
}
373+
374+
private String createHFileForNoRepFamilies(byte[] row, byte[] value, Configuration clusterConfig)
375+
throws IOException {
376+
ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY);
377+
cellBuilder.setRow(row).setFamily(TestReplicationBase.noRepfamName)
378+
.setQualifier(Bytes.toBytes("1")).setValue(value).setType(Cell.Type.Put);
379+
380+
HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(clusterConfig);
381+
// TODO We need a way to do this without creating files
382+
File hFileLocation = testFolder.newFile();
383+
FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
384+
try {
385+
hFileFactory.withOutputStream(out);
386+
hFileFactory.withFileContext(new HFileContextBuilder().build());
387+
HFile.Writer writer = hFileFactory.create();
388+
try {
389+
writer.append(new KeyValue(cellBuilder.build()));
390+
} finally {
391+
writer.close();
392+
}
393+
} finally {
394+
out.close();
395+
}
396+
return hFileLocation.getAbsoluteFile().getAbsolutePath();
397+
}
398+
399+
private void copyToHdfsForNoRepFamily(String bulkLoadFilePath, MiniDFSCluster cluster)
400+
throws Exception {
401+
Path bulkLoadDir = new Path(BULK_LOAD_BASE_DIR + "/" + Bytes.toString(noRepfamName) + "/");
402+
cluster.getFileSystem().mkdirs(bulkLoadDir);
403+
cluster.getFileSystem().copyFromLocalFile(new Path(bulkLoadFilePath), bulkLoadDir);
404+
}
405+
406+
private void assertTableNotHasValue(Table table, byte[] row, byte[] value) throws IOException {
407+
Get get = new Get(row);
408+
Result result = table.get(get);
409+
assertNotEquals(Bytes.toString(value), Bytes.toString(result.value()));
410+
}
325411
}

0 commit comments

Comments (0)