diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperAdminTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperAdminTest.java index e776aea707d..fe595de8759 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperAdminTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperAdminTest.java @@ -23,13 +23,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.bookkeeper.util.BookKeeperConstants.AVAILABLE_NODE; import static org.apache.bookkeeper.util.BookKeeperConstants.READONLY; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import com.google.common.net.InetAddresses; import java.io.File; @@ -78,8 +78,7 @@ import org.apache.commons.io.FileUtils; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs.Ids; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,669 +87,701 @@ */ public class BookKeeperAdminTest extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(BookKeeperAdminTest.class); - private DigestType digestType = DigestType.CRC32; - private static final String PASSWORD = "testPasswd"; - private static final int numOfBookies = 2; - private final int 
lostBookieRecoveryDelayInitValue = 1800; - - public BookKeeperAdminTest() { - super(numOfBookies, 480); - baseConf.setLostBookieRecoveryDelay(lostBookieRecoveryDelayInitValue); - baseConf.setOpenLedgerRereplicationGracePeriod(String.valueOf(30000)); - setAutoRecoveryEnabled(true); + private static final Logger LOG = LoggerFactory.getLogger(BookKeeperAdminTest.class); + + private static final String PASSWORD = "testPasswd"; + + private static final int numOfBookies = 2; + + private final int lostBookieRecoveryDelayInitValue = 1800; + + private final DigestType digestType = DigestType.CRC32; + + public BookKeeperAdminTest() { + super(numOfBookies, 480); + baseConf.setLostBookieRecoveryDelay(lostBookieRecoveryDelayInitValue); + baseConf.setOpenLedgerRereplicationGracePeriod(String.valueOf(30000)); + setAutoRecoveryEnabled(true); + } + + @Test + void lostBookieRecoveryDelayValue() throws Exception { + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + assertEquals(lostBookieRecoveryDelayInitValue, bkAdmin.getLostBookieRecoveryDelay(), + "LostBookieRecoveryDelay"); + int newLostBookieRecoveryDelayValue = 2400; + bkAdmin.setLostBookieRecoveryDelay(newLostBookieRecoveryDelayValue); + assertEquals(newLostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay(), + "LostBookieRecoveryDelay"); + newLostBookieRecoveryDelayValue = 3000; + bkAdmin.setLostBookieRecoveryDelay(newLostBookieRecoveryDelayValue); + assertEquals(newLostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay(), + "LostBookieRecoveryDelay"); + LOG.info("Test Done"); } - - @Test - public void testLostBookieRecoveryDelayValue() throws Exception { - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - assertEquals("LostBookieRecoveryDelay", - lostBookieRecoveryDelayInitValue, bkAdmin.getLostBookieRecoveryDelay()); - int newLostBookieRecoveryDelayValue = 2400; - 
bkAdmin.setLostBookieRecoveryDelay(newLostBookieRecoveryDelayValue); - assertEquals("LostBookieRecoveryDelay", - newLostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay()); - newLostBookieRecoveryDelayValue = 3000; - bkAdmin.setLostBookieRecoveryDelay(newLostBookieRecoveryDelayValue); - assertEquals("LostBookieRecoveryDelay", - newLostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay()); - LOG.info("Test Done"); - } + } + + @Test + void triggerAuditWithStoreSystemTimeAsLedgerUnderreplicatedMarkTime() throws Exception { + testTriggerAudit(true); + } + + @Test + void triggerAuditWithoutStoreSystemTimeAsLedgerUnderreplicatedMarkTime() throws Exception { + testTriggerAudit(false); + } + + public void testTriggerAudit(boolean storeSystemTimeAsLedgerUnderreplicatedMarkTime) + throws Exception { + restartBookies(c -> { + c.setStoreSystemTimeAsLedgerUnderreplicatedMarkTime( + storeSystemTimeAsLedgerUnderreplicatedMarkTime); + return c; + }); + ClientConfiguration thisClientConf = new ClientConfiguration(baseClientConf); + thisClientConf + .setStoreSystemTimeAsLedgerUnderreplicatedMarkTime( + storeSystemTimeAsLedgerUnderreplicatedMarkTime); + long testStartSystime = System.currentTimeMillis(); + ZkLedgerUnderreplicationManager urLedgerMgr = new ZkLedgerUnderreplicationManager( + thisClientConf, zkc); + BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString()); + int lostBookieRecoveryDelayValue = bkAdmin.getLostBookieRecoveryDelay(); + urLedgerMgr.disableLedgerReplication(); + try { + bkAdmin.triggerAudit(); + fail("Trigger Audit should have failed because LedgerReplication is disabled"); + } catch (UnavailableException une) { + // expected } - - @Test - public void testTriggerAuditWithStoreSystemTimeAsLedgerUnderreplicatedMarkTime() throws Exception { - testTriggerAudit(true); + assertEquals(lostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay(), + "LostBookieRecoveryDelay"); + 
urLedgerMgr.enableLedgerReplication(); + bkAdmin.triggerAudit(); + assertEquals(lostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay(), + "LostBookieRecoveryDelay"); + long ledgerId = 1L; + LedgerHandle ledgerHandle = + bkc.createLedgerAdv(ledgerId, numBookies, numBookies, numBookies, digestType, + PASSWORD.getBytes(), null); + ledgerHandle.addEntry(0, "data".getBytes()); + ledgerHandle.close(); + + BookieServer bookieToKill = serverByIndex(1); + killBookie(1); + /* + * since lostBookieRecoveryDelay is set, when a bookie is died, it will + * not start Audit process immediately. But when triggerAudit is called + * it will force audit process. + */ + bkAdmin.triggerAudit(); + Thread.sleep(500); + Iterator underreplicatedLedgerItr = urLedgerMgr + .listLedgersToRereplicate(null); + assertTrue(underreplicatedLedgerItr.hasNext(), + "There are supposed to be underreplicatedledgers"); + UnderreplicatedLedger underreplicatedLedger = underreplicatedLedgerItr.next(); + assertEquals(ledgerId, underreplicatedLedger.getLedgerId(), "Underreplicated ledgerId"); + assertTrue(underreplicatedLedger.getReplicaList().contains(bookieToKill.getBookieId().getId()), + "Missingreplica of Underreplicated ledgerId should contain " + bookieToKill); + if (storeSystemTimeAsLedgerUnderreplicatedMarkTime) { + long ctimeOfURL = underreplicatedLedger.getCtime(); + assertTrue((ctimeOfURL > testStartSystime) && (ctimeOfURL < System.currentTimeMillis()), + "ctime of underreplicated ledger should be greater than test starttime"); + } else { + assertEquals(UnderreplicatedLedger.UNASSIGNED_CTIME, underreplicatedLedger.getCtime(), + "ctime of underreplicated ledger should not be set"); } - - @Test - public void testTriggerAuditWithoutStoreSystemTimeAsLedgerUnderreplicatedMarkTime() throws Exception { - testTriggerAudit(false); + bkAdmin.close(); + } + + @Test + void bookieInit() throws Exception { + ServerConfiguration confOfExistingBookie = newServerConfiguration(); + BookieId bookieId = 
BookieImpl.getBookieId(confOfExistingBookie); + try ( + MetadataBookieDriver driver = + BookieResources.createMetadataDriver(confOfExistingBookie, NullStatsLogger.INSTANCE); + RegistrationManager rm = driver.createRegistrationManager()) { + CookieValidation cookieValidation = new LegacyCookieValidation(confOfExistingBookie, rm); + cookieValidation.checkCookies(Main.storageDirectoriesFromConf(confOfExistingBookie)); + rm.registerBookie(bookieId, false /* readOnly */, BookieServiceInfo.EMPTY); + assertFalse(BookKeeperAdmin.initBookie(confOfExistingBookie), + "initBookie shouldn't have succeeded, since bookie is still running with that configuration"); } - public void testTriggerAudit(boolean storeSystemTimeAsLedgerUnderreplicatedMarkTime) throws Exception { - restartBookies(c -> { - c.setStoreSystemTimeAsLedgerUnderreplicatedMarkTime(storeSystemTimeAsLedgerUnderreplicatedMarkTime); - return c; - }); - ClientConfiguration thisClientConf = new ClientConfiguration(baseClientConf); - thisClientConf - .setStoreSystemTimeAsLedgerUnderreplicatedMarkTime(storeSystemTimeAsLedgerUnderreplicatedMarkTime); - long testStartSystime = System.currentTimeMillis(); - ZkLedgerUnderreplicationManager urLedgerMgr = new ZkLedgerUnderreplicationManager(thisClientConf, zkc); - BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString()); - int lostBookieRecoveryDelayValue = bkAdmin.getLostBookieRecoveryDelay(); - urLedgerMgr.disableLedgerReplication(); - try { - bkAdmin.triggerAudit(); - fail("Trigger Audit should have failed because LedgerReplication is disabled"); - } catch (UnavailableException une) { - // expected - } - assertEquals("LostBookieRecoveryDelay", lostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay()); - urLedgerMgr.enableLedgerReplication(); - bkAdmin.triggerAudit(); - assertEquals("LostBookieRecoveryDelay", lostBookieRecoveryDelayValue, bkAdmin.getLostBookieRecoveryDelay()); - long ledgerId = 1L; - LedgerHandle ledgerHandle = 
bkc.createLedgerAdv(ledgerId, numBookies, numBookies, numBookies, digestType, - PASSWORD.getBytes(), null); - ledgerHandle.addEntry(0, "data".getBytes()); - ledgerHandle.close(); - - BookieServer bookieToKill = serverByIndex(1); - killBookie(1); - /* - * since lostBookieRecoveryDelay is set, when a bookie is died, it will - * not start Audit process immediately. But when triggerAudit is called - * it will force audit process. - */ - bkAdmin.triggerAudit(); - Thread.sleep(500); - Iterator underreplicatedLedgerItr = urLedgerMgr.listLedgersToRereplicate(null); - assertTrue("There are supposed to be underreplicatedledgers", underreplicatedLedgerItr.hasNext()); - UnderreplicatedLedger underreplicatedLedger = underreplicatedLedgerItr.next(); - assertEquals("Underreplicated ledgerId", ledgerId, underreplicatedLedger.getLedgerId()); - assertTrue("Missingreplica of Underreplicated ledgerId should contain " + bookieToKill, - underreplicatedLedger.getReplicaList().contains(bookieToKill.getBookieId().getId())); - if (storeSystemTimeAsLedgerUnderreplicatedMarkTime) { - long ctimeOfURL = underreplicatedLedger.getCtime(); - assertTrue("ctime of underreplicated ledger should be greater than test starttime", - (ctimeOfURL > testStartSystime) && (ctimeOfURL < System.currentTimeMillis())); - } else { - assertEquals("ctime of underreplicated ledger should not be set", UnderreplicatedLedger.UNASSIGNED_CTIME, - underreplicatedLedger.getCtime()); - } - bkAdmin.close(); - } - - @Test - public void testBookieInit() throws Exception { - ServerConfiguration confOfExistingBookie = newServerConfiguration(); - BookieId bookieId = BookieImpl.getBookieId(confOfExistingBookie); - try (MetadataBookieDriver driver = BookieResources.createMetadataDriver( - confOfExistingBookie, NullStatsLogger.INSTANCE); - RegistrationManager rm = driver.createRegistrationManager()) { - CookieValidation cookieValidation = new LegacyCookieValidation(confOfExistingBookie, rm); - 
cookieValidation.checkCookies(Main.storageDirectoriesFromConf(confOfExistingBookie)); - rm.registerBookie(bookieId, false /* readOnly */, BookieServiceInfo.EMPTY); - Assert.assertFalse( - "initBookie shouldn't have succeeded, since bookie is still running with that configuration", - BookKeeperAdmin.initBookie(confOfExistingBookie)); - } - - Assert.assertFalse("initBookie shouldn't have succeeded, since previous bookie is not formatted yet completely", - BookKeeperAdmin.initBookie(confOfExistingBookie)); + assertFalse(BookKeeperAdmin.initBookie(confOfExistingBookie), + "initBookie shouldn't have succeeded, since previous bookie is not formatted yet completely"); - File[] ledgerDirs = confOfExistingBookie.getLedgerDirs(); - for (File ledgerDir : ledgerDirs) { - FileUtils.deleteDirectory(ledgerDir); - } - Assert.assertFalse("initBookie shouldn't have succeeded, since previous bookie is not formatted yet completely", - BookKeeperAdmin.initBookie(confOfExistingBookie)); - - File[] indexDirs = confOfExistingBookie.getIndexDirs(); - if (indexDirs != null) { - for (File indexDir : indexDirs) { - FileUtils.deleteDirectory(indexDir); - } - } - Assert.assertFalse("initBookie shouldn't have succeeded, since cookie in ZK is not deleted yet", - BookKeeperAdmin.initBookie(confOfExistingBookie)); - String bookieCookiePath = - ZKMetadataDriverBase.resolveZkLedgersRootPath(confOfExistingBookie) - + "/" + BookKeeperConstants.COOKIE_NODE - + "/" + bookieId.toString(); - zkc.delete(bookieCookiePath, -1); - - Assert.assertTrue("initBookie shouldn't succeeded", - BookKeeperAdmin.initBookie(confOfExistingBookie)); + File[] ledgerDirs = confOfExistingBookie.getLedgerDirs(); + for (File ledgerDir : ledgerDirs) { + FileUtils.deleteDirectory(ledgerDir); } - - @Test - public void testInitNewCluster() throws Exception { - ServerConfiguration newConfig = new ServerConfiguration(baseConf); - String ledgersRootPath = "/testledgers"; - 
newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); - Assert.assertTrue("New cluster should be initialized successfully", BookKeeperAdmin.initNewCluster(newConfig)); - - Assert.assertTrue("Cluster rootpath should have been created successfully " + ledgersRootPath, - (zkc.exists(ledgersRootPath, false) != null)); - String availableBookiesPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE; - Assert.assertTrue("AvailableBookiesPath should have been created successfully " + availableBookiesPath, - (zkc.exists(availableBookiesPath, false) != null)); - String readonlyBookiesPath = availableBookiesPath + "/" + READONLY; - Assert.assertTrue("ReadonlyBookiesPath should have been created successfully " + readonlyBookiesPath, - (zkc.exists(readonlyBookiesPath, false) != null)); - String instanceIdPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) - + "/" + BookKeeperConstants.INSTANCEID; - Assert.assertTrue("InstanceId node should have been created successfully" + instanceIdPath, - (zkc.exists(instanceIdPath, false) != null)); - - String ledgersLayout = ledgersRootPath + "/" + BookKeeperConstants.LAYOUT_ZNODE; - Assert.assertTrue("Layout node should have been created successfully" + ledgersLayout, - (zkc.exists(ledgersLayout, false) != null)); - - /** - * create znodes simulating existence of Bookies in the cluster - */ - int numOfBookies = 3; - Random rand = new Random(); - for (int i = 0; i < numOfBookies; i++) { - String ipString = InetAddresses.fromInteger(rand.nextInt()).getHostAddress(); - String regPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) - + "/" + AVAILABLE_NODE + "/" + ipString + ":3181"; - zkc.create(regPath, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); - } - - /* - * now it should be possible to create ledger and delete the same - */ - BookKeeper bk = new BookKeeper(new ClientConfiguration(newConfig)); - LedgerHandle lh = bk.createLedger(numOfBookies, 
numOfBookies, numOfBookies, BookKeeper.DigestType.MAC, - new byte[0]); - bk.deleteLedger(lh.ledgerId); - bk.close(); + assertFalse(BookKeeperAdmin.initBookie(confOfExistingBookie), + "initBookie shouldn't have succeeded, since previous bookie is not formatted yet completely"); + + File[] indexDirs = confOfExistingBookie.getIndexDirs(); + if (indexDirs != null) { + for (File indexDir : indexDirs) { + FileUtils.deleteDirectory(indexDir); + } } - - @Test - public void testNukeExistingClusterWithForceOption() throws Exception { - String ledgersRootPath = "/testledgers"; - ServerConfiguration newConfig = new ServerConfiguration(baseConf); - newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); - List bookiesRegPaths = new ArrayList(); - initiateNewClusterAndCreateLedgers(newConfig, bookiesRegPaths); - - /* - * before nuking existing cluster, bookies shouldn't be registered - * anymore - */ - for (int i = 0; i < bookiesRegPaths.size(); i++) { - zkc.delete(bookiesRegPaths.get(i), -1); - } - - Assert.assertTrue("New cluster should be nuked successfully", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, null, true)); - Assert.assertTrue("Cluster rootpath should have been deleted successfully " + ledgersRootPath, - (zkc.exists(ledgersRootPath, false) == null)); + assertFalse(BookKeeperAdmin.initBookie(confOfExistingBookie), + "initBookie shouldn't have succeeded, since cookie in ZK is not deleted yet"); + String bookieCookiePath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(confOfExistingBookie) + "/" + + BookKeeperConstants.COOKIE_NODE + "/" + bookieId.toString(); + zkc.delete(bookieCookiePath, -1); + + assertTrue(BookKeeperAdmin.initBookie(confOfExistingBookie), "initBookie shouldn't succeeded"); + } + + @Test + void initNewCluster() throws Exception { + ServerConfiguration newConfig = new ServerConfiguration(baseConf); + String ledgersRootPath = "/testledgers"; + 
newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); + assertTrue(BookKeeperAdmin.initNewCluster(newConfig), + "New cluster should be initialized successfully"); + + assertTrue((zkc.exists(ledgersRootPath, false) != null), + "Cluster rootpath should have been created successfully " + ledgersRootPath); + String availableBookiesPath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE; + assertTrue((zkc.exists(availableBookiesPath, false) != null), + "AvailableBookiesPath should have been created successfully " + availableBookiesPath); + String readonlyBookiesPath = availableBookiesPath + "/" + READONLY; + assertTrue((zkc.exists(readonlyBookiesPath, false) != null), + "ReadonlyBookiesPath should have been created successfully " + readonlyBookiesPath); + String instanceIdPath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + + BookKeeperConstants.INSTANCEID; + assertTrue((zkc.exists(instanceIdPath, false) != null), + "InstanceId node should have been created successfully" + instanceIdPath); + + String ledgersLayout = ledgersRootPath + "/" + BookKeeperConstants.LAYOUT_ZNODE; + assertTrue((zkc.exists(ledgersLayout, false) != null), + "Layout node should have been created successfully" + ledgersLayout); + + /** + * create znodes simulating existence of Bookies in the cluster + */ + int numOfBookies = 3; + Random rand = new Random(); + for (int i = 0; i < numOfBookies; i++) { + String ipString = InetAddresses.fromInteger(rand.nextInt()).getHostAddress(); + String regPath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE + "/" + + ipString + ":3181"; + zkc.create(regPath, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); } - @Test - public void testNukeExistingClusterWithInstanceId() throws Exception { - String ledgersRootPath = "/testledgers"; - ServerConfiguration newConfig = new ServerConfiguration(baseConf); - 
newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); - List bookiesRegPaths = new ArrayList(); - initiateNewClusterAndCreateLedgers(newConfig, bookiesRegPaths); - - /* - * before nuking existing cluster, bookies shouldn't be registered - * anymore - */ - for (int i = 0; i < bookiesRegPaths.size(); i++) { - zkc.delete(bookiesRegPaths.get(i), -1); - } - - byte[] data = zkc.getData( - ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + BookKeeperConstants.INSTANCEID, - false, null); - String readInstanceId = new String(data, UTF_8); - - Assert.assertTrue("New cluster should be nuked successfully", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false)); - Assert.assertTrue("Cluster rootpath should have been deleted successfully " + ledgersRootPath, - (zkc.exists(ledgersRootPath, false) == null)); + /* + * now it should be possible to create ledger and delete the same + */ + BookKeeper bk = new BookKeeper(new ClientConfiguration(newConfig)); + LedgerHandle lh = + bk.createLedger(numOfBookies, numOfBookies, numOfBookies, BookKeeper.DigestType.MAC, + new byte[0]); + bk.deleteLedger(lh.ledgerId); + bk.close(); + } + + @Test + void nukeExistingClusterWithForceOption() throws Exception { + String ledgersRootPath = "/testledgers"; + ServerConfiguration newConfig = new ServerConfiguration(baseConf); + newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); + List bookiesRegPaths = new ArrayList(); + initiateNewClusterAndCreateLedgers(newConfig, bookiesRegPaths); + + /* + * before nuking existing cluster, bookies shouldn't be registered + * anymore + */ + for (int i = 0; i < bookiesRegPaths.size(); i++) { + zkc.delete(bookiesRegPaths.get(i), -1); } - @Test - public void tryNukingExistingClustersWithInvalidParams() throws Exception { - String ledgersRootPath = "/testledgers"; - ServerConfiguration newConfig = new ServerConfiguration(baseConf); - 
newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); - List bookiesRegPaths = new ArrayList(); - initiateNewClusterAndCreateLedgers(newConfig, bookiesRegPaths); - - /* - * create ledger with a specific ledgerid - */ - BookKeeper bk = new BookKeeper(new ClientConfiguration(newConfig)); - long ledgerId = 23456789L; - LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, BookKeeper.DigestType.MAC, new byte[0], null); - lh.close(); - - /* - * read instanceId - */ - byte[] data = zkc.getData( - ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + BookKeeperConstants.INSTANCEID, - false, null); - String readInstanceId = new String(data, UTF_8); - - /* - * register a RO bookie - */ - String ipString = InetAddresses.fromInteger((new Random()).nextInt()).getHostAddress(); - String roBookieRegPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) - + "/" + AVAILABLE_NODE + "/" + READONLY + "/" + ipString + ":3181"; - zkc.create(roBookieRegPath, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); - - Assert.assertFalse("Cluster should'nt be nuked since instanceid is not provided and force option is not set", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, null, false)); - Assert.assertFalse("Cluster should'nt be nuked since incorrect instanceid is provided", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, "incorrectinstanceid", false)); - Assert.assertFalse("Cluster should'nt be nuked since bookies are still registered", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false)); - /* - * delete all rw bookies registration - */ - for (int i = 0; i < bookiesRegPaths.size(); i++) { - zkc.delete(bookiesRegPaths.get(i), -1); - } - Assert.assertFalse("Cluster should'nt be nuked since ro bookie is still registered", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false)); - - /* - * make sure no node is deleted - */ - 
Assert.assertTrue("Cluster rootpath should be existing " + ledgersRootPath, - (zkc.exists(ledgersRootPath, false) != null)); - String availableBookiesPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE; - Assert.assertTrue("AvailableBookiesPath should be existing " + availableBookiesPath, - (zkc.exists(availableBookiesPath, false) != null)); - String instanceIdPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) - + "/" + BookKeeperConstants.INSTANCEID; - Assert.assertTrue("InstanceId node should be existing" + instanceIdPath, - (zkc.exists(instanceIdPath, false) != null)); - String ledgersLayout = ledgersRootPath + "/" + BookKeeperConstants.LAYOUT_ZNODE; - Assert.assertTrue("Layout node should be existing" + ledgersLayout, (zkc.exists(ledgersLayout, false) != null)); - - /* - * ledger should not be deleted. - */ - lh = bk.openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.MAC, new byte[0]); - lh.close(); - bk.close(); - - /* - * delete ro bookie reg znode - */ - zkc.delete(roBookieRegPath, -1); - - Assert.assertTrue("Cluster should be nuked since no bookie is registered", - BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false)); - Assert.assertTrue("Cluster rootpath should have been deleted successfully " + ledgersRootPath, - (zkc.exists(ledgersRootPath, false) == null)); + assertTrue(BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, null, true), + "New cluster should be nuked successfully"); + assertTrue((zkc.exists(ledgersRootPath, false) == null), + "Cluster rootpath should have been deleted successfully " + ledgersRootPath); + } + + @Test + void nukeExistingClusterWithInstanceId() throws Exception { + String ledgersRootPath = "/testledgers"; + ServerConfiguration newConfig = new ServerConfiguration(baseConf); + newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); + List bookiesRegPaths = new ArrayList(); + 
initiateNewClusterAndCreateLedgers(newConfig, bookiesRegPaths); + + /* + * before nuking existing cluster, bookies shouldn't be registered + * anymore + */ + for (int i = 0; i < bookiesRegPaths.size(); i++) { + zkc.delete(bookiesRegPaths.get(i), -1); } - void initiateNewClusterAndCreateLedgers(ServerConfiguration newConfig, List bookiesRegPaths) - throws Exception { - Assert.assertTrue("New cluster should be initialized successfully", BookKeeperAdmin.initNewCluster(newConfig)); - - /** - * create znodes simulating existence of Bookies in the cluster - */ - int numberOfBookies = 3; - Random rand = new Random(); - for (int i = 0; i < numberOfBookies; i++) { - String ipString = InetAddresses.fromInteger(rand.nextInt()).getHostAddress(); - bookiesRegPaths.add(ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) - + "/" + AVAILABLE_NODE + "/" + ipString + ":3181"); - zkc.create(bookiesRegPaths.get(i), new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); - } - - /* - * now it should be possible to create ledger and delete the same - */ - BookKeeper bk = new BookKeeper(new ClientConfiguration(newConfig)); - LedgerHandle lh; - int numOfLedgers = 5; - for (int i = 0; i < numOfLedgers; i++) { - lh = bk.createLedger(numberOfBookies, numberOfBookies, numberOfBookies, BookKeeper.DigestType.MAC, - new byte[0]); - lh.close(); - } - bk.close(); + byte[] data = + zkc.getData(ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + + BookKeeperConstants.INSTANCEID, + false, null); + String readInstanceId = new String(data, UTF_8); + + assertTrue( + BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false), + "New cluster should be nuked successfully"); + assertTrue((zkc.exists(ledgersRootPath, false) == null), + "Cluster rootpath should have been deleted successfully " + ledgersRootPath); + } + + @Test + void tryNukingExistingClustersWithInvalidParams() throws Exception { + String ledgersRootPath = "/testledgers"; + ServerConfiguration 
newConfig = new ServerConfiguration(baseConf); + newConfig.setMetadataServiceUri(newMetadataServiceUri(ledgersRootPath)); + List bookiesRegPaths = new ArrayList(); + initiateNewClusterAndCreateLedgers(newConfig, bookiesRegPaths); + + /* + * create ledger with a specific ledgerid + */ + BookKeeper bk = new BookKeeper(new ClientConfiguration(newConfig)); + long ledgerId = 23456789L; + LedgerHandle lh = bk + .createLedgerAdv(ledgerId, 1, 1, 1, BookKeeper.DigestType.MAC, new byte[0], null); + lh.close(); + + /* + * read instanceId + */ + byte[] data = + zkc.getData(ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + + BookKeeperConstants.INSTANCEID, + false, null); + String readInstanceId = new String(data, UTF_8); + + /* + * register a RO bookie + */ + String ipString = InetAddresses.fromInteger((new Random()).nextInt()).getHostAddress(); + String roBookieRegPath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE + "/" + + READONLY + "/" + ipString + ":3181"; + zkc.create(roBookieRegPath, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); + + assertFalse(BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, null, false), + "Cluster should'nt be nuked since instanceid is not provided and force option is not set"); + assertFalse(BookKeeperAdmin + .nukeExistingCluster(newConfig, ledgersRootPath, "incorrectinstanceid", false), + "Cluster should'nt be nuked since incorrect instanceid is provided"); + assertFalse( + BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false), + "Cluster should'nt be nuked since bookies are still registered"); + /* + * delete all rw bookies registration + */ + for (int i = 0; i < bookiesRegPaths.size(); i++) { + zkc.delete(bookiesRegPaths.get(i), -1); } - - @Test - public void testGetListOfEntriesOfClosedLedger() throws Exception { - testGetListOfEntriesOfLedger(true); + assertFalse( + BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, 
readInstanceId, false), + "Cluster should'nt be nuked since ro bookie is still registered"); + + /* + * make sure no node is deleted + */ + assertTrue((zkc.exists(ledgersRootPath, false) != null), + "Cluster rootpath should be existing " + ledgersRootPath); + String availableBookiesPath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE; + assertTrue((zkc.exists(availableBookiesPath, false) != null), + "AvailableBookiesPath should be existing " + availableBookiesPath); + String instanceIdPath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + + BookKeeperConstants.INSTANCEID; + assertTrue((zkc.exists(instanceIdPath, false) != null), + "InstanceId node should be existing" + instanceIdPath); + String ledgersLayout = ledgersRootPath + "/" + BookKeeperConstants.LAYOUT_ZNODE; + assertTrue((zkc.exists(ledgersLayout, false) != null), + "Layout node should be existing" + ledgersLayout); + + /* + * ledger should not be deleted. + */ + lh = bk.openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.MAC, new byte[0]); + lh.close(); + bk.close(); + + /* + * delete ro bookie reg znode + */ + zkc.delete(roBookieRegPath, -1); + + assertTrue( + BookKeeperAdmin.nukeExistingCluster(newConfig, ledgersRootPath, readInstanceId, false), + "Cluster should be nuked since no bookie is registered"); + assertTrue((zkc.exists(ledgersRootPath, false) == null), + "Cluster rootpath should have been deleted successfully " + ledgersRootPath); + } + + void initiateNewClusterAndCreateLedgers(ServerConfiguration newConfig, + List bookiesRegPaths) + throws Exception { + assertTrue(BookKeeperAdmin.initNewCluster(newConfig), + "New cluster should be initialized successfully"); + + /** + * create znodes simulating existence of Bookies in the cluster + */ + int numberOfBookies = 3; + Random rand = new Random(); + for (int i = 0; i < numberOfBookies; i++) { + String ipString = InetAddresses.fromInteger(rand.nextInt()).getHostAddress(); + bookiesRegPaths + 
.add(ZKMetadataDriverBase.resolveZkLedgersRootPath(newConfig) + "/" + AVAILABLE_NODE + "/" + + ipString + ":3181"); + zkc.create(bookiesRegPaths.get(i), new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); } - @Test - public void testGetListOfEntriesOfNotClosedLedger() throws Exception { - testGetListOfEntriesOfLedger(false); + /* + * now it should be possible to create ledger and delete the same + */ + BookKeeper bk = new BookKeeper(new ClientConfiguration(newConfig)); + LedgerHandle lh; + int numOfLedgers = 5; + for (int i = 0; i < numOfLedgers; i++) { + lh = bk.createLedger(numberOfBookies, numberOfBookies, numberOfBookies, + BookKeeper.DigestType.MAC, + new byte[0]); + lh.close(); } - - @Test - public void testGetListOfEntriesOfNonExistingLedger() throws Exception { - long nonExistingLedgerId = 56789L; - - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - for (int i = 0; i < bookieCount(); i++) { - CompletableFuture futureResult = bkAdmin - .asyncGetListOfEntriesOfLedger(addressByIndex(i), nonExistingLedgerId); - try { - futureResult.get(); - fail("asyncGetListOfEntriesOfLedger is supposed to be failed with NoSuchLedgerExistsException"); - } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof BKException); - BKException e = (BKException) ee.getCause(); - assertEquals(e.getCode(), BKException.Code.NoSuchLedgerExistsException); - } - } + bk.close(); + } + + @Test + void getListOfEntriesOfClosedLedger() throws Exception { + testGetListOfEntriesOfLedger(true); + } + + @Test + void getListOfEntriesOfNotClosedLedger() throws Exception { + testGetListOfEntriesOfLedger(false); + } + + @Test + void getListOfEntriesOfNonExistingLedger() throws Exception { + long nonExistingLedgerId = 56789L; + + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + for (int i = 0; i < bookieCount(); i++) { + CompletableFuture futureResult = + 
bkAdmin.asyncGetListOfEntriesOfLedger(addressByIndex(i), nonExistingLedgerId); + try { + futureResult.get(); + fail( + "asyncGetListOfEntriesOfLedger is supposed to be failed with NoSuchLedgerExistsException"); + } catch (ExecutionException ee) { + assertTrue(ee.getCause() instanceof BKException); + BKException e = (BKException) ee.getCause(); + assertEquals(BKException.Code.NoSuchLedgerExistsException, e.getCode()); } + } } - - public void testGetListOfEntriesOfLedger(boolean isLedgerClosed) throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numOfEntries = 6; - BookKeeper bkc = new BookKeeper(conf); - LedgerHandle lh = bkc.createLedger(numOfBookies, numOfBookies, digestType, "testPasswd".getBytes()); - long lId = lh.getId(); - for (int i = 0; i < numOfEntries; i++) { - lh.addEntry("000".getBytes()); - } - if (isLedgerClosed) { - lh.close(); - } - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - for (int i = 0; i < bookieCount(); i++) { - CompletableFuture futureResult = bkAdmin - .asyncGetListOfEntriesOfLedger(addressByIndex(i), lId); - AvailabilityOfEntriesOfLedger availabilityOfEntriesOfLedger = futureResult.get(); - assertEquals("Number of entries", numOfEntries, - availabilityOfEntriesOfLedger.getTotalNumOfAvailableEntries()); - for (int j = 0; j < numOfEntries; j++) { - assertTrue("Entry should be available: " + j, availabilityOfEntriesOfLedger.isEntryAvailable(j)); - } - assertFalse("Entry should not be available: " + numOfEntries, - availabilityOfEntriesOfLedger.isEntryAvailable(numOfEntries)); - } - } - bkc.close(); + } + + public void testGetListOfEntriesOfLedger(boolean isLedgerClosed) throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numOfEntries = 6; + BookKeeper bkc = new BookKeeper(conf); + LedgerHandle lh = bkc + 
.createLedger(numOfBookies, numOfBookies, digestType, "testPasswd".getBytes()); + long lId = lh.getId(); + for (int i = 0; i < numOfEntries; i++) { + lh.addEntry("000".getBytes()); } - - @Test - public void testGetEntriesFromEmptyLedger() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - LedgerHandle lh = bkc.createLedger(numOfBookies, numOfBookies, digestType, "testPasswd".getBytes(UTF_8)); - lh.close(); - long ledgerId = lh.getId(); - - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - Iterator iter = bkAdmin.readEntries(ledgerId, 0, 0).iterator(); - assertFalse(iter.hasNext()); - } - - bkc.close(); + if (isLedgerClosed) { + lh.close(); } - - @Test - public void testGetListOfEntriesOfLedgerWithJustOneBookieInWriteQuorum() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numOfEntries = 6; - BookKeeper bkc = new BookKeeper(conf); - /* - * in this testsuite there are going to be 2 (numOfBookies) and if - * writeQuorum is 1 then it will stripe entries to those two bookies. - */ - LedgerHandle lh = bkc.createLedger(2, 1, digestType, "testPasswd".getBytes()); - long lId = lh.getId(); - for (int i = 0; i < numOfEntries; i++) { - lh.addEntry("000".getBytes()); - } - - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - for (int i = 0; i < bookieCount(); i++) { - CompletableFuture futureResult = bkAdmin - .asyncGetListOfEntriesOfLedger(addressByIndex(i), lId); - AvailabilityOfEntriesOfLedger availabilityOfEntriesOfLedger = futureResult.get(); - /* - * since num of bookies in the ensemble is 2 and - * writeQuorum/ackQuorum is 1, it will stripe to these two - * bookies and hence in each bookie there will be only - * numOfEntries/2 entries. 
- */ - assertEquals("Number of entries", numOfEntries / 2, - availabilityOfEntriesOfLedger.getTotalNumOfAvailableEntries()); - } + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + for (int i = 0; i < bookieCount(); i++) { + CompletableFuture futureResult = + bkAdmin.asyncGetListOfEntriesOfLedger(addressByIndex(i), lId); + AvailabilityOfEntriesOfLedger availabilityOfEntriesOfLedger = futureResult.get(); + assertEquals(numOfEntries, availabilityOfEntriesOfLedger.getTotalNumOfAvailableEntries(), + "Number of entries"); + for (int j = 0; j < numOfEntries; j++) { + assertTrue(availabilityOfEntriesOfLedger.isEntryAvailable(j), + "Entry should be available: " + j); } - bkc.close(); + assertFalse(availabilityOfEntriesOfLedger.isEntryAvailable(numOfEntries), + "Entry should not be available: " + numOfEntries); + } + } + bkc.close(); + } + + @Test + void getEntriesFromEmptyLedger() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + LedgerHandle lh = bkc + .createLedger(numOfBookies, numOfBookies, digestType, "testPasswd".getBytes(UTF_8)); + lh.close(); + long ledgerId = lh.getId(); + + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + Iterator iter = bkAdmin.readEntries(ledgerId, 0, 0).iterator(); + assertFalse(iter.hasNext()); } - @Test - public void testGetBookies() throws Exception { - String ledgersRootPath = "/ledgers"; - Assert.assertTrue("Cluster rootpath should have been created successfully " + ledgersRootPath, - (zkc.exists(ledgersRootPath, false) != null)); - String bookieCookiePath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) - + "/" + BookKeeperConstants.COOKIE_NODE; - Assert.assertTrue("AvailableBookiesPath should have been created successfully " + bookieCookiePath, - (zkc.exists(bookieCookiePath, false) != null)); - - try 
(BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - Collection availableBookies = bkAdmin.getAvailableBookies(); - Assert.assertEquals(availableBookies.size(), bookieCount()); - - for (int i = 0; i < bookieCount(); i++) { - availableBookies.contains(addressByIndex(i)); - } - - BookieServer killedBookie = serverByIndex(1); - killBookieAndWaitForZK(1); - - Collection remainingBookies = bkAdmin.getAvailableBookies(); - Assert.assertFalse(remainingBookies.contains(killedBookie)); - - Collection allBookies = bkAdmin.getAllBookies(); - for (int i = 0; i < bookieCount(); i++) { - remainingBookies.contains(addressByIndex(i)); - allBookies.contains(addressByIndex(i)); - } - - Assert.assertEquals(remainingBookies.size(), allBookies.size() - 1); - Assert.assertTrue(allBookies.contains(killedBookie.getBookieId())); - } + bkc.close(); + } + + @Test + void getListOfEntriesOfLedgerWithJustOneBookieInWriteQuorum() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numOfEntries = 6; + BookKeeper bkc = new BookKeeper(conf); + /* + * in this testsuite there are going to be 2 (numOfBookies) and if + * writeQuorum is 1 then it will stripe entries to those two bookies. 
+ */ + LedgerHandle lh = bkc.createLedger(2, 1, digestType, "testPasswd".getBytes()); + long lId = lh.getId(); + for (int i = 0; i < numOfEntries; i++) { + lh.addEntry("000".getBytes()); } - @Test - public void testGetListOfEntriesOfLedgerWithEntriesNotStripedToABookie() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + for (int i = 0; i < bookieCount(); i++) { + CompletableFuture futureResult = + bkAdmin.asyncGetListOfEntriesOfLedger(addressByIndex(i), lId); + AvailabilityOfEntriesOfLedger availabilityOfEntriesOfLedger = futureResult.get(); /* - * in this testsuite there are going to be 2 (numOfBookies) bookies and - * we are having ensemble of size 2. + * since num of bookies in the ensemble is 2 and + * writeQuorum/ackQuorum is 1, it will stripe to these two + * bookies and hence in each bookie there will be only + * numOfEntries/2 entries. */ - LedgerHandle lh = bkc.createLedger(2, 1, digestType, "testPasswd".getBytes()); - long lId = lh.getId(); - /* - * ledger is writeclosed without adding any entry. - */ - lh.close(); - CountDownLatch callbackCalled = new CountDownLatch(1); - AtomicBoolean exceptionInCallback = new AtomicBoolean(false); - AtomicInteger exceptionCode = new AtomicInteger(BKException.Code.OK); - BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString()); - /* - * since no entry is added, callback is supposed to fail with - * NoSuchLedgerExistsException. 
- */ - bkAdmin.asyncGetListOfEntriesOfLedger(addressByIndex(0), lId) - .whenComplete((availabilityOfEntriesOfLedger, throwable) -> { - exceptionInCallback.set(throwable != null); - if (throwable != null) { - exceptionCode.set(BKException.getExceptionCode(throwable)); - } - callbackCalled.countDown(); - }); - callbackCalled.await(); - assertTrue("Exception occurred", exceptionInCallback.get()); - assertEquals("Exception code", BKException.Code.NoSuchLedgerExistsException, exceptionCode.get()); - bkAdmin.close(); - bkc.close(); + assertEquals(numOfEntries / 2, + availabilityOfEntriesOfLedger.getTotalNumOfAvailableEntries(), + "Number of entries"); + } } - - @Test - public void testAreEntriesOfLedgerStoredInTheBookieForLastEmptySegment() throws Exception { - int lastEntryId = 10; - long ledgerId = 100L; - BookieId bookie0 = new BookieSocketAddress("bookie0:3181").toBookieId(); - BookieId bookie1 = new BookieSocketAddress("bookie1:3181").toBookieId(); - BookieId bookie2 = new BookieSocketAddress("bookie2:3181").toBookieId(); - BookieId bookie3 = new BookieSocketAddress("bookie3:3181").toBookieId(); - - List ensembleOfSegment1 = new ArrayList(); - ensembleOfSegment1.add(bookie0); - ensembleOfSegment1.add(bookie1); - ensembleOfSegment1.add(bookie2); - - List ensembleOfSegment2 = new ArrayList(); - ensembleOfSegment2.add(bookie3); - ensembleOfSegment2.add(bookie1); - ensembleOfSegment2.add(bookie2); - - LedgerMetadataBuilder builder = LedgerMetadataBuilder.create(); - builder.withId(ledgerId) - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(2) - .withDigestType(digestType.toApiDigestType()) - .withPassword(PASSWORD.getBytes()) - .newEnsembleEntry(0, ensembleOfSegment1) - .newEnsembleEntry(lastEntryId + 1, ensembleOfSegment2) - .withLastEntryId(lastEntryId).withLength(65576).withClosedState(); - LedgerMetadata meta = builder.build(); - - assertFalse("expected areEntriesOfLedgerStoredInTheBookie to return False for bookie3", - 
BookKeeperAdmin.areEntriesOfLedgerStoredInTheBookie(ledgerId, bookie3, meta)); - assertTrue("expected areEntriesOfLedgerStoredInTheBookie to return true for bookie2", - BookKeeperAdmin.areEntriesOfLedgerStoredInTheBookie(ledgerId, bookie2, meta)); + bkc.close(); + } + + @Test + void getBookies() throws Exception { + String ledgersRootPath = "/ledgers"; + assertTrue((zkc.exists(ledgersRootPath, false) != null), + "Cluster rootpath should have been created successfully " + ledgersRootPath); + String bookieCookiePath = + ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + "/" + + BookKeeperConstants.COOKIE_NODE; + assertTrue((zkc.exists(bookieCookiePath, false) != null), + "AvailableBookiesPath should have been created successfully " + bookieCookiePath); + + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + Collection availableBookies = bkAdmin.getAvailableBookies(); + assertEquals(availableBookies.size(), bookieCount()); + + for (int i = 0; i < bookieCount(); i++) { + availableBookies.contains(addressByIndex(i)); + } + + BookieServer killedBookie = serverByIndex(1); + killBookieAndWaitForZK(1); + + Collection remainingBookies = bkAdmin.getAvailableBookies(); + assertFalse(remainingBookies.contains(killedBookie)); + + Collection allBookies = bkAdmin.getAllBookies(); + for (int i = 0; i < bookieCount(); i++) { + remainingBookies.contains(addressByIndex(i)); + allBookies.contains(addressByIndex(i)); + } + + assertEquals(remainingBookies.size(), allBookies.size() - 1); + assertTrue(allBookies.contains(killedBookie.getBookieId())); } - - @Test - public void testBookkeeperAdminFormatResetsLedgerIds() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - - /* - * in this testsuite there are going to be 2 (numOfBookies) ledgers - * written and when formatting the BookieAdmin i expect that the - * ledger ids restart from 0 - */ - int 
numOfLedgers = 2; - try (BookKeeper bkc = new BookKeeper(conf)) { - Set ledgerIds = new HashSet<>(); - for (int n = 0; n < numOfLedgers; n++) { - try (LedgerHandle lh = bkc.createLedger(numOfBookies, numOfBookies, digestType, "L".getBytes())) { - ledgerIds.add(lh.getId()); - lh.addEntry("000".getBytes()); - } - } - - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - bkAdmin.format(baseConf, false, true); - } - - /** - * ledgers created after format produce the same ids - */ - for (int n = 0; n < numOfLedgers; n++) { - try (LedgerHandle lh = bkc.createLedger(numOfBookies, numOfBookies, digestType, "L".getBytes())) { - lh.addEntry("000".getBytes()); - assertTrue(ledgerIds.contains(lh.getId())); - } - } + } + + @Test + void getListOfEntriesOfLedgerWithEntriesNotStripedToABookie() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + /* + * in this testsuite there are going to be 2 (numOfBookies) bookies and + * we are having ensemble of size 2. + */ + LedgerHandle lh = bkc.createLedger(2, 1, digestType, "testPasswd".getBytes()); + long lId = lh.getId(); + /* + * ledger is writeclosed without adding any entry. + */ + lh.close(); + CountDownLatch callbackCalled = new CountDownLatch(1); + AtomicBoolean exceptionInCallback = new AtomicBoolean(false); + AtomicInteger exceptionCode = new AtomicInteger(BKException.Code.OK); + BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString()); + /* + * since no entry is added, callback is supposed to fail with + * NoSuchLedgerExistsException. 
+ */ + bkAdmin.asyncGetListOfEntriesOfLedger(addressByIndex(0), lId) + .whenComplete((availabilityOfEntriesOfLedger, throwable) -> { + exceptionInCallback.set(throwable != null); + if (throwable != null) { + exceptionCode.set(BKException.getExceptionCode(throwable)); + } + callbackCalled.countDown(); + }); + callbackCalled.await(); + assertTrue(exceptionInCallback.get(), "Exception occurred"); + assertEquals(BKException.Code.NoSuchLedgerExistsException, exceptionCode.get(), + "Exception code"); + bkAdmin.close(); + bkc.close(); + } + + @Test + void areEntriesOfLedgerStoredInTheBookieForLastEmptySegment() throws Exception { + int lastEntryId = 10; + long ledgerId = 100L; + BookieId bookie0 = new BookieSocketAddress("bookie0:3181").toBookieId(); + BookieId bookie1 = new BookieSocketAddress("bookie1:3181").toBookieId(); + BookieId bookie2 = new BookieSocketAddress("bookie2:3181").toBookieId(); + BookieId bookie3 = new BookieSocketAddress("bookie3:3181").toBookieId(); + + List ensembleOfSegment1 = new ArrayList(); + ensembleOfSegment1.add(bookie0); + ensembleOfSegment1.add(bookie1); + ensembleOfSegment1.add(bookie2); + + List ensembleOfSegment2 = new ArrayList(); + ensembleOfSegment2.add(bookie3); + ensembleOfSegment2.add(bookie1); + ensembleOfSegment2.add(bookie2); + + LedgerMetadataBuilder builder = LedgerMetadataBuilder.create(); + builder.withId(ledgerId).withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) + .withDigestType(digestType.toApiDigestType()).withPassword(PASSWORD.getBytes()) + .newEnsembleEntry(0, ensembleOfSegment1) + .newEnsembleEntry(lastEntryId + 1, ensembleOfSegment2) + .withLastEntryId(lastEntryId).withLength(65576).withClosedState(); + LedgerMetadata meta = builder.build(); + + assertFalse(BookKeeperAdmin.areEntriesOfLedgerStoredInTheBookie(ledgerId, bookie3, meta), + "expected areEntriesOfLedgerStoredInTheBookie to return False for bookie3"); + assertTrue(BookKeeperAdmin.areEntriesOfLedgerStoredInTheBookie(ledgerId, bookie2, meta), + 
"expected areEntriesOfLedgerStoredInTheBookie to return true for bookie2"); + } + + @Test + void bookkeeperAdminFormatResetsLedgerIds() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + /* + * in this testsuite there are going to be 2 (numOfBookies) ledgers + * written and when formatting the BookieAdmin i expect that the + * ledger ids restart from 0 + */ + int numOfLedgers = 2; + try (BookKeeper bkc = new BookKeeper(conf)) { + Set ledgerIds = new HashSet<>(); + for (int n = 0; n < numOfLedgers; n++) { + try (LedgerHandle lh = bkc + .createLedger(numOfBookies, numOfBookies, digestType, "L".getBytes())) { + ledgerIds.add(lh.getId()); + lh.addEntry("000".getBytes()); + } + } + + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + BookKeeperAdmin.format(baseConf, false, true); + } + + /** + * ledgers created after format produce the same ids + */ + for (int n = 0; n < numOfLedgers; n++) { + try (LedgerHandle lh = bkc + .createLedger(numOfBookies, numOfBookies, digestType, "L".getBytes())) { + lh.addEntry("000".getBytes()); + assertTrue(ledgerIds.contains(lh.getId())); } + } + } + } + + private void testBookieServiceInfo(boolean readonly, boolean legacy) throws Exception { + File tmpDir = tmpDirs.createNew("bookie", "test"); + final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration() + .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[]{tmpDir.getPath()}) + .setBookiePort(PortManager.nextFreePort()).setMetadataServiceUri(metadataServiceUri); + + LifecycleComponent server = Main.buildBookieServer(new BookieConfiguration(conf)); + // 2. 
start the server + CompletableFuture stackComponentFuture = ComponentStarter.startComponent(server); + while (server.lifecycleState() != Lifecycle.State.STARTED) { + Thread.sleep(100); } - private void testBookieServiceInfo(boolean readonly, boolean legacy) throws Exception { - File tmpDir = tmpDirs.createNew("bookie", "test"); - final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration() - .setJournalDirName(tmpDir.getPath()) - .setLedgerDirNames(new String[]{tmpDir.getPath()}) - .setBookiePort(PortManager.nextFreePort()) - .setMetadataServiceUri(metadataServiceUri); - - LifecycleComponent server = Main.buildBookieServer(new BookieConfiguration(conf)); - // 2. start the server - CompletableFuture stackComponentFuture = ComponentStarter.startComponent(server); - while (server.lifecycleState() != Lifecycle.State.STARTED) { - Thread.sleep(100); - } + ServerConfiguration bkConf = newServerConfiguration().setForceReadOnlyBookie(readonly); + BookieServer bkServer = startBookie(bkConf).getServer(); - ServerConfiguration bkConf = newServerConfiguration().setForceReadOnlyBookie(readonly); - BookieServer bkServer = startBookie(bkConf).getServer(); - - BookieId bookieId = bkServer.getBookieId(); - String host = bkServer.getLocalAddress().getHostName(); - int port = bkServer.getLocalAddress().getPort(); - - if (legacy) { - String regPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(bkConf) + "/" + AVAILABLE_NODE; - regPath = readonly - ? 
regPath + READONLY + "/" + bookieId - : regPath + "/" + bookieId.toString(); - // deleting the metadata, so that the bookie registration should - // continue successfully with legacy BookieServiceInfo - zkc.setData(regPath, new byte[]{}, -1); - } + BookieId bookieId = bkServer.getBookieId(); + String host = bkServer.getLocalAddress().getHostName(); + int port = bkServer.getLocalAddress().getPort(); - try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { - BookieServiceInfo bookieServiceInfo = bkAdmin.getBookieServiceInfo(bookieId); + if (legacy) { + String regPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(bkConf) + "/" + AVAILABLE_NODE; + regPath = + readonly ? regPath + READONLY + "/" + bookieId : regPath + "/" + bookieId.toString(); + // deleting the metadata, so that the bookie registration should + // continue successfully with legacy BookieServiceInfo + zkc.setData(regPath, new byte[]{}, -1); + } - assertThat(bookieServiceInfo.getEndpoints().size(), is(1)); - BookieServiceInfo.Endpoint endpoint = bookieServiceInfo.getEndpoints().stream() - .filter(e -> Objects.equals(e.getId(), bookieId.getId())) - .findFirst() - .get(); - assertNotNull("Endpoint " + bookieId + " not found.", endpoint); + try (BookKeeperAdmin bkAdmin = new BookKeeperAdmin(zkUtil.getZooKeeperConnectString())) { + BookieServiceInfo bookieServiceInfo = bkAdmin.getBookieServiceInfo(bookieId); - assertThat(endpoint.getHost(), is(host)); - assertThat(endpoint.getPort(), is(port)); - assertThat(endpoint.getProtocol(), is("bookie-rpc")); - } + assertThat(bookieServiceInfo.getEndpoints().size(), is(1)); + BookieServiceInfo.Endpoint endpoint = bookieServiceInfo.getEndpoints().stream() + .filter(e -> Objects.equals(e.getId(), bookieId.getId())).findFirst().get(); + assertNotNull(endpoint, "Endpoint " + bookieId + " not found."); - bkServer.shutdown(); - stackComponentFuture.cancel(true); + assertThat(endpoint.getHost(), is(host)); + assertThat(endpoint.getPort(), 
is(port)); + assertThat(endpoint.getProtocol(), is("bookie-rpc")); } - @Test - public void testBookieServiceInfoWritable() throws Exception { - testBookieServiceInfo(false, false); - } + bkServer.shutdown(); + stackComponentFuture.cancel(true); + } - @Test - public void testBookieServiceInfoReadonly() throws Exception { - testBookieServiceInfo(true, false); - } + @Test + void bookieServiceInfoWritable() throws Exception { + testBookieServiceInfo(false, false); + } - @Test - public void testLegacyBookieServiceInfo() throws Exception { - testBookieServiceInfo(false, true); - } + @Test + void bookieServiceInfoReadonly() throws Exception { + testBookieServiceInfo(true, false); + } + + @Test + void legacyBookieServiceInfo() throws Exception { + testBookieServiceInfo(false, true); + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientTestsWithBookieErrors.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientTestsWithBookieErrors.java index 373e4f523c8..2e22d8efaa9 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientTestsWithBookieErrors.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientTestsWithBookieErrors.java @@ -20,7 +20,8 @@ */ package org.apache.bookkeeper.client; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import io.netty.buffer.ByteBuf; import java.io.IOException; @@ -28,15 +29,16 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import org.apache.bookkeeper.bookie.BookieException; import org.apache.bookkeeper.bookie.SortedLedgerStorage; import org.apache.bookkeeper.client.BookKeeper.DigestType; import org.apache.bookkeeper.conf.ClientConfiguration; import 
org.apache.bookkeeper.test.BookKeeperClusterTestCase; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,291 +46,313 @@ * Test the bookkeeper client with errors from Bookies. */ public class BookKeeperClientTestsWithBookieErrors extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(BookKeeperClientTestsWithBookieErrors.class); - private static final int NUM_BOOKIES = 3; - // The amount of sleeptime to sleep in injectSleepWhileRead fault injection - private final long sleepTime; - // Fault injection which would sleep for sleepTime before returning readEntry call - private final Consumer injectSleepWhileRead; - // Fault injection which would corrupt the entry data before returning readEntry call - private final Consumer injectCorruptData; - /* - * The ordered list of injections for the Bookies (LedgerStorage). The first - * bookie to get readEntry call will use the first faultInjection, and the - * second bookie to get readentry call will use the second one and so on.. - * - * It is assumed that there would be faultInjection for each Bookie. 
So if - * there aren't NUM_BOOKIES num of faulInjections in this list then it will - * fail with NullPointerException - */ - private static List> faultInjections = new ArrayList>(); - /* - * This map is used for storing LedgerStorage and the corresponding - * faultInjection, according to the faultInjections list - */ - private static HashMap> storageFaultInjectionsMap = - new HashMap>(); - // Lock object for synchronizing injectCorruptData and faultInjections - private static final Object lock = new Object(); - - public BookKeeperClientTestsWithBookieErrors() { - super(NUM_BOOKIES); - baseConf.setLedgerStorageClass(MockSortedLedgerStorage.class.getName()); - - // this fault injection will corrupt the entry data by modifying the last byte of the entry - injectCorruptData = (byteBuf) -> { - ByteBuffer buf = byteBuf.nioBuffer(); - int lastByteIndex = buf.limit() - 1; - buf.put(lastByteIndex, (byte) (buf.get(lastByteIndex) - 1)); - }; - - // this fault injection, will sleep for ReadEntryTimeout+2 secs before returning the readEntry call - sleepTime = (baseClientConf.getReadEntryTimeout() + 2) * 1000; - injectSleepWhileRead = (byteBuf) -> { - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - }; - } - @Before - public void setUp() throws Exception { - faultInjections.clear(); - storageFaultInjectionsMap.clear(); - super.setUp(); + private static final Logger LOG = LoggerFactory + .getLogger(BookKeeperClientTestsWithBookieErrors.class); + private static final int NUM_BOOKIES = 3; + + // Lock object for synchronizing injectCorruptData and faultInjections + private static final Object lock = new Object(); + + /* + * The ordered list of injections for the Bookies (LedgerStorage). The first + * bookie to get readEntry call will use the first faultInjection, and the + * second bookie to get readentry call will use the second one and so on.. + * + * It is assumed that there would be faultInjection for each Bookie. 
So if + * there aren't NUM_BOOKIES num of faulInjections in this list then it will + * fail with NullPointerException + */ + private static final List> faultInjections = new ArrayList>(); + + /* + * This map is used for storing LedgerStorage and the corresponding + * faultInjection, according to the faultInjections list + */ + private static final HashMap> storageFaultInjectionsMap = + new HashMap>(); + + // The amount of sleeptime to sleep in injectSleepWhileRead fault injection + private final long sleepTime; + + // Fault injection which would sleep for sleepTime before returning readEntry call + private final Consumer injectSleepWhileRead; + + // Fault injection which would corrupt the entry data before returning readEntry call + private final Consumer injectCorruptData; + + public BookKeeperClientTestsWithBookieErrors() { + super(NUM_BOOKIES); + baseConf.setLedgerStorageClass(MockSortedLedgerStorage.class.getName()); + + // this fault injection will corrupt the entry data by modifying the last byte of the entry + injectCorruptData = (byteBuf) -> { + ByteBuffer buf = byteBuf.nioBuffer(); + int lastByteIndex = buf.limit() - 1; + buf.put(lastByteIndex, (byte) (buf.get(lastByteIndex) - 1)); + }; + + // this fault injection, will sleep for ReadEntryTimeout+2 secs before returning the readEntry call + sleepTime = (baseClientConf.getReadEntryTimeout() + 2) * 1000; + injectSleepWhileRead = (byteBuf) -> { + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + }; + } + + @BeforeEach + public void setUp() throws Exception { + faultInjections.clear(); + storageFaultInjectionsMap.clear(); + super.setUp(); + } + + // In this testcase all the bookies will return corrupt entry + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bookkeeperAllDigestErrors() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper 
bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + // all the bookies need to return corrupt data + faultInjections.add(injectCorruptData); + faultInjections.add(injectCorruptData); + faultInjections.add(injectCorruptData); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + for (int i = 0; i < 10; i++) { + wlh.addEntry("foobarfoo".getBytes()); } + wlh.close(); - // Mock SortedLedgerStorage to simulate Fault Injection - static class MockSortedLedgerStorage extends SortedLedgerStorage { - public MockSortedLedgerStorage() { - super(); - } - - @Override - public ByteBuf getEntry(long ledgerId, long entryId) throws IOException, BookieException { - Consumer faultInjection; - synchronized (lock) { - faultInjection = storageFaultInjectionsMap.get(this); - if (faultInjection == null) { - int readLedgerStorageIndex = storageFaultInjectionsMap.size(); - faultInjection = faultInjections.get(readLedgerStorageIndex); - storageFaultInjectionsMap.put(this, faultInjection); - } - } - ByteBuf byteBuf = super.getEntry(ledgerId, entryId); - faultInjection.accept(byteBuf); - return byteBuf; - } + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + try { + rlh.readEntries(4, 4); + fail("It is expected to fail with BKDigestMatchException"); + } catch (BKException.BKDigestMatchException e) { } - - // In this testcase all the bookies will return corrupt entry - @Test(timeout = 60000) - public void testBookkeeperAllDigestErrors() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - - byte[] passwd = "AAAAAAA".getBytes(); - - // all the bookies need to return corrupt data - faultInjections.add(injectCorruptData); - faultInjections.add(injectCorruptData); - faultInjections.add(injectCorruptData); - - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = 
wlh.getId(); - for (int i = 0; i < 10; i++) { - wlh.addEntry("foobarfoo".getBytes()); - } - wlh.close(); - - LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); - try { - rlh.readEntries(4, 4); - fail("It is expected to fail with BKDigestMatchException"); - } catch (BKException.BKDigestMatchException e) { - } - rlh.close(); - bkc.close(); + rlh.close(); + bkc.close(); + } + + // In this testcase first two bookies will sleep (for ReadEntryTimeout+2 secs) before returning the data, + // and the last one will return corrupt data + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bKReadFirstTimeoutThenDigestError() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + faultInjections.add(injectSleepWhileRead); + faultInjections.add(injectSleepWhileRead); + faultInjections.add(injectCorruptData); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + for (int i = 0; i < 10; i++) { + wlh.addEntry("foobarfoo".getBytes()); } + wlh.close(); - // In this testcase first two bookies will sleep (for ReadEntryTimeout+2 secs) before returning the data, - // and the last one will return corrupt data - @Test(timeout = 60000) - public void testBKReadFirstTimeoutThenDigestError() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - - byte[] passwd = "AAAAAAA".getBytes(); - - faultInjections.add(injectSleepWhileRead); - faultInjections.add(injectSleepWhileRead); - faultInjections.add(injectCorruptData); - - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = wlh.getId(); - for (int i = 0; i < 10; i++) { - wlh.addEntry("foobarfoo".getBytes()); - } - wlh.close(); - - LedgerHandle rlh = 
bkc.openLedger(id, DigestType.CRC32, passwd); - try { - rlh.readEntries(4, 4); - fail("It is expected to fail with BKDigestMatchException"); - } catch (BKException.BKDigestMatchException e) { - } - rlh.close(); - bkc.close(); + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + try { + rlh.readEntries(4, 4); + fail("It is expected to fail with BKDigestMatchException"); + } catch (BKException.BKDigestMatchException e) { } - - // In this testcase first one will return corrupt data and the last two bookies will - // sleep (for ReadEntryTimeout+2 secs) before returning the data - @Test(timeout = 60000) - public void testBKReadFirstDigestErrorThenTimeout() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - - byte[] passwd = "AAAAAAA".getBytes(); - - faultInjections.add(injectCorruptData); - faultInjections.add(injectSleepWhileRead); - faultInjections.add(injectSleepWhileRead); - - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = wlh.getId(); - for (int i = 0; i < 10; i++) { - wlh.addEntry("foobarfoo".getBytes()); - } - wlh.close(); - - LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); - try { - rlh.readEntries(4, 4); - fail("It is expected to fail with BKDigestMatchException"); - } catch (BKException.BKDigestMatchException e) { - } - rlh.close(); - bkc.close(); + rlh.close(); + bkc.close(); + } + + // In this testcase first one will return corrupt data and the last two bookies will + // sleep (for ReadEntryTimeout+2 secs) before returning the data + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bKReadFirstDigestErrorThenTimeout() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + 
faultInjections.add(injectCorruptData); + faultInjections.add(injectSleepWhileRead); + faultInjections.add(injectSleepWhileRead); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + for (int i = 0; i < 10; i++) { + wlh.addEntry("foobarfoo".getBytes()); } + wlh.close(); - // In this testcase first two bookies are killed before making the readentry call - // and the last one will return corrupt data - @Test(timeout = 60000) - public void testBKReadFirstBookiesDownThenDigestError() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - - byte[] passwd = "AAAAAAA".getBytes(); - - faultInjections.add(injectCorruptData); - - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = wlh.getId(); - wlh.addEntry("foobarfoo".getBytes()); - wlh.close(); - - super.killBookie(0); - super.killBookie(1); - - Thread.sleep(500); - - LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); - try { - rlh.readEntries(0, 0); - fail("It is expected to fail with BKDigestMatchException"); - } catch (BKException.BKDigestMatchException e) { - } - rlh.close(); - bkc.close(); + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + try { + rlh.readEntries(4, 4); + fail("It is expected to fail with BKDigestMatchException"); + } catch (BKException.BKDigestMatchException e) { } - - // In this testcase all the bookies will sleep (for ReadEntryTimeout+2 secs) before returning the data - @Test(timeout = 60000) - public void testBKReadAllTimeouts() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - - byte[] passwd = "AAAAAAA".getBytes(); - - faultInjections.add(injectSleepWhileRead); - faultInjections.add(injectSleepWhileRead); - 
faultInjections.add(injectSleepWhileRead); - - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = wlh.getId(); - for (int i = 0; i < 10; i++) { - wlh.addEntry("foobarfoo".getBytes()); - } - wlh.close(); - - LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); - try { - rlh.readEntries(4, 4); - fail("It is expected to fail with BKTimeoutException"); - } catch (BKException.BKTimeoutException e) { - } - rlh.close(); - bkc.close(); + rlh.close(); + bkc.close(); + } + + // In this testcase first two bookies are killed before making the readentry call + // and the last one will return corrupt data + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bKReadFirstBookiesDownThenDigestError() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + faultInjections.add(injectCorruptData); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + wlh.addEntry("foobarfoo".getBytes()); + wlh.close(); + + super.killBookie(0); + super.killBookie(1); + + Thread.sleep(500); + + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + try { + rlh.readEntries(0, 0); + fail("It is expected to fail with BKDigestMatchException"); + } catch (BKException.BKDigestMatchException e) { } + rlh.close(); + bkc.close(); + } + + // In this testcase all the bookies will sleep (for ReadEntryTimeout+2 secs) before returning the data + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bKReadAllTimeouts() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + faultInjections.add(injectSleepWhileRead); + faultInjections.add(injectSleepWhileRead); + 
faultInjections.add(injectSleepWhileRead); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + for (int i = 0; i < 10; i++) { + wlh.addEntry("foobarfoo".getBytes()); + } + wlh.close(); - // In this testcase first two bookies will sleep (for ReadEntryTimeout+2 secs) before returning the data, - // but the last one will return as expected - @Test(timeout = 60000) - public void testBKReadTwoBookiesTimeout() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + try { + rlh.readEntries(4, 4); + fail("It is expected to fail with BKTimeoutException"); + } catch (BKException.BKTimeoutException e) { + } + rlh.close(); + bkc.close(); + } + + // In this testcase first two bookies will sleep (for ReadEntryTimeout+2 secs) before returning the data, + // but the last one will return as expected + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bKReadTwoBookiesTimeout() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + faultInjections.add(injectSleepWhileRead); + faultInjections.add(injectSleepWhileRead); + faultInjections.add((byteBuf) -> { + }); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + for (int i = 0; i < 10; i++) { + wlh.addEntry("foobarfoo".getBytes()); + } + wlh.close(); + + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + LedgerEntry entry = rlh.readEntries(4, 4).nextElement(); + assertEquals("foobarfoo", (new String(entry.getEntry())), + "The read Entry should match with what have been written"); + rlh.close(); + bkc.close(); + } + + // In this testcase first two bookies 
return the corrupt data, + // but the last one will return as expected + @Test + @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) + void bKReadTwoBookiesWithDigestError() throws Exception { + ClientConfiguration conf = new ClientConfiguration() + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc = new BookKeeper(conf); + + byte[] passwd = "AAAAAAA".getBytes(); + + faultInjections.add(injectCorruptData); + faultInjections.add(injectCorruptData); + faultInjections.add((byteBuf) -> { + }); + + LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); + long id = wlh.getId(); + for (int i = 0; i < 10; i++) { + wlh.addEntry("foobarfoo".getBytes()); + } + wlh.close(); - byte[] passwd = "AAAAAAA".getBytes(); + LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); + LedgerEntry entry = rlh.readEntries(4, 4).nextElement(); + assertEquals("foobarfoo", (new String(entry.getEntry())), + "The read Entry should match with what have been written"); + rlh.close(); + bkc.close(); + } - faultInjections.add(injectSleepWhileRead); - faultInjections.add(injectSleepWhileRead); - faultInjections.add((byteBuf) -> { - }); + // Mock SortedLedgerStorage to simulate Fault Injection + static class MockSortedLedgerStorage extends SortedLedgerStorage { - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = wlh.getId(); - for (int i = 0; i < 10; i++) { - wlh.addEntry("foobarfoo".getBytes()); - } - wlh.close(); - - LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); - LedgerEntry entry = rlh.readEntries(4, 4).nextElement(); - Assert.assertTrue("The read Entry should match with what have been written", - (new String(entry.getEntry())).equals("foobarfoo")); - rlh.close(); - bkc.close(); + public MockSortedLedgerStorage() { + super(); } - // In this testcase first two bookies return the corrupt data, - // but the last one will return as expected - @Test(timeout = 60000) - public void 
testBKReadTwoBookiesWithDigestError() throws Exception { - ClientConfiguration conf = new ClientConfiguration() - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc = new BookKeeper(conf); - - byte[] passwd = "AAAAAAA".getBytes(); - - faultInjections.add(injectCorruptData); - faultInjections.add(injectCorruptData); - faultInjections.add((byteBuf) -> { - }); - - LedgerHandle wlh = bkc.createLedger(3, 3, 2, DigestType.CRC32, passwd); - long id = wlh.getId(); - for (int i = 0; i < 10; i++) { - wlh.addEntry("foobarfoo".getBytes()); + @Override + public ByteBuf getEntry(long ledgerId, long entryId) throws IOException, BookieException { + Consumer faultInjection; + synchronized (lock) { + faultInjection = storageFaultInjectionsMap.get(this); + if (faultInjection == null) { + int readLedgerStorageIndex = storageFaultInjectionsMap.size(); + faultInjection = faultInjections.get(readLedgerStorageIndex); + storageFaultInjectionsMap.put(this, faultInjection); } - wlh.close(); - - LedgerHandle rlh = bkc.openLedger(id, DigestType.CRC32, passwd); - LedgerEntry entry = rlh.readEntries(4, 4).nextElement(); - Assert.assertTrue("The read Entry should match with what have been written", - (new String(entry.getEntry())).equals("foobarfoo")); - rlh.close(); - bkc.close(); + } + ByteBuf byteBuf = super.getEntry(ledgerId, entryId); + faultInjection.accept(byteBuf); + return byteBuf; } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientZKSessionExpiry.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientZKSessionExpiry.java index c72834397e0..e25bccfbc6b 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientZKSessionExpiry.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperClientZKSessionExpiry.java @@ -24,7 +24,7 @@ import org.apache.bookkeeper.test.TestCallbacks.AddCallbackFuture; import 
org.apache.bookkeeper.zookeeper.ZooKeeperWatcherBase; import org.apache.zookeeper.ZooKeeper; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -32,52 +32,54 @@ * Test the bookkeeper client while losing a ZK session. */ public class BookKeeperClientZKSessionExpiry extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(BookKeeperClientZKSessionExpiry.class); - public BookKeeperClientZKSessionExpiry() { - super(4); - } + private static final Logger LOG = LoggerFactory.getLogger(BookKeeperClientZKSessionExpiry.class); - @Test - public void testSessionLossWhileWriting() throws Exception { + public BookKeeperClientZKSessionExpiry() { + super(4); + } - Thread expiryThread = new Thread() { - @Override - public void run() { - try { - while (true) { - Thread.sleep(5000); - long sessionId = bkc.getZkHandle().getSessionId(); - byte[] sessionPasswd = bkc.getZkHandle().getSessionPasswd(); + @Test + void sessionLossWhileWriting() throws Exception { - try { - ZooKeeperWatcherBase watcher = new ZooKeeperWatcherBase(10000, false); - ZooKeeper zk = new ZooKeeper(zkUtil.getZooKeeperConnectString(), 10000, - watcher, sessionId, sessionPasswd); - zk.close(); - } catch (Exception e) { - LOG.info("Error killing session", e); - } - } - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - return; - } - } - }; - expiryThread.start(); + Thread expiryThread = new Thread() { + @Override + public void run() { + try { + while (true) { + Thread.sleep(5000); + long sessionId = bkc.getZkHandle().getSessionId(); + byte[] sessionPasswd = bkc.getZkHandle().getSessionPasswd(); - for (int i = 0; i < 3; i++) { - LedgerHandle lh = bkc.createLedger(3, 3, 2, BookKeeper.DigestType.MAC, "foobar".getBytes()); - for (int j = 0; j < 100; j++) { - lh.asyncAddEntry("foobar".getBytes(), new AddCallbackFuture(j), null); + try { + ZooKeeperWatcherBase watcher = new 
ZooKeeperWatcherBase(10000, true); + ZooKeeper zk = new ZooKeeper(zkUtil.getZooKeeperConnectString(), 10000, watcher, + sessionId, + sessionPasswd); + zk.close(); + } catch (Exception e) { + LOG.info("Error killing session", e); } - startNewBookie(); - killBookie(0); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + return; + } + } + }; + expiryThread.start(); - lh.addEntry("lastEntry".getBytes()); + for (int i = 0; i < 3; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, 2, BookKeeper.DigestType.MAC, "foobar".getBytes()); + for (int j = 0; j < 100; j++) { + lh.asyncAddEntry("foobar".getBytes(), new AddCallbackFuture(j), null); + } + startNewBookie(); + killBookie(0); - lh.close(); - } + lh.addEntry("lastEntry".getBytes()); + + lh.close(); } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperCloseTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperCloseTest.java index e9db5b9d736..9fc9a5f4c73 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperCloseTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperCloseTest.java @@ -20,9 +20,9 @@ */ package org.apache.bookkeeper.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import com.google.common.util.concurrent.SettableFuture; import io.netty.buffer.ByteBuf; @@ -47,566 +47,556 @@ import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback; import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; /** - * This unit test verifies the behavior of bookkeeper apis, where the operations - * are being executed through a closed bookkeeper client. + * This unit test verifies the behavior of bookkeeper apis, where the operations are being executed + * through a closed bookkeeper client. */ public class BookKeeperCloseTest extends BookKeeperClusterTestCase { - // Depending on the taste, select the amount of logging - // by decommenting one of the two lines below - // static Logger LOG = Logger.getRootLogger(); - private static final Logger LOG = LoggerFactory - .getLogger(BookKeeperCloseTest.class); - private DigestType digestType = DigestType.CRC32; - private static final String PASSWORD = "testPasswd"; - private static final BiConsumer NOOP_BICONSUMER = (l, e) -> { }; - - public BookKeeperCloseTest() { - super(3); - } - - private void restartBookieSlow() throws Exception{ - ServerConfiguration conf = killBookie(0); - - Bookie delayBookie = new TestBookieImpl(conf) { - @Override - public void recoveryAddEntry(ByteBuf entry, WriteCallback cb, - Object ctx, byte[] masterKey) - throws IOException, BookieException, InterruptedException { - try { - Thread.sleep(5000); - } catch (InterruptedException ie) { - // ignore, only interrupted if shutting down, - // and an exception would spam the logs - Thread.currentThread().interrupt(); - } - super.recoveryAddEntry(entry, cb, ctx, masterKey); - } - - @Override - public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, - Object ctx, byte[] masterKey) - throws IOException, BookieException, InterruptedException { - try { - Thread.sleep(5000); - } catch (InterruptedException ie) { - // ignore, only interrupted if shutting down, - // and an exception would spam the logs - Thread.currentThread().interrupt(); - } - super.addEntry(entry, ackBeforeSync, cb, ctx, masterKey); - } - - @Override - public ByteBuf readEntry(long ledgerId, long entryId) - throws IOException, NoLedgerException, 
BookieException { - try { - Thread.sleep(5000); - } catch (InterruptedException ie) { - // ignore, only interrupted if shutting down, - // and an exception would spam the logs - Thread.currentThread().interrupt(); - } - return super.readEntry(ledgerId, entryId); - } - }; - startAndAddBookie(conf, delayBookie); - } + // Depending on the taste, select the amount of logging + // by decommenting one of the two lines below + // static Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory.getLogger(BookKeeperCloseTest.class); - /** - * Test that createledger using bookkeeper client which is closed should - * throw ClientClosedException. - */ - @Test - public void testCreateLedger() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Closing bookkeeper client"); - bk.close(); - try { - bk.createLedger(digestType, PASSWORD.getBytes()); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct - } + private static final String PASSWORD = "testPasswd"; - // using async, because this could trigger an assertion - final AtomicInteger returnCode = new AtomicInteger(0); - final CountDownLatch openLatch = new CountDownLatch(1); - CreateCallback cb = new CreateCallback() { - @Override - public void createComplete(int rc, LedgerHandle lh, Object ctx) { - returnCode.set(rc); - openLatch.countDown(); - } - }; - bk.asyncCreateLedger(3, 2, digestType, PASSWORD.getBytes(), cb, - openLatch); - - LOG.info("Waiting to finish the ledger creation"); - // wait for creating the ledger - assertTrue("create ledger call should have completed", - openLatch.await(20, TimeUnit.SECONDS)); - assertEquals("Successfully created ledger through closed bkclient!", - BKException.Code.ClientClosedException, returnCode.get()); - } + private static final BiConsumer NOOP_BICONSUMER = (l, e) -> { + }; - /** - * Test that opening a ledger using bookkeeper client which is closed should - * throw 
ClientClosedException. - */ - @Test - public void testFenceLedger() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh = createLedgerWithEntries(bk, 100); - LOG.info("Closing bookkeeper client"); + private final DigestType digestType = DigestType.CRC32; - restartBookieSlow(); + public BookKeeperCloseTest() { + super(3); + } - bk.close(); + private void restartBookieSlow() throws Exception { + ServerConfiguration conf = killBookie(0); + Bookie delayBookie = new TestBookieImpl(conf) { + @Override + public void recoveryAddEntry(ByteBuf entry, WriteCallback cb, Object ctx, byte[] masterKey) + throws IOException, BookieException, InterruptedException { try { - bk.openLedger(lh.getId(), digestType, PASSWORD.getBytes()); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct + Thread.sleep(5000); + } catch (InterruptedException ie) { + // ignore, only interrupted if shutting down, + // and an exception would spam the logs + Thread.currentThread().interrupt(); } + super.recoveryAddEntry(entry, cb, ctx, masterKey); + } + @Override + public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, + byte[] masterKey) + throws IOException, BookieException, InterruptedException { try { - bk.openLedgerNoRecovery(lh.getId(), digestType, PASSWORD.getBytes()); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct + Thread.sleep(5000); + } catch (InterruptedException ie) { + // ignore, only interrupted if shutting down, + // and an exception would spam the logs + Thread.currentThread().interrupt(); } + super.addEntry(entry, ackBeforeSync, cb, ctx, masterKey); + } - final AtomicInteger returnCode = new AtomicInteger(0); - final CountDownLatch openLatch = new CountDownLatch(1); - AsyncCallback.OpenCallback cb = new AsyncCallback.OpenCallback() { - public void openComplete(int 
rc, LedgerHandle lh, Object ctx) { - returnCode.set(rc); - openLatch.countDown(); - } - }; - bk.asyncOpenLedger(lh.getId(), digestType, PASSWORD.getBytes(), cb, - openLatch); - - LOG.info("Waiting to open the ledger asynchronously"); - assertTrue("Open call should have completed", - openLatch.await(20, TimeUnit.SECONDS)); - assertTrue("Open should not have succeeded through closed bkclient!", - BKException.Code.ClientClosedException == returnCode.get()); - } - - /** - * Test that deleting a ledger using bookkeeper client which is closed - * should throw ClientClosedException. - */ - @Test - public void testDeleteLedger() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh = createLedgerWithEntries(bk, 100); - LOG.info("Closing bookkeeper client"); - bk.close(); + @Override + public ByteBuf readEntry(long ledgerId, long entryId) + throws IOException, BookieException { try { - bk.deleteLedger(lh.getId()); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct + Thread.sleep(5000); + } catch (InterruptedException ie) { + // ignore, only interrupted if shutting down, + // and an exception would spam the logs + Thread.currentThread().interrupt(); } - - // using async, because this could trigger an assertion - final AtomicInteger returnCode = new AtomicInteger(0); - final CountDownLatch openLatch = new CountDownLatch(1); - AsyncCallback.DeleteCallback cb = new AsyncCallback.DeleteCallback() { - public void deleteComplete(int rc, Object ctx) { - returnCode.set(rc); - openLatch.countDown(); - } - }; - bk.asyncDeleteLedger(lh.getId(), cb, openLatch); - - LOG.info("Waiting to delete the ledger asynchronously"); - assertTrue("Delete call should have completed", - openLatch.await(20, TimeUnit.SECONDS)); - assertEquals("Delete should not have succeeded through closed bkclient!", - BKException.Code.ClientClosedException, returnCode.get()); + 
return super.readEntry(ledgerId, entryId); + } + }; + startAndAddBookie(conf, delayBookie); + } + + /** + * Test that createledger using bookkeeper client which is closed should throw + * ClientClosedException. + */ + @Test + void createLedger() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Closing bookkeeper client"); + bk.close(); + try { + bk.createLedger(digestType, PASSWORD.getBytes()); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct } - /** - * Test that adding entry to a ledger using bookkeeper client which is - * closed should throw ClientClosedException. - */ - @Test - public void testAddLedgerEntry() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh = createLedgerWithEntries(bk, 1); - LOG.info("Closing bookkeeper client"); - - restartBookieSlow(); - - bk.close(); - - try { - lh.addEntry("foobar".getBytes()); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct - } - - final CountDownLatch completeLatch = new CountDownLatch(1); - final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); - lh.asyncAddEntry("foobar".getBytes(), new AddCallback() { - public void addComplete(int rccb, LedgerHandle lh, long entryId, - Object ctx) { - rc.set(rccb); - completeLatch.countDown(); - } - }, null); - - LOG.info("Waiting to finish adding another entry asynchronously"); - assertTrue("Add entry to ledger call should have completed", - completeLatch.await(20, TimeUnit.SECONDS)); - assertEquals( - "Add entry to ledger should not have succeeded through closed bkclient!", - BKException.Code.ClientClosedException, rc.get()); + // using async, because this could trigger an assertion + final AtomicInteger returnCode = new AtomicInteger(0); + final CountDownLatch openLatch = new CountDownLatch(1); + CreateCallback cb = new CreateCallback() { 
+ @Override + public void createComplete(int rc, LedgerHandle lh, Object ctx) { + returnCode.set(rc); + openLatch.countDown(); + } + }; + bk.asyncCreateLedger(3, 2, digestType, PASSWORD.getBytes(), cb, openLatch); + + LOG.info("Waiting to finish the ledger creation"); + // wait for creating the ledger + assertTrue(openLatch.await(20, TimeUnit.SECONDS), "create ledger call should have completed"); + assertEquals(BKException.Code.ClientClosedException, returnCode.get(), + "Successfully created ledger through closed bkclient!"); + } + + /** + * Test that opening a ledger using bookkeeper client which is closed should throw + * ClientClosedException. + */ + @Test + void fenceLedger() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh = createLedgerWithEntries(bk, 100); + LOG.info("Closing bookkeeper client"); + + restartBookieSlow(); + + bk.close(); + + try { + bk.openLedger(lh.getId(), digestType, PASSWORD.getBytes()); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct } - /** - * Test that closing a ledger using bookkeeper client which is closed should - * throw ClientClosedException. 
- */ - @Test - public void testCloseLedger() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh = createLedgerWithEntries(bk, 100); - LedgerHandle lh2 = createLedgerWithEntries(bk, 100); - - LOG.info("Closing bookkeeper client"); - bk.close(); - - try { - lh.close(); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct - } - - final CountDownLatch completeLatch = new CountDownLatch(1); - final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); - lh2.asyncClose(new CloseCallback() { - public void closeComplete(int rccb, LedgerHandle lh, Object ctx) { - rc.set(rccb); - completeLatch.countDown(); - } - }, null); - - LOG.info("Waiting to finish adding another entry asynchronously"); - assertTrue("Close ledger call should have completed", - completeLatch.await(20, TimeUnit.SECONDS)); - assertEquals( - "Close ledger should have succeeded through closed bkclient!", - BKException.Code.ClientClosedException, rc.get()); + try { + bk.openLedgerNoRecovery(lh.getId(), digestType, PASSWORD.getBytes()); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct } - /** - * Test that reading entry from a ledger using bookkeeper client which is - * closed should throw ClientClosedException. 
- */ - @Test - public void testReadLedgerEntry() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - int numOfEntries = 100; - LedgerHandle lh = createLedgerWithEntries(bk, numOfEntries); - LOG.info("Closing bookkeeper client"); - - restartBookieSlow(); - - bk.close(); + final AtomicInteger returnCode = new AtomicInteger(0); + final CountDownLatch openLatch = new CountDownLatch(1); + AsyncCallback.OpenCallback cb = new AsyncCallback.OpenCallback() { + public void openComplete(int rc, LedgerHandle lh, Object ctx) { + returnCode.set(rc); + openLatch.countDown(); + } + }; + bk.asyncOpenLedger(lh.getId(), digestType, PASSWORD.getBytes(), cb, openLatch); + + LOG.info("Waiting to open the ledger asynchronously"); + assertTrue(openLatch.await(20, TimeUnit.SECONDS), "Open call should have completed"); + assertEquals(BKException.Code.ClientClosedException, returnCode.get(), + "Open should not have succeeded through closed bkclient!"); + } + + /** + * Test that deleting a ledger using bookkeeper client which is closed should throw + * ClientClosedException. 
+ */ + @Test + void deleteLedger() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh = createLedgerWithEntries(bk, 100); + LOG.info("Closing bookkeeper client"); + bk.close(); + try { + bk.deleteLedger(lh.getId()); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct + } - try { - lh.readEntries(0, numOfEntries - 1); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct - } + // using async, because this could trigger an assertion + final AtomicInteger returnCode = new AtomicInteger(0); + final CountDownLatch openLatch = new CountDownLatch(1); + AsyncCallback.DeleteCallback cb = new AsyncCallback.DeleteCallback() { + public void deleteComplete(int rc, Object ctx) { + returnCode.set(rc); + openLatch.countDown(); + } + }; + bk.asyncDeleteLedger(lh.getId(), cb, openLatch); + + LOG.info("Waiting to delete the ledger asynchronously"); + assertTrue(openLatch.await(20, TimeUnit.SECONDS), "Delete call should have completed"); + assertEquals(BKException.Code.ClientClosedException, returnCode.get(), + "Delete should not have succeeded through closed bkclient!"); + } + + /** + * Test that adding entry to a ledger using bookkeeper client which is closed should throw + * ClientClosedException. 
+ */ + @Test + void addLedgerEntry() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh = createLedgerWithEntries(bk, 1); + LOG.info("Closing bookkeeper client"); + + restartBookieSlow(); + + bk.close(); + + try { + lh.addEntry("foobar".getBytes()); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct + } - final CountDownLatch readLatch = new CountDownLatch(1); - final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); - ReadCallback cb = new ReadCallback() { - @Override - public void readComplete(int rccb, LedgerHandle lh, - Enumeration seq, Object ctx) { - rc.set(rccb); - readLatch.countDown(); - } - }; - lh.asyncReadEntries(0, numOfEntries - 1, cb, readLatch); - - LOG.info("Waiting to finish reading the entries asynchronously"); - assertTrue("Read entry ledger call should have completed", - readLatch.await(20, TimeUnit.SECONDS)); - assertEquals( - "Read entry ledger should have succeeded through closed bkclient!", - BKException.Code.ClientClosedException, rc.get()); + final CountDownLatch completeLatch = new CountDownLatch(1); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + lh.asyncAddEntry("foobar".getBytes(), new AddCallback() { + public void addComplete(int rccb, LedgerHandle lh, long entryId, Object ctx) { + rc.set(rccb); + completeLatch.countDown(); + } + }, null); + + LOG.info("Waiting to finish adding another entry asynchronously"); + assertTrue(completeLatch.await(20, TimeUnit.SECONDS), + "Add entry to ledger call should have completed"); + assertEquals(BKException.Code.ClientClosedException, rc.get(), + "Add entry to ledger should not have succeeded through closed bkclient!"); + } + + /** + * Test that closing a ledger using bookkeeper client which is closed should throw + * ClientClosedException. 
+ */ + @Test + void closeLedger() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh = createLedgerWithEntries(bk, 100); + LedgerHandle lh2 = createLedgerWithEntries(bk, 100); + + LOG.info("Closing bookkeeper client"); + bk.close(); + + try { + lh.close(); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct } - /** - * Test that readlastconfirmed entry from a ledger using bookkeeper client - * which is closed should throw ClientClosedException. - */ - @Test - public void testReadLastConfirmed() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh = createLedgerWithEntries(bk, 100); - LOG.info("Closing bookkeeper client"); - - // make all bookies slow - restartBookieSlow(); - restartBookieSlow(); - restartBookieSlow(); - - bk.close(); - - final CountDownLatch readLatch = new CountDownLatch(1); - final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); - AsyncCallback.ReadLastConfirmedCallback cb = new AsyncCallback.ReadLastConfirmedCallback() { - - @Override - public void readLastConfirmedComplete(int rccb, long lastConfirmed, - Object ctx) { - rc.set(rccb); - readLatch.countDown(); - } - }; - lh.asyncReadLastConfirmed(cb, readLatch); - - LOG.info("Waiting to finish reading last confirmed entry asynchronously"); - assertTrue("ReadLastConfirmed call should have completed", - readLatch.await(20, TimeUnit.SECONDS)); - assertEquals( - "ReadLastConfirmed should have succeeded through closed bkclient!", - BKException.Code.ClientClosedException, rc.get()); + final CountDownLatch completeLatch = new CountDownLatch(1); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + lh2.asyncClose(new CloseCallback() { + public void closeComplete(int rccb, LedgerHandle lh, Object ctx) { + rc.set(rccb); + completeLatch.countDown(); + } + 
}, null); + + LOG.info("Waiting to finish adding another entry asynchronously"); + assertTrue(completeLatch.await(20, TimeUnit.SECONDS), + "Close ledger call should have completed"); + assertEquals(BKException.Code.ClientClosedException, rc.get(), + "Close ledger should have succeeded through closed bkclient!"); + } + + /** + * Test that reading entry from a ledger using bookkeeper client which is closed should throw + * ClientClosedException. + */ + @Test + void readLedgerEntry() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + int numOfEntries = 100; + LedgerHandle lh = createLedgerWithEntries(bk, numOfEntries); + LOG.info("Closing bookkeeper client"); + + restartBookieSlow(); + + bk.close(); + + try { + lh.readEntries(0, numOfEntries - 1); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct + } + final CountDownLatch readLatch = new CountDownLatch(1); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + ReadCallback cb = new ReadCallback() { + @Override + public void readComplete(int rccb, LedgerHandle lh, Enumeration seq, + Object ctx) { + rc.set(rccb); + readLatch.countDown(); + } + }; + lh.asyncReadEntries(0, numOfEntries - 1, cb, readLatch); + + LOG.info("Waiting to finish reading the entries asynchronously"); + assertTrue(readLatch.await(20, TimeUnit.SECONDS), + "Read entry ledger call should have completed"); + assertEquals(BKException.Code.ClientClosedException, rc.get(), + "Read entry ledger should have succeeded through closed bkclient!"); + } + + /** + * Test that readlastconfirmed entry from a ledger using bookkeeper client which is closed should + * throw ClientClosedException. 
+ */ + @Test + void readLastConfirmed() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh = createLedgerWithEntries(bk, 100); + LOG.info("Closing bookkeeper client"); + + // make all bookies slow + restartBookieSlow(); + restartBookieSlow(); + restartBookieSlow(); + + bk.close(); + + final CountDownLatch readLatch = new CountDownLatch(1); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + AsyncCallback.ReadLastConfirmedCallback cb = new AsyncCallback.ReadLastConfirmedCallback() { + + @Override + public void readLastConfirmedComplete(int rccb, long lastConfirmed, Object ctx) { + rc.set(rccb); + readLatch.countDown(); + } + }; + lh.asyncReadLastConfirmed(cb, readLatch); + + LOG.info("Waiting to finish reading last confirmed entry asynchronously"); + assertTrue(readLatch.await(20, TimeUnit.SECONDS), + "ReadLastConfirmed call should have completed"); + assertEquals(BKException.Code.ClientClosedException, rc.get(), + "ReadLastConfirmed should have succeeded through closed bkclient!"); + + try { + lh.readLastConfirmed(); + fail("should have failed, client is closed"); + } catch (BKClientClosedException e) { + // correct + } + } + + /** + * Test that checking a ledger using a closed BK client will throw a ClientClosedException. 
+ */ + @Test + void ledgerCheck() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh = createLedgerWithEntries(bk, 100); + LOG.info("Closing bookkeeper client"); + LedgerChecker lc = new LedgerChecker(bk); + + restartBookieSlow(); + bk.close(); + + final CountDownLatch postLatch = new CountDownLatch(1); + final AtomicInteger postRc = new AtomicInteger(BKException.Code.OK); + lc.checkLedger(lh, new GenericCallback>() { + @Override + public void operationComplete(int rc, Set result) { + postRc.set(rc); + postLatch.countDown(); + } + }); + assertTrue(postLatch.await(30, TimeUnit.SECONDS), "checkLedger should have finished"); + assertEquals(BKException.Code.ClientClosedException, postRc.get(), + "Should have client closed exception"); + } + + /** + * Test that BookKeeperAdmin operationg using a closed BK client will throw a + * ClientClosedException. + */ + @Test + void bookKeeperAdmin() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + try (BookKeeperAdmin bkadmin = new BookKeeperAdmin(bk, baseClientConf)) { + + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh1 = createLedgerWithEntries(bk, 100); + LedgerHandle lh2 = createLedgerWithEntries(bk, 100); + LedgerHandle lh3 = createLedgerWithEntries(bk, 100); + lh3.close(); + + BookieId bookieToKill = getBookie(0); + killBookie(bookieToKill); + startNewBookie(); + + CheckerCb checkercb = new CheckerCb(); + LedgerChecker lc = new LedgerChecker(bk); + lc.checkLedger(lh3, checkercb); + assertEquals(BKException.Code.OK, checkercb.getRc(30, TimeUnit.SECONDS), + "Should have completed"); + assertEquals(1, checkercb.getResult(30, TimeUnit.SECONDS).size(), + "Should have a missing fragment"); + + // make sure a bookie in each quorum is slow + restartBookieSlow(); + restartBookieSlow(); + + bk.close(); + + try { + bkadmin.openLedger(lh1.getId()); + fail("Shouldn't be able to open with a closed 
client"); + } catch (BKException.BKClientClosedException cce) { + // correct behaviour + } + + try { + bkadmin.openLedgerNoRecovery(lh1.getId()); + fail("Shouldn't be able to open with a closed client"); + } catch (BKException.BKClientClosedException cce) { + // correct behaviour + } + + try { + bkadmin.recoverBookieData(bookieToKill); + fail("Shouldn't be able to recover with a closed client"); + } catch (BKException.BKClientClosedException cce) { + // correct behaviour + } + + try { + bkadmin.replicateLedgerFragment(lh3, + checkercb.getResult(10, TimeUnit.SECONDS).iterator().next(), + NOOP_BICONSUMER); + fail("Shouldn't be able to replicate with a closed client"); + } catch (BKException.BKClientClosedException cce) { + // correct behaviour + } + } + } + + /** + * Test that the bookkeeper client doesn't leave any threads hanging around. See {@link + * https://issues.apache.org/jira/browse/BOOKKEEPER-804} + */ + @Test + void bookKeeperCloseThreads() throws Exception { + ThreadGroup group = new ThreadGroup("test-group"); + final SettableFuture future = SettableFuture.create(); + + Thread t = new Thread(group, "TestThread") { + @Override + public void run() { try { - lh.readLastConfirmed(); - fail("should have failed, client is closed"); - } catch (BKClientClosedException e) { - // correct + BookKeeper bk = new BookKeeper(baseClientConf); + // 9 is a ledger id of an existing ledger + LedgerHandle lh = bk.createLedger(BookKeeper.DigestType.CRC32, "passwd".getBytes()); + lh.addEntry("foobar".getBytes()); + lh.close(); + long id = lh.getId(); + // 9 is a ledger id of an existing ledger + lh = bk.openLedgerNoRecovery(id, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + Enumeration entries = lh.readEntries(0, 0); + + lh.close(); + bk.close(); + future.set(null); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + future.setException(ie); + } catch (Exception e) { + future.setException(e); + } + } + }; + t.start(); + + future.get(); + 
t.join(); + + // check in a loop for 10 seconds + // because sometimes it takes a while to threads to go away + for (int i = 0; i < 10; i++) { + if (group.activeCount() > 0) { + Thread[] threads = new Thread[group.activeCount()]; + group.enumerate(threads); + for (Thread leftover : threads) { + LOG.error("Leftover thread after {} secs: {}", i, leftover); } + Thread.sleep(1000); + } else { + break; + } } - - /** - * Test that checking a ledger using a closed BK client will - * throw a ClientClosedException. - */ - @Test - public void testLedgerCheck() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh = createLedgerWithEntries(bk, 100); - LOG.info("Closing bookkeeper client"); - LedgerChecker lc = new LedgerChecker(bk); - - restartBookieSlow(); - bk.close(); - - final CountDownLatch postLatch = new CountDownLatch(1); - final AtomicInteger postRc = new AtomicInteger(BKException.Code.OK); - lc.checkLedger(lh, new GenericCallback>() { - @Override - public void operationComplete(int rc, Set result) { - postRc.set(rc); - postLatch.countDown(); - } - }); - assertTrue("checkLedger should have finished", postLatch.await(30, TimeUnit.SECONDS)); - assertEquals("Should have client closed exception", - postRc.get(), BKException.Code.ClientClosedException); + assertEquals(0, group.activeCount(), "Should be no threads left in group"); + } + + private LedgerHandle createLedgerWithEntries(BookKeeper bk, int numOfEntries) throws Exception { + LedgerHandle lh = bk.createLedger(3, 3, digestType, PASSWORD.getBytes()); + + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + final CountDownLatch latch = new CountDownLatch(numOfEntries); + + final AddCallback cb = new AddCallback() { + public void addComplete(int rccb, LedgerHandle lh, long entryId, Object ctx) { + rc.compareAndSet(BKException.Code.OK, rccb); + latch.countDown(); + } + }; + for (int i = 0; i < numOfEntries; i++) { + 
lh.asyncAddEntry("foobar".getBytes(), cb, null); } + if (!latch.await(30, TimeUnit.SECONDS)) { + throw new Exception("Entries took too long to add"); + } + if (rc.get() != BKException.Code.OK) { + throw BKException.create(rc.get()); + } + return lh; + } - private static class CheckerCb implements GenericCallback> { - CountDownLatch latch = new CountDownLatch(1); - int rc = BKException.Code.OK; - Set result = null; - - @Override - public void operationComplete(int rc, Set result) { - this.rc = rc; - this.result = result; - latch.countDown(); - } + private static class CheckerCb implements GenericCallback> { - int getRc(int time, TimeUnit unit) throws Exception { - if (latch.await(time, unit)) { - return rc; - } else { - throw new Exception("Didn't complete"); - } - } + CountDownLatch latch = new CountDownLatch(1); + int rc = BKException.Code.OK; + Set result = null; - Set getResult(int time, TimeUnit unit) throws Exception { - if (latch.await(time, unit)) { - return result; - } else { - throw new Exception("Didn't complete"); - } - } - } - /** - * Test that BookKeeperAdmin operationg using a closed BK client will - * throw a ClientClosedException. 
- */ - @Test - public void testBookKeeperAdmin() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - try (BookKeeperAdmin bkadmin = new BookKeeperAdmin(bk, baseClientConf)) { - - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh1 = createLedgerWithEntries(bk, 100); - LedgerHandle lh2 = createLedgerWithEntries(bk, 100); - LedgerHandle lh3 = createLedgerWithEntries(bk, 100); - lh3.close(); - - BookieId bookieToKill = getBookie(0); - killBookie(bookieToKill); - startNewBookie(); - - CheckerCb checkercb = new CheckerCb(); - LedgerChecker lc = new LedgerChecker(bk); - lc.checkLedger(lh3, checkercb); - assertEquals("Should have completed", - checkercb.getRc(30, TimeUnit.SECONDS), BKException.Code.OK); - assertEquals("Should have a missing fragment", - 1, checkercb.getResult(30, TimeUnit.SECONDS).size()); - - // make sure a bookie in each quorum is slow - restartBookieSlow(); - restartBookieSlow(); - - bk.close(); - - try { - bkadmin.openLedger(lh1.getId()); - fail("Shouldn't be able to open with a closed client"); - } catch (BKException.BKClientClosedException cce) { - // correct behaviour - } - - try { - bkadmin.openLedgerNoRecovery(lh1.getId()); - fail("Shouldn't be able to open with a closed client"); - } catch (BKException.BKClientClosedException cce) { - // correct behaviour - } - - try { - bkadmin.recoverBookieData(bookieToKill); - fail("Shouldn't be able to recover with a closed client"); - } catch (BKException.BKClientClosedException cce) { - // correct behaviour - } - - try { - bkadmin.replicateLedgerFragment(lh3, - checkercb.getResult(10, TimeUnit.SECONDS).iterator().next(), NOOP_BICONSUMER); - fail("Shouldn't be able to replicate with a closed client"); - } catch (BKException.BKClientClosedException cce) { - // correct behaviour - } - } + @Override + public void operationComplete(int rc, Set result) { + this.rc = rc; + this.result = result; + latch.countDown(); } - /** - * Test that the bookkeeper client doesn't leave 
any threads hanging around. - * See {@link https://issues.apache.org/jira/browse/BOOKKEEPER-804} - */ - @Test - public void testBookKeeperCloseThreads() throws Exception { - ThreadGroup group = new ThreadGroup("test-group"); - final SettableFuture future = SettableFuture.create(); - - Thread t = new Thread(group, "TestThread") { - @Override - public void run() { - try { - BookKeeper bk = new BookKeeper(baseClientConf); - // 9 is a ledger id of an existing ledger - LedgerHandle lh = bk.createLedger(BookKeeper.DigestType.CRC32, "passwd".getBytes()); - lh.addEntry("foobar".getBytes()); - lh.close(); - long id = lh.getId(); - // 9 is a ledger id of an existing ledger - lh = bk.openLedgerNoRecovery(id, BookKeeper.DigestType.CRC32, "passwd".getBytes()); - Enumeration entries = lh.readEntries(0, 0); - - lh.close(); - bk.close(); - future.set(null); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - future.setException(ie); - } catch (Exception e) { - future.setException(e); - } - } - }; - t.start(); - - future.get(); - t.join(); - - // check in a loop for 10 seconds - // because sometimes it takes a while to threads to go away - for (int i = 0; i < 10; i++) { - if (group.activeCount() > 0) { - Thread[] threads = new Thread[group.activeCount()]; - group.enumerate(threads); - for (Thread leftover : threads) { - LOG.error("Leftover thread after {} secs: {}", i, leftover); - } - Thread.sleep(1000); - } else { - break; - } - } - assertEquals("Should be no threads left in group", 0, group.activeCount()); + int getRc(int time, TimeUnit unit) throws Exception { + if (latch.await(time, unit)) { + return rc; + } else { + throw new Exception("Didn't complete"); + } } - private LedgerHandle createLedgerWithEntries(BookKeeper bk, int numOfEntries) - throws Exception { - LedgerHandle lh = bk - .createLedger(3, 3, digestType, PASSWORD.getBytes()); - - final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); - final CountDownLatch latch = new 
CountDownLatch(numOfEntries); - - final AddCallback cb = new AddCallback() { - public void addComplete(int rccb, LedgerHandle lh, long entryId, - Object ctx) { - rc.compareAndSet(BKException.Code.OK, rccb); - latch.countDown(); - } - }; - for (int i = 0; i < numOfEntries; i++) { - lh.asyncAddEntry("foobar".getBytes(), cb, null); - } - if (!latch.await(30, TimeUnit.SECONDS)) { - throw new Exception("Entries took too long to add"); - } - if (rc.get() != BKException.Code.OK) { - throw BKException.create(rc.get()); - } - return lh; + Set getResult(int time, TimeUnit unit) throws Exception { + if (latch.await(time, unit)) { + return result; + } else { + throw new Exception("Didn't complete"); + } } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperDiskSpaceWeightedLedgerPlacementTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperDiskSpaceWeightedLedgerPlacementTest.java index 91612ec5c79..431ddde4356 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperDiskSpaceWeightedLedgerPlacementTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperDiskSpaceWeightedLedgerPlacementTest.java @@ -20,8 +20,8 @@ */ package org.apache.bookkeeper.client; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.HashMap; @@ -45,455 +45,456 @@ * Tests of the main BookKeeper client. 
*/ public class BookKeeperDiskSpaceWeightedLedgerPlacementTest extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(BookKeeperDiskSpaceWeightedLedgerPlacementTest.class); - private static final long MS_WEIGHT_UPDATE_TIMEOUT = 30000; - public BookKeeperDiskSpaceWeightedLedgerPlacementTest() { - super(10); - } - - class BookKeeperCheckInfoReader extends BookKeeper { - BookKeeperCheckInfoReader(ClientConfiguration conf) throws BKException, IOException, InterruptedException { - super(conf); + private static final Logger LOG = LoggerFactory + .getLogger(BookKeeperDiskSpaceWeightedLedgerPlacementTest.class); + private static final long MS_WEIGHT_UPDATE_TIMEOUT = 30000; + + public BookKeeperDiskSpaceWeightedLedgerPlacementTest() { + super(10); + } + + private BookieServer restartBookie(BookKeeperCheckInfoReader client, ServerConfiguration conf, + final long initialFreeDiskSpace, final long finalFreeDiskSpace, final AtomicBoolean useFinal) + throws Exception { + final AtomicBoolean ready = useFinal == null ? 
new AtomicBoolean(false) : useFinal; + Bookie bookieWithCustomFreeDiskSpace = new TestBookieImpl(conf) { + long startTime = System.currentTimeMillis(); + + @Override + public long getTotalFreeSpace() { + if (startTime == 0) { + startTime = System.currentTimeMillis(); } - - void blockUntilBookieWeightIs(BookieId bookie, Optional target) throws InterruptedException { - long startMsecs = System.currentTimeMillis(); - Optional freeDiskSpace = Optional.empty(); - while (System.currentTimeMillis() < (startMsecs + MS_WEIGHT_UPDATE_TIMEOUT)) { - freeDiskSpace = bookieInfoReader.getFreeDiskSpace(bookie); - if (freeDiskSpace.equals(target)) { - return; - } - Thread.sleep(1000); - } - fail(String.format( - "Server %s still has weight %s rather than %s", - bookie.toString(), freeDiskSpace, target.toString())); + if (!ready.get()) { + return initialFreeDiskSpace; + } else { + // after delaySecs, advertise finalFreeDiskSpace; before that advertise initialFreeDiskSpace + return finalFreeDiskSpace; } + } + }; + BookieServer server = startAndAddBookie(conf, bookieWithCustomFreeDiskSpace).getServer(); + client.blockUntilBookieWeightIs(server.getBookieId(), Optional.of(initialFreeDiskSpace)); + if (useFinal == null) { + ready.set(true); } - - private BookieServer restartBookie( - BookKeeperCheckInfoReader client, ServerConfiguration conf, final long initialFreeDiskSpace, - final long finalFreeDiskSpace, final AtomicBoolean useFinal) throws Exception { - final AtomicBoolean ready = useFinal == null ? 
new AtomicBoolean(false) : useFinal; - Bookie bookieWithCustomFreeDiskSpace = new TestBookieImpl(conf) { - long startTime = System.currentTimeMillis(); - @Override - public long getTotalFreeSpace() { - if (startTime == 0) { - startTime = System.currentTimeMillis(); - } - if (!ready.get()) { - return initialFreeDiskSpace; - } else { - // after delaySecs, advertise finalFreeDiskSpace; before that advertise initialFreeDiskSpace - return finalFreeDiskSpace; - } - } - }; - BookieServer server = startAndAddBookie(conf, bookieWithCustomFreeDiskSpace).getServer(); - client.blockUntilBookieWeightIs(server.getBookieId(), Optional.of(initialFreeDiskSpace)); - if (useFinal == null) { - ready.set(true); - } - return server; + return server; + } + + private BookieServer replaceBookieWithCustomFreeDiskSpaceBookie(BookKeeperCheckInfoReader client, + int bookieIdx, + final long freeDiskSpace) throws Exception { + return replaceBookieWithCustomFreeDiskSpaceBookie(client, bookieIdx, freeDiskSpace, + freeDiskSpace, null); + } + + private BookieServer replaceBookieWithCustomFreeDiskSpaceBookie(BookKeeperCheckInfoReader client, + BookieServer bookie, final long freeDiskSpace) throws Exception { + for (int i = 0; i < bookieCount(); i++) { + if (addressByIndex(i).equals(bookie.getBookieId())) { + return replaceBookieWithCustomFreeDiskSpaceBookie(client, i, freeDiskSpace); + } } - - private BookieServer replaceBookieWithCustomFreeDiskSpaceBookie( - BookKeeperCheckInfoReader client, - int bookieIdx, final long freeDiskSpace) - throws Exception { - return replaceBookieWithCustomFreeDiskSpaceBookie(client, bookieIdx, freeDiskSpace, freeDiskSpace, null); + return null; + } + + private BookieServer replaceBookieWithCustomFreeDiskSpaceBookie(BookKeeperCheckInfoReader client, + int bookieIdx, + long initialFreeDiskSpace, long finalFreeDiskSpace, AtomicBoolean useFinal) throws Exception { + BookieId addr = addressByIndex(bookieIdx); + LOG.info("Killing bookie {}", addr); + ServerConfiguration conf 
= killBookieAndWaitForZK(bookieIdx); + client.blockUntilBookieWeightIs(addr, Optional.empty()); + return restartBookie(client, conf, initialFreeDiskSpace, finalFreeDiskSpace, useFinal); + } + + /** + * Test to show that weight based selection honors the disk weight of bookies. + */ + @FlakyTest("https://github.com/apache/bookkeeper/issues/503") + public void testDiskSpaceWeightedBookieSelection() throws Exception { + long freeDiskSpace = 1000000L; + int multiple = 3; + + ClientConfiguration conf = new ClientConfiguration(); + conf.setDiskWeightBasedPlacementEnabled(true) + .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) + .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); + + for (int i = 0; i < numBookies; i++) { + // the first 8 bookies have freeDiskSpace of 1MB; While the remaining 2 have 3MB + if (i < numBookies - 2) { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); + } else { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, multiple * freeDiskSpace); + } } - - private BookieServer replaceBookieWithCustomFreeDiskSpaceBookie( - BookKeeperCheckInfoReader client, - BookieServer bookie, final long freeDiskSpace) - throws Exception { - for (int i = 0; i < bookieCount(); i++) { - if (addressByIndex(i).equals(bookie.getBookieId())) { - return replaceBookieWithCustomFreeDiskSpaceBookie(client, i, freeDiskSpace); - } - } - return null; + Map m = new HashMap<>(); + bookieAddresses().forEach(a -> m.put(a, 0)); + + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } } - - private BookieServer replaceBookieWithCustomFreeDiskSpaceBookie( - BookKeeperCheckInfoReader client, - int bookieIdx, long initialFreeDiskSpace, - long 
finalFreeDiskSpace, AtomicBoolean useFinal) throws Exception { - BookieId addr = addressByIndex(bookieIdx); - LOG.info("Killing bookie {}", addr); - ServerConfiguration conf = killBookieAndWaitForZK(bookieIdx); - client.blockUntilBookieWeightIs(addr, Optional.empty()); - return restartBookie(client, conf, initialFreeDiskSpace, finalFreeDiskSpace, useFinal); + client.close(); + // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; + // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X + for (int i = 0; i < numBookies - 2; i++) { + double ratio1 = + (double) m.get(addressByIndex(numBookies - 2)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio1 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio1 - multiple)); + double ratio2 = + (double) m.get(addressByIndex(numBookies - 1)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio2 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio2 - multiple)); } - - /** - * Test to show that weight based selection honors the disk weight of bookies. 
- */ - @FlakyTest("https://github.com/apache/bookkeeper/issues/503") - public void testDiskSpaceWeightedBookieSelection() throws Exception { - long freeDiskSpace = 1000000L; - int multiple = 3; - - ClientConfiguration conf = new ClientConfiguration(); - conf.setDiskWeightBasedPlacementEnabled(true) - .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) - .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); - - for (int i = 0; i < numBookies; i++) { - // the first 8 bookies have freeDiskSpace of 1MB; While the remaining 2 have 3MB - if (i < numBookies - 2) { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); - } else { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, multiple * freeDiskSpace); - } - } - Map m = new HashMap<>(); - bookieAddresses().forEach(a -> m.put(a, 0)); - - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } - client.close(); - // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; - // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X - for (int i = 0; i < numBookies - 2; i++) { - double ratio1 = (double) m.get(addressByIndex(numBookies - 2)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio1 - multiple), - Math.abs(ratio1 - multiple) < 1); - double ratio2 = (double) m.get(addressByIndex(numBookies - 1)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio2 - multiple), - Math.abs(ratio2 - multiple) < 1); - } + } + + /** + * Test to show that weight based selection honors the disk weight of 
bookies and also adapts when + * the bookies's weight changes. + */ + @FlakyTest("https://github.com/apache/bookkeeper/issues/503") + public void testDiskSpaceWeightedBookieSelectionWithChangingWeights() throws Exception { + long freeDiskSpace = 1000000L; + int multiple = 3; + + ClientConfiguration conf = new ClientConfiguration(); + conf.setDiskWeightBasedPlacementEnabled(true) + .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) + .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); + + for (int i = 0; i < numBookies; i++) { + // the first 8 bookies have freeDiskSpace of 1MB; While the remaining 2 have 3MB + if (i < numBookies - 2) { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); + } else { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, multiple * freeDiskSpace); + } + } + Map m = new HashMap<>(); + bookieAddresses().forEach(a -> m.put(a, 0)); + + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } } - /** - * Test to show that weight based selection honors the disk weight of bookies and also adapts - * when the bookies's weight changes. 
- */ - @FlakyTest("https://github.com/apache/bookkeeper/issues/503") - public void testDiskSpaceWeightedBookieSelectionWithChangingWeights() throws Exception { - long freeDiskSpace = 1000000L; - int multiple = 3; - - ClientConfiguration conf = new ClientConfiguration(); - conf.setDiskWeightBasedPlacementEnabled(true) - .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) - .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); - - for (int i = 0; i < numBookies; i++) { - // the first 8 bookies have freeDiskSpace of 1MB; While the remaining 2 have 3MB - if (i < numBookies - 2) { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); - } else { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, multiple * freeDiskSpace); - } - } - Map m = new HashMap<>(); - bookieAddresses().forEach(a -> m.put(a, 0)); - - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } - - // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; - // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X - for (int i = 0; i < numBookies - 2; i++) { - double ratio1 = (double) m.get(addressByIndex(numBookies - 2)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio1 - multiple), - Math.abs(ratio1 - multiple) < 1); - double ratio2 = (double) m.get(addressByIndex(numBookies - 1)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio2 - multiple), - Math.abs(ratio2 - multiple) < 1); - } - - // Restart the bookies in such a way that the first 2 bookies go from 1MB 
to 3MB free space and the last - // 2 bookies go from 3MB to 1MB - BookieServer server1 = serverByIndex(0); - BookieServer server2 = serverByIndex(1); - BookieServer server3 = serverByIndex(numBookies - 2); - BookieServer server4 = serverByIndex(numBookies - 1); - - server1 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server1, multiple * freeDiskSpace); - server2 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server2, multiple * freeDiskSpace); - server3 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server3, freeDiskSpace); - server4 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server4, freeDiskSpace); - - bookieAddresses().forEach(a -> m.put(a, 0)); - - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; + // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X + for (int i = 0; i < numBookies - 2; i++) { + double ratio1 = + (double) m.get(addressByIndex(numBookies - 2)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio1 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio1 - multiple)); + double ratio2 = + (double) m.get(addressByIndex(numBookies - 1)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio2 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio2 - multiple)); + } - // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; - // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X - for (int i = 0; i < numBookies; i++) { - if (server1.getLocalAddress().equals(addressByIndex(i)) - || server2.getLocalAddress().equals(addressByIndex(i))) { - 
continue; - } - double ratio1 = (double) m.get(server1) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio1 - multiple), - Math.abs(ratio1 - multiple) < 1); - double ratio2 = (double) m.get(server2) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio2 - multiple), - Math.abs(ratio2 - multiple) < 1); - } - client.close(); + // Restart the bookies in such a way that the first 2 bookies go from 1MB to 3MB free space and the last + // 2 bookies go from 3MB to 1MB + BookieServer server1 = serverByIndex(0); + BookieServer server2 = serverByIndex(1); + BookieServer server3 = serverByIndex(numBookies - 2); + BookieServer server4 = serverByIndex(numBookies - 1); + + server1 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server1, multiple * freeDiskSpace); + server2 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server2, multiple * freeDiskSpace); + server3 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server3, freeDiskSpace); + server4 = replaceBookieWithCustomFreeDiskSpaceBookie(client, server4, freeDiskSpace); + + bookieAddresses().forEach(a -> m.put(a, 0)); + + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } } - /** - * Test to show that weight based selection honors the disk weight of bookies and also adapts - * when bookies go away permanently. 
- */ - @FlakyTest("https://github.com/apache/bookkeeper/issues/503") - public void testDiskSpaceWeightedBookieSelectionWithBookiesDying() throws Exception { - long freeDiskSpace = 1000000L; - int multiple = 3; - - ClientConfiguration conf = new ClientConfiguration(); - conf.setDiskWeightBasedPlacementEnabled(true) - .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) - .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); - - for (int i = 0; i < numBookies; i++) { - // the first 8 bookies have freeDiskSpace of 1MB; While the remaining 2 have 1GB - if (i < numBookies - 2) { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); - } else { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, multiple * freeDiskSpace); - } - } - Map m = new HashMap<>(); - bookieAddresses().forEach(a -> m.put(a, 0)); - - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; + // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X + for (int i = 0; i < numBookies; i++) { + if (server1.getLocalAddress().equals(addressByIndex(i)) + || server2.getLocalAddress().equals(addressByIndex(i))) { + continue; + } + double ratio1 = (double) m.get(server1) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio1 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio1 - multiple)); + double ratio2 = (double) m.get(server2) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio2 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio2 - multiple)); + } + 
client.close(); + } + + /** + * Test to show that weight based selection honors the disk weight of bookies and also adapts when + * bookies go away permanently. + */ + @FlakyTest("https://github.com/apache/bookkeeper/issues/503") + public void testDiskSpaceWeightedBookieSelectionWithBookiesDying() throws Exception { + long freeDiskSpace = 1000000L; + int multiple = 3; + + ClientConfiguration conf = new ClientConfiguration(); + conf.setDiskWeightBasedPlacementEnabled(true) + .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) + .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); + + for (int i = 0; i < numBookies; i++) { + // the first 8 bookies have freeDiskSpace of 1MB; While the remaining 2 have 1GB + if (i < numBookies - 2) { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); + } else { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, multiple * freeDiskSpace); + } + } + Map m = new HashMap<>(); + bookieAddresses().forEach(a -> m.put(a, 0)); + + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } + } - // make sure that bookies with higher weight are chosen 3X as often as the median; - // since the number of ledgers is small (2000), there may be variation - double ratio1 = (double) m.get(addressByIndex(numBookies - 2)) - / (double) m.get(addressByIndex(0)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio1 - multiple), - Math.abs(ratio1 - multiple) < 1); - double ratio2 = (double) m.get(addressByIndex(numBookies - 1)) - / (double) m.get(addressByIndex(1)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio2 - multiple), - Math.abs(ratio2 - multiple) < 1); - - // Bring down the 2 
bookies that had higher weight; after this the allocation to all - // the remaining bookies should be uniform - bookieAddresses().forEach(a -> m.put(a, 0)); - - BookieServer server1 = serverByIndex(numBookies - 2); - BookieServer server2 = serverByIndex(numBookies - 1); - killBookieAndWaitForZK(numBookies - 1); - killBookieAndWaitForZK(numBookies - 2); - - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + // make sure that bookies with higher weight are chosen 3X as often as the median; + // since the number of ledgers is small (2000), there may be variation + double ratio1 = + (double) m.get(addressByIndex(numBookies - 2)) / (double) m.get(addressByIndex(0)); + assertTrue(Math.abs(ratio1 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio1 - multiple)); + double ratio2 = + (double) m.get(addressByIndex(numBookies - 1)) / (double) m.get(addressByIndex(1)); + assertTrue(Math.abs(ratio2 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio2 - multiple)); + + // Bring down the 2 bookies that had higher weight; after this the allocation to all + // the remaining bookies should be uniform + bookieAddresses().forEach(a -> m.put(a, 0)); + + BookieServer server1 = serverByIndex(numBookies - 2); + BookieServer server2 = serverByIndex(numBookies - 1); + killBookieAndWaitForZK(numBookies - 1); + killBookieAndWaitForZK(numBookies - 2); + + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } + } - // make sure that bookies with higher weight are chosen 3X as often as the median; - for (int i = 0; i < numBookies - 3; i++) { - double delta = Math.abs((double) m.get(addressByIndex(i)) - - (double) 
m.get(addressByIndex(i + 1))); - delta = (delta * 100) / (double) m.get(addressByIndex(i + 1)); - // the deviation should be less than 30% - assertTrue("Weigheted placement is not honored: " + delta, delta <= 30); - } - // since the following 2 bookies were down, they shouldn't ever be selected - assertTrue("Weigheted placement is not honored" + m.get(server1), - m.get(server1) == 0); - assertTrue("Weigheted placement is not honored" + m.get(server2), - m.get(server2) == 0); + // make sure that bookies with higher weight are chosen 3X as often as the median; + for (int i = 0; i < numBookies - 3; i++) { + double delta = Math + .abs((double) m.get(addressByIndex(i)) - (double) m.get(addressByIndex(i + 1))); + delta = (delta * 100) / (double) m.get(addressByIndex(i + 1)); + // the deviation should be less than 30% + assertTrue(delta <= 30, "Weigheted placement is not honored: " + delta); + } + // since the following 2 bookies were down, they shouldn't ever be selected + assertTrue(m.get(server1) == 0, "Weigheted placement is not honored" + m.get(server1)); + assertTrue(m.get(server2) == 0, "Weigheted placement is not honored" + m.get(server2)); + + client.close(); + } + + /** + * Test to show that weight based selection honors the disk weight of bookies and also adapts when + * bookies are added. 
+ */ + @FlakyTest("https://github.com/apache/bookkeeper/issues/503") + public void testDiskSpaceWeightedBookieSelectionWithBookiesBeingAdded() throws Exception { + long freeDiskSpace = 1000000L; + int multiple = 3; + + ClientConfiguration conf = new ClientConfiguration(); + conf.setDiskWeightBasedPlacementEnabled(true) + .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) + .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) + .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); + + for (int i = 0; i < numBookies; i++) { + // all the bookies have freeDiskSpace of 1MB + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); + } + // let the last two bookies be down initially + ServerConfiguration conf1 = killBookieAndWaitForZK(numBookies - 1); + ServerConfiguration conf2 = killBookieAndWaitForZK(numBookies - 2); + Map m = new HashMap<>(); + + bookieAddresses().forEach(a -> m.put(a, 0)); + + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } + } - client.close(); + // make sure that bookies with higher weight are chosen 3X as often as the median; + // since the number of ledgers is small (2000), there may be variation + for (int i = 0; i < numBookies - 3; i++) { + double delta = Math + .abs((double) m.get(addressByIndex(i)) - (double) m.get(addressByIndex(i + 1))); + delta = (delta * 100) / (double) m.get(addressByIndex(i + 1)); + // the deviation should be less than 30% + assertTrue(delta <= 30, "Weigheted placement is not honored: " + delta); } - /** - * Test to show that weight based selection honors the disk weight of bookies and also adapts - * when bookies are added. 
- */ - @FlakyTest("https://github.com/apache/bookkeeper/issues/503") - public void testDiskSpaceWeightedBookieSelectionWithBookiesBeingAdded() throws Exception { - long freeDiskSpace = 1000000L; - int multiple = 3; - - ClientConfiguration conf = new ClientConfiguration(); - conf.setDiskWeightBasedPlacementEnabled(true) - .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) - .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); - - for (int i = 0; i < numBookies; i++) { - // all the bookies have freeDiskSpace of 1MB - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); - } - // let the last two bookies be down initially - ServerConfiguration conf1 = killBookieAndWaitForZK(numBookies - 1); - ServerConfiguration conf2 = killBookieAndWaitForZK(numBookies - 2); - Map m = new HashMap<>(); - - bookieAddresses().forEach(a -> m.put(a, 0)); - - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + // bring up the two dead bookies; they'll also have 3X more free space than the rest of the bookies + restartBookie(client, conf1, multiple * freeDiskSpace, multiple * freeDiskSpace, null); + restartBookie(client, conf2, multiple * freeDiskSpace, multiple * freeDiskSpace, null); - // make sure that bookies with higher weight are chosen 3X as often as the median; - // since the number of ledgers is small (2000), there may be variation - for (int i = 0; i < numBookies - 3; i++) { - double delta = Math.abs((double) m.get(addressByIndex(i)) - - (double) m.get(addressByIndex(i + 1))); - delta = (delta * 100) / (double) m.get(addressByIndex(i + 1)); - // the deviation should be less than 30% - assertTrue("Weigheted placement is not honored: " + delta, 
delta <= 30); - } + bookieAddresses().forEach(a -> m.put(a, 0)); - // bring up the two dead bookies; they'll also have 3X more free space than the rest of the bookies - restartBookie(client, conf1, multiple * freeDiskSpace, multiple * freeDiskSpace, null); - restartBookie(client, conf2, multiple * freeDiskSpace, multiple * freeDiskSpace, null); + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } + } - bookieAddresses().forEach(a -> m.put(a, 0)); + // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; + // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X + for (int i = 0; i < numBookies - 2; i++) { + double ratio1 = + (double) m.get(addressByIndex(numBookies - 2)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio1 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio1 - multiple)); + double ratio2 = + (double) m.get(addressByIndex(numBookies - 1)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio2 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio2 - multiple)); + } + client.close(); + } + + /** + * Tests that the bookie selection is based on the amount of free disk space a bookie has. Also + * make sure that the periodic bookieInfo read is working and causes the new weights to be taken + * into account. 
+ */ + @FlakyTest("https://github.com/apache/bookkeeper/issues/503") + public void testDiskSpaceWeightedBookieSelectionWithPeriodicBookieInfoUpdate() throws Exception { + long freeDiskSpace = 1000000L; + int multiple = 3; + + int updateIntervalSecs = 6; + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()) + .setDiskWeightBasedPlacementEnabled(true) + .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) + .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) + .setGetBookieInfoIntervalSeconds(updateIntervalSecs, TimeUnit.SECONDS); + final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); + + AtomicBoolean useHigherValue = new AtomicBoolean(false); + for (int i = 0; i < numBookies; i++) { + // the first 8 bookies have freeDiskSpace of 1MB; the remaining 2 will advertise 1MB for + // the start of the test, and 3MB once useHigherValue is set + if (i < numBookies - 2) { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); + } else { + replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace, + multiple * freeDiskSpace, + useHigherValue); + } + } + Map m = new HashMap<>(); - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + bookieAddresses().forEach(a -> m.put(a, 0)); - // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; - // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X - for (int i = 0; i < numBookies - 2; i++) { - double ratio1 = (double) m.get(addressByIndex(numBookies - 2)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio1 - multiple), - Math.abs(ratio1 - multiple) < 1); - double ratio2 = (double) 
m.get(addressByIndex(numBookies - 1)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio2 - multiple), - Math.abs(ratio2 - multiple) < 1); - } - client.close(); + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } } - /** - * Tests that the bookie selection is based on the amount of free disk space a bookie has. Also make sure that - * the periodic bookieInfo read is working and causes the new weights to be taken into account. - */ - @FlakyTest("https://github.com/apache/bookkeeper/issues/503") - public void testDiskSpaceWeightedBookieSelectionWithPeriodicBookieInfoUpdate() throws Exception { - long freeDiskSpace = 1000000L; - int multiple = 3; - - int updateIntervalSecs = 6; - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()) - .setDiskWeightBasedPlacementEnabled(true) - .setGetBookieInfoRetryIntervalSeconds(1, TimeUnit.SECONDS) - .setBookieMaxWeightMultipleForWeightBasedPlacement(multiple) - .setGetBookieInfoIntervalSeconds(updateIntervalSecs, TimeUnit.SECONDS); - final BookKeeperCheckInfoReader client = new BookKeeperCheckInfoReader(conf); - - AtomicBoolean useHigherValue = new AtomicBoolean(false); - for (int i = 0; i < numBookies; i++) { - // the first 8 bookies have freeDiskSpace of 1MB; the remaining 2 will advertise 1MB for - // the start of the test, and 3MB once useHigherValue is set - if (i < numBookies - 2) { - replaceBookieWithCustomFreeDiskSpaceBookie(client, 0, freeDiskSpace); - } else { - replaceBookieWithCustomFreeDiskSpaceBookie( - client, 0, freeDiskSpace, multiple * freeDiskSpace, useHigherValue); - } - } - Map m = new HashMap<>(); + for (int i = 0; i < numBookies - 1; i++) { + double delta = Math + .abs((double) m.get(addressByIndex(i)) - (double) 
m.get(addressByIndex(i + 1))); + delta = (delta * 100) / (double) m.get(addressByIndex(i + 1)); + assertTrue(delta <= 30, + "Weigheted placement is not honored: " + delta); // the deviation should be <30% + } - bookieAddresses().forEach(a -> m.put(a, 0)); + // Sleep for double the time required to update the bookie infos, and then check each one + useHigherValue.set(true); + Thread.sleep(updateIntervalSecs * 1000); + for (int i = 0; i < numBookies; i++) { + if (i < numBookies - 2) { + client.blockUntilBookieWeightIs(addressByIndex(i), Optional.of(freeDiskSpace)); + } else { + client.blockUntilBookieWeightIs(addressByIndex(i), Optional.of(freeDiskSpace * multiple)); + } + } - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + bookieAddresses().forEach(a -> m.put(a, 0)); + for (int i = 0; i < 2000; i++) { + LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { + m.put(b, m.get(b) + 1); + } + } - for (int i = 0; i < numBookies - 1; i++) { - double delta = Math.abs((double) m.get(addressByIndex(i)) - - (double) m.get(addressByIndex(i + 1))); - delta = (delta * 100) / (double) m.get(addressByIndex(i + 1)); - assertTrue("Weigheted placement is not honored: " + delta, delta <= 30); // the deviation should be <30% - } + // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; + // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X + for (int i = 0; i < numBookies - 2; i++) { + double ratio1 = + (double) m.get(addressByIndex(numBookies - 2)) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio1 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio1 - multiple)); + double ratio2 = 
+ (double) m.get(addressByIndex(lastBookieIndex())) / (double) m.get(addressByIndex(i)); + assertTrue(Math.abs(ratio2 - multiple) < 1, + "Weigheted placement is not honored: " + Math.abs(ratio2 - multiple)); + } + client.close(); + } - // Sleep for double the time required to update the bookie infos, and then check each one - useHigherValue.set(true); - Thread.sleep(updateIntervalSecs * 1000); - for (int i = 0; i < numBookies; i++) { - if (i < numBookies - 2) { - client.blockUntilBookieWeightIs(addressByIndex(i), Optional.of(freeDiskSpace)); - } else { - client.blockUntilBookieWeightIs(addressByIndex(i), Optional.of(freeDiskSpace * multiple)); - } - } + class BookKeeperCheckInfoReader extends BookKeeper { - bookieAddresses().forEach(a -> m.put(a, 0)); - for (int i = 0; i < 2000; i++) { - LedgerHandle lh = client.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - for (BookieId b : lh.getLedgerMetadata().getEnsembleAt(0)) { - m.put(b, m.get(b) + 1); - } - } + BookKeeperCheckInfoReader(ClientConfiguration conf) + throws BKException, IOException, InterruptedException { + super(conf); + } - // make sure that bookies with higher weight(the last 2 bookies) are chosen 3X as often as the median; - // since the number of ledgers created is small (2000), we allow a range of 2X to 4X instead of the exact 3X - for (int i = 0; i < numBookies - 2; i++) { - double ratio1 = (double) m.get(addressByIndex(numBookies - 2)) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio1 - multiple), - Math.abs(ratio1 - multiple) < 1); - double ratio2 = (double) m.get(addressByIndex(lastBookieIndex())) - / (double) m.get(addressByIndex(i)); - assertTrue("Weigheted placement is not honored: " + Math.abs(ratio2 - multiple), - Math.abs(ratio2 - multiple) < 1); + void blockUntilBookieWeightIs(BookieId bookie, Optional target) + throws InterruptedException { + long startMsecs = System.currentTimeMillis(); + Optional freeDiskSpace = 
Optional.empty(); + while (System.currentTimeMillis() < (startMsecs + MS_WEIGHT_UPDATE_TIMEOUT)) { + freeDiskSpace = bookieInfoReader.getFreeDiskSpace(bookie); + if (freeDiskSpace.equals(target)) { + return; } - - client.close(); + Thread.sleep(1000); + } + fail(String + .format("Server %s still has weight %s rather than %s", bookie.toString(), freeDiskSpace, + target.toString())); } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTest.java index bb534b1e583..a2bb494a45c 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTest.java @@ -24,10 +24,12 @@ import static org.apache.bookkeeper.client.BookKeeperClientStats.WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS; import static org.apache.bookkeeper.common.concurrent.FutureUtils.result; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -37,9 +39,7 @@ import java.util.Collections; import java.util.Enumeration; import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -52,14 +52,12 @@ import org.apache.bookkeeper.client.api.WriteFlag; import org.apache.bookkeeper.client.api.WriteHandle; import 
org.apache.bookkeeper.conf.ClientConfiguration; -import org.apache.bookkeeper.discover.BookieServiceInfo; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.stats.StatsLogger; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; import org.apache.bookkeeper.test.TestStatsProvider; import org.apache.bookkeeper.util.StaticDNSResolver; -import org.apache.bookkeeper.versioning.Versioned; import org.apache.bookkeeper.zookeeper.BoundExponentialBackoffRetryPolicy; import org.apache.bookkeeper.zookeeper.ZooKeeperClient; import org.apache.bookkeeper.zookeeper.ZooKeeperWatcherBase; @@ -74,7 +72,6 @@ import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; import org.apache.zookeeper.data.ACL; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.EnabledForJreRange; import org.junit.jupiter.api.condition.JRE; @@ -85,1265 +82,1233 @@ * Tests of the main BookKeeper client. 
*/ public class BookKeeperTest extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(BookKeeperTest.class); - private static final long INVALID_LEDGERID = -1L; - private final DigestType digestType; - public BookKeeperTest() { - super(3); - this.digestType = DigestType.CRC32; + private static final Logger LOG = LoggerFactory.getLogger(BookKeeperTest.class); + private static final long INVALID_LEDGERID = -1L; + private final DigestType digestType; + + public BookKeeperTest() { + super(3); + this.digestType = DigestType.CRC32; + } + + @Test + @EnabledForJreRange(max = JRE.JAVA_17) + public void testConstructionZkDelay() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(20000); + + CountDownLatch l = new CountDownLatch(1); + zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l); + l.await(); + + BookKeeper bkc = new BookKeeper(conf); + bkc.createLedger(digestType, "testPasswd".getBytes()).close(); + bkc.close(); + } + + @Test + @EnabledForJreRange(max = JRE.JAVA_17) + public void testConstructionNotConnectedExplicitZk() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(20000); + + CountDownLatch l = new CountDownLatch(1); + zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l); + l.await(); + + ZooKeeper zk = new ZooKeeper(zkUtil.getZooKeeperConnectString(), 50, event -> { + }); + assertFalse(zk.getState().isConnected(), "ZK shouldn't have connected yet"); + try { + BookKeeper bkc = new BookKeeper(conf, zk); + fail("Shouldn't be able to construct with unconnected zk"); + } catch (IOException cle) { + // correct behaviour + assertTrue(cle.getCause() instanceof ConnectionLossException); } - - @Test - @EnabledForJreRange(max = JRE.JAVA_17) - public void testConstructionZkDelay() throws Exception { - ClientConfiguration conf = new 
ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()) - .setZkTimeout(20000); - - CountDownLatch l = new CountDownLatch(1); - zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l); - l.await(); - - BookKeeper bkc = new BookKeeper(conf); - bkc.createLedger(digestType, "testPasswd".getBytes()).close(); - bkc.close(); - } - - @Test - @EnabledForJreRange(max = JRE.JAVA_17) - public void testConstructionNotConnectedExplicitZk() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()) - .setZkTimeout(20000); - - CountDownLatch l = new CountDownLatch(1); - zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l); - l.await(); - - ZooKeeper zk = new ZooKeeper( - zkUtil.getZooKeeperConnectString(), - 50, - event -> {}); - assertFalse(zk.getState().isConnected(), "ZK shouldn't have connected yet"); - try { - BookKeeper bkc = new BookKeeper(conf, zk); - fail("Shouldn't be able to construct with unconnected zk"); - } catch (IOException cle) { - // correct behaviour - assertTrue(cle.getCause() instanceof ConnectionLossException); + } + + /** + * Test that bookkeeper is not able to open ledgers if it provides the wrong password or wrong + * digest. + */ + @Test + public void testBookkeeperDigestPasswordWithAutoDetection() throws Exception { + testBookkeeperDigestPassword(true); + } + + @Test + public void testBookkeeperDigestPasswordWithoutAutoDetection() throws Exception { + testBookkeeperDigestPassword(false); + } + + void testBookkeeperDigestPassword(boolean autodetection) throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + conf.setEnableDigestTypeAutodetection(autodetection); + BookKeeper bkc = new BookKeeper(conf); + + DigestType digestCorrect = digestType; + byte[] passwdCorrect = "AAAAAAA".getBytes(); + DigestType digestBad = digestType == DigestType.MAC ? 
DigestType.CRC32 : DigestType.MAC; + byte[] passwdBad = "BBBBBBB".getBytes(); + + LedgerHandle lh = null; + try { + lh = bkc.createLedger(digestCorrect, passwdCorrect); + long id = lh.getId(); + for (int i = 0; i < 100; i++) { + lh.addEntry("foobar".getBytes()); + } + lh.close(); + + // try open with bad passwd + try { + bkc.openLedger(id, digestCorrect, passwdBad); + fail("Shouldn't be able to open with bad passwd"); + } catch (BKException.BKUnauthorizedAccessException bke) { + // correct behaviour + } + + // try open with bad digest + try { + bkc.openLedger(id, digestBad, passwdCorrect); + if (!autodetection) { + fail("Shouldn't be able to open with bad digest"); + } + } catch (BKException.BKDigestMatchException bke) { + // correct behaviour + if (autodetection) { + fail("Should not throw digest match exception if `autodetection` is enabled"); } + } + + // try open with both bad + try { + bkc.openLedger(id, digestBad, passwdBad); + fail("Shouldn't be able to open with bad passwd and digest"); + } catch (BKException.BKUnauthorizedAccessException bke) { + // correct behaviour + } + + // try open with both correct + bkc.openLedger(id, digestCorrect, passwdCorrect).close(); + } finally { + if (lh != null) { + lh.close(); + } + bkc.close(); } - - /** - * Test that bookkeeper is not able to open ledgers if - * it provides the wrong password or wrong digest. - */ - @Test - public void testBookkeeperDigestPasswordWithAutoDetection() throws Exception { - testBookkeeperDigestPassword(true); + } + + /** + * Tests that when trying to use a closed BK client object we get a callback error and not an + * InterruptedException. 
+ * + * @throws Exception + */ + @Test + public void testAsyncReadWithError() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); + bkc.close(); + + final AtomicInteger result = new AtomicInteger(0); + final CountDownLatch counter = new CountDownLatch(1); + + // Try to write, we should get and error callback but not an exception + lh.asyncAddEntry("test".getBytes(), new AddCallback() { + public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { + result.set(rc); + counter.countDown(); + } + }, null); + + counter.await(); + + assertTrue(result.get() != 0); + } + + /** + * Test that bookkeeper will close cleanly if close is issued while another operation is in + * progress. + */ + @Test + public void testCloseDuringOp() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + for (int i = 0; i < 10; i++) { + final BookKeeper client = new BookKeeper(conf); + final CountDownLatch l = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(false); + Thread t = new Thread() { + public void run() { + try { + LedgerHandle lh = client.createLedger(3, 3, digestType, "testPasswd".getBytes()); + startNewBookie(); + killBookie(0); + lh.asyncAddEntry("test".getBytes(), new AddCallback() { + @Override + public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { + // noop, we don't care if this completes + } + }, null); + client.close(); + success.set(true); + l.countDown(); + } catch (Exception e) { + LOG.error("Error running test", e); + success.set(false); + l.countDown(); + } + } + }; + t.start(); + assertTrue(l.await(10, TimeUnit.SECONDS), "Close never completed"); + assertTrue(success.get(), "Close was not successful"); } + } - @Test - public void testBookkeeperDigestPasswordWithoutAutoDetection() throws Exception { - testBookkeeperDigestPassword(false); - } + @Test + public void 
testIsClosed() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - void testBookkeeperDigestPassword(boolean autodetection) throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - conf.setEnableDigestTypeAutodetection(autodetection); - BookKeeper bkc = new BookKeeper(conf); + BookKeeper bkc = new BookKeeper(conf); + LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes()); + long lId = lh.getId(); - DigestType digestCorrect = digestType; - byte[] passwdCorrect = "AAAAAAA".getBytes(); - DigestType digestBad = digestType == DigestType.MAC ? DigestType.CRC32 : DigestType.MAC; - byte[] passwdBad = "BBBBBBB".getBytes(); + lh.addEntry("000".getBytes()); + boolean result = bkc.isClosed(lId); + assertFalse(result, "Ledger shouldn't be flagged as closed!"); + lh.close(); + result = bkc.isClosed(lId); + assertTrue(result, "Ledger should be flagged as closed!"); - LedgerHandle lh = null; - try { - lh = bkc.createLedger(digestCorrect, passwdCorrect); - long id = lh.getId(); - for (int i = 0; i < 100; i++) { - lh.addEntry("foobar".getBytes()); - } - lh.close(); - - // try open with bad passwd - try { - bkc.openLedger(id, digestCorrect, passwdBad); - fail("Shouldn't be able to open with bad passwd"); - } catch (BKException.BKUnauthorizedAccessException bke) { - // correct behaviour - } + bkc.close(); + } - // try open with bad digest - try { - bkc.openLedger(id, digestBad, passwdCorrect); - if (!autodetection) { - fail("Shouldn't be able to open with bad digest"); - } - } catch (BKException.BKDigestMatchException bke) { - // correct behaviour - if (autodetection) { - fail("Should not throw digest match exception if `autodetection` is enabled"); - } - } + @Test + public void testReadFailureCallback() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + 
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - // try open with both bad - try { - bkc.openLedger(id, digestBad, passwdBad); - fail("Shouldn't be able to open with bad passwd and digest"); - } catch (BKException.BKUnauthorizedAccessException bke) { - // correct behaviour - } + BookKeeper bkc = new BookKeeper(conf); + LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes()); - // try open with both correct - bkc.openLedger(id, digestCorrect, passwdCorrect).close(); - } finally { - if (lh != null) { - lh.close(); - } - bkc.close(); - } + final int numEntries = 10; + for (int i = 0; i < numEntries; i++) { + lh.addEntry(("entry-" + i).getBytes()); } - /** - * Tests that when trying to use a closed BK client object we get - * a callback error and not an InterruptedException. - * @throws Exception - */ - @Test - public void testAsyncReadWithError() throws Exception { - LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes()); - bkc.close(); - - final AtomicInteger result = new AtomicInteger(0); - final CountDownLatch counter = new CountDownLatch(1); - - // Try to write, we should get and error callback but not an exception - lh.asyncAddEntry("test".getBytes(), new AddCallback() { - public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { - result.set(rc); - counter.countDown(); - } - }, null); + stopBKCluster(); - counter.await(); - - assertTrue(result.get() != 0); + try { + lh.readEntries(0, numEntries - 1); + fail("Read operation should have failed"); + } catch (BKBookieHandleNotAvailableException e) { + // expected } - /** - * Test that bookkeeper will close cleanly if close is issued - * while another operation is in progress. 
- */ - @Test - public void testCloseDuringOp() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - for (int i = 0; i < 10; i++) { - final BookKeeper client = new BookKeeper(conf); - final CountDownLatch l = new CountDownLatch(1); - final AtomicBoolean success = new AtomicBoolean(false); - Thread t = new Thread() { - public void run() { - try { - LedgerHandle lh = client.createLedger(3, 3, digestType, "testPasswd".getBytes()); - startNewBookie(); - killBookie(0); - lh.asyncAddEntry("test".getBytes(), new AddCallback() { - @Override - public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { - // noop, we don't care if this completes - } - }, null); - client.close(); - success.set(true); - l.countDown(); - } catch (Exception e) { - LOG.error("Error running test", e); - success.set(false); - l.countDown(); - } - } - }; - t.start(); - assertTrue(l.await(10, TimeUnit.SECONDS), "Close never completed"); - assertTrue(success.get(), "Close was not successful"); + final CountDownLatch counter = new CountDownLatch(1); + final AtomicInteger receivedResponses = new AtomicInteger(0); + final AtomicInteger returnCode = new AtomicInteger(); + lh.asyncReadEntries(0, numEntries - 1, new ReadCallback() { + @Override + public void readComplete(int rc, LedgerHandle lh, Enumeration seq, Object ctx) { + returnCode.set(rc); + receivedResponses.incrementAndGet(); + counter.countDown(); + } + }, null); + + counter.await(); + + // Wait extra time to ensure no extra responses received + Thread.sleep(1000); + + assertEquals(1, receivedResponses.get()); + assertEquals(BKException.Code.BookieHandleNotAvailableException, returnCode.get()); + + bkc.close(); + } + + @Test + public void testAutoCloseableBookKeeper() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + BookKeeper bkc2; + try (BookKeeper bkc = 
new BookKeeper(conf)) { + bkc2 = bkc; + long ledgerId; + try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < 100; i++) { + lh.addEntry("foobar".getBytes()); } + } + assertTrue(bkc.isClosed(ledgerId), "Ledger should be closed!"); } + assertTrue(bkc2.closed, "BookKeeper should be closed!"); + } + + @Test + public void testReadAfterLastAddConfirmed() throws Exception { + + ClientConfiguration clientConfiguration = new ClientConfiguration(); + clientConfiguration.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + try (BookKeeper bkWriter = new BookKeeper(clientConfiguration)) { + LedgerHandle writeLh = bkWriter.createLedger(digestType, "testPasswd".getBytes()); + long ledgerId = writeLh.getId(); + int numOfEntries = 5; + for (int i = 0; i < numOfEntries; i++) { + writeLh.addEntry(("foobar" + i).getBytes()); + } + + try (BookKeeper bkReader = new BookKeeper(clientConfiguration); + LedgerHandle rlh = bkReader + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + assertFalse(writeLh.isClosed()); + + // with readUnconfirmedEntries we are able to read all of the entries + Enumeration entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1); + int entryId = 0; + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + String entryString = new String(entry.getEntry()); + assertEquals(entryString, "foobar" + entryId, + "Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + + entryString); + entryId++; + } + } - @Test - public void testIsClosed() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + try (BookKeeper bkReader = new BookKeeper(clientConfiguration); + 
LedgerHandle rlh = bkReader + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - BookKeeper bkc = new BookKeeper(conf); - LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes()); - long lId = lh.getId(); + assertFalse(writeLh.isClosed()); - lh.addEntry("000".getBytes()); - boolean result = bkc.isClosed(lId); - assertFalse(result, "Ledger shouldn't be flagged as closed!"); + // without readUnconfirmedEntries we are not able to read all of the entries + try { + rlh.readEntries(0, numOfEntries - 1); + fail("should not be able to read up to " + (numOfEntries - 1) + " with readEntries"); + } catch (BKException.BKReadException expected) { + } - lh.close(); - result = bkc.isClosed(lId); - assertTrue(result, "Ledger should be flagged as closed!"); + // read all entries within the 0..LastAddConfirmed range with readEntries + assertEquals(rlh.getLastAddConfirmed() + 1, + Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size()); - bkc.close(); - } + // assert local LAC does not change after reads + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - @Test - public void testReadFailureCallback() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + // read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries + assertEquals(rlh.getLastAddConfirmed() + 1, + Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size()); - BookKeeper bkc = new BookKeeper(conf); - LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes()); + // assert local LAC does not change after reads + 
assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - final int numEntries = 10; - for (int i = 0; i < numEntries; i++) { - lh.addEntry(("entry-" + i).getBytes()); - } + // read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries + assertEquals(numOfEntries - rlh.getLastAddConfirmed(), + Collections + .list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)) + .size()); - stopBKCluster(); + // assert local LAC does not change after reads + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); try { - lh.readEntries(0, numEntries - 1); - fail("Read operation should have failed"); - } catch (BKBookieHandleNotAvailableException e) { - // expected + // read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries + // this is an error, we are going outside the range of existing entries + rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries); + fail("the read tried to access data for unexisting entry id " + numOfEntries); + } catch (BKException.BKNoSuchEntryException expected) { + // expecting a BKNoSuchEntryException, as the entry does not exist on bookies } - final CountDownLatch counter = new CountDownLatch(1); - final AtomicInteger receivedResponses = new AtomicInteger(0); - final AtomicInteger returnCode = new AtomicInteger(); - lh.asyncReadEntries(0, numEntries - 1, new ReadCallback() { - @Override - public void readComplete(int rc, LedgerHandle lh, Enumeration seq, Object ctx) { - returnCode.set(rc); - receivedResponses.incrementAndGet(); - counter.countDown(); - } - }, null); - - counter.await(); - - // Wait extra time to ensure no extra responses received - Thread.sleep(1000); - - assertEquals(1, receivedResponses.get()); - 
assertEquals(BKException.Code.BookieHandleNotAvailableException, returnCode.get()); - - bkc.close(); - } - - @Test - public void testAutoCloseableBookKeeper() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - BookKeeper bkc2; - try (BookKeeper bkc = new BookKeeper(conf)) { - bkc2 = bkc; - long ledgerId; - try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < 100; i++) { - lh.addEntry("foobar".getBytes()); - } - } - assertTrue(bkc.isClosed(ledgerId), "Ledger should be closed!"); + try { + // read all entries within the LastAddConfirmed..numOfEntries range with readEntries + // this is an error, we are going outside the range of existing entries + rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries); + fail("the read tries to access data for unexisting entry id " + numOfEntries); + } catch (BKException.BKReadException expected) { + // expecting a BKReadException, as the client rejected the request to access entries + // after local LastAddConfirmed } - assertTrue(bkc2.closed, "BookKeeper should be closed!"); - } - @Test - public void testReadAfterLastAddConfirmed() throws Exception { + } + + // ensure that after restarting every bookie entries are not lost + // even entries after the LastAddConfirmed + restartBookies(); + + try (BookKeeper bkReader = new BookKeeper(clientConfiguration); + LedgerHandle rlh = bkReader + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + assertFalse(writeLh.isClosed()); + + // with readUnconfirmedEntries we are able to read all of the entries + Enumeration entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1); + int entryId = 0; + while (entries.hasMoreElements()) { + LedgerEntry 
entry = entries.nextElement(); + String entryString = new String(entry.getEntry()); + assertEquals(entryString, "foobar" + entryId, + "Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + + entryString); + entryId++; + } + } - ClientConfiguration clientConfiguration = new ClientConfiguration(); - clientConfiguration.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + try (BookKeeper bkReader = new BookKeeper(clientConfiguration); + LedgerHandle rlh = bkReader + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - try (BookKeeper bkWriter = new BookKeeper(clientConfiguration)) { - LedgerHandle writeLh = bkWriter.createLedger(digestType, "testPasswd".getBytes()); - long ledgerId = writeLh.getId(); - int numOfEntries = 5; - for (int i = 0; i < numOfEntries; i++) { - writeLh.addEntry(("foobar" + i).getBytes()); - } + assertFalse(writeLh.isClosed()); - try (BookKeeper bkReader = new BookKeeper(clientConfiguration); - LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - assertFalse(writeLh.isClosed()); - - // with readUnconfirmedEntries we are able to read all of the entries - Enumeration entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1); - int entryId = 0; - while (entries.hasMoreElements()) { - LedgerEntry entry = entries.nextElement(); - String entryString = new String(entry.getEntry()); - assertEquals(entryString, "foobar" + entryId, "Expected entry String: " + ("foobar" + entryId) - + " actual entry String: " + entryString); - entryId++; - } - } + // without readUnconfirmedEntries we are not able to read all of the entries + try { 
+ rlh.readEntries(0, numOfEntries - 1); + fail("should not be able to read up to " + (numOfEntries - 1) + " with readEntries"); + } catch (BKException.BKReadException expected) { + } - try (BookKeeper bkReader = new BookKeeper(clientConfiguration); - LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - assertFalse(writeLh.isClosed()); - - // without readUnconfirmedEntries we are not able to read all of the entries - try { - rlh.readEntries(0, numOfEntries - 1); - fail("should not be able to read up to " + (numOfEntries - 1) + " with readEntries"); - } catch (BKException.BKReadException expected) { - } - - // read all entries within the 0..LastAddConfirmed range with readEntries - assertEquals(rlh.getLastAddConfirmed() + 1, - Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size()); - - // assert local LAC does not change after reads - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - // read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries - assertEquals(rlh.getLastAddConfirmed() + 1, - Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size()); - - // assert local LAC does not change after reads - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - // read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries - assertEquals(numOfEntries - rlh.getLastAddConfirmed(), - Collections.list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)).size()); - - // assert local LAC does not change after reads - 
assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - try { - // read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries - // this is an error, we are going outside the range of existing entries - rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries); - fail("the read tried to access data for unexisting entry id " + numOfEntries); - } catch (BKException.BKNoSuchEntryException expected) { - // expecting a BKNoSuchEntryException, as the entry does not exist on bookies - } - - try { - // read all entries within the LastAddConfirmed..numOfEntries range with readEntries - // this is an error, we are going outside the range of existing entries - rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries); - fail("the read tries to access data for unexisting entry id " + numOfEntries); - } catch (BKException.BKReadException expected) { - // expecting a BKReadException, as the client rejected the request to access entries - // after local LastAddConfirmed - } + // read all entries within the 0..LastAddConfirmed range with readEntries + assertEquals(rlh.getLastAddConfirmed() + 1, + Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size()); - } + // assert local LAC does not change after reads + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - // ensure that after restarting every bookie entries are not lost - // even entries after the LastAddConfirmed - restartBookies(); - - try (BookKeeper bkReader = new BookKeeper(clientConfiguration); - LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + 
rlh.getLastAddConfirmed()); - - assertFalse(writeLh.isClosed()); - - // with readUnconfirmedEntries we are able to read all of the entries - Enumeration entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1); - int entryId = 0; - while (entries.hasMoreElements()) { - LedgerEntry entry = entries.nextElement(); - String entryString = new String(entry.getEntry()); - assertEquals(entryString, "foobar" + entryId, "Expected entry String: " + ("foobar" + entryId) - + " actual entry String: " + entryString); - entryId++; - } - } + // read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries + assertEquals(rlh.getLastAddConfirmed() + 1, + Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size()); - try (BookKeeper bkReader = new BookKeeper(clientConfiguration); - LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) { - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - assertFalse(writeLh.isClosed()); - - // without readUnconfirmedEntries we are not able to read all of the entries - try { - rlh.readEntries(0, numOfEntries - 1); - fail("should not be able to read up to " + (numOfEntries - 1) + " with readEntries"); - } catch (BKException.BKReadException expected) { - } - - // read all entries within the 0..LastAddConfirmed range with readEntries - assertEquals(rlh.getLastAddConfirmed() + 1, - Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size()); - - // assert local LAC does not change after reads - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - // read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries - assertEquals(rlh.getLastAddConfirmed() + 1, - Collections.list(rlh.readUnconfirmedEntries(0, 
rlh.getLastAddConfirmed())).size()); - - // assert local LAC does not change after reads - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - // read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries - assertEquals(numOfEntries - rlh.getLastAddConfirmed(), - Collections.list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)).size()); - - // assert local LAC does not change after reads - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), "Expected LAC of rlh: " - + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - try { - // read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries - // this is an error, we are going outside the range of existing entries - rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries); - fail("the read tried to access data for unexisting entry id " + numOfEntries); - } catch (BKException.BKNoSuchEntryException expected) { - // expecting a BKNoSuchEntryException, as the entry does not exist on bookies - } - - try { - // read all entries within the LastAddConfirmed..numOfEntries range with readEntries - // this is an error, we are going outside the range of existing entries - rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries); - fail("the read tries to access data for unexisting entry id " + numOfEntries); - } catch (BKException.BKReadException expected) { - // expecting a BKReadException, as the client rejected the request to access entries - // after local LastAddConfirmed - } + // assert local LAC does not change after reads + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - } + // read all entries within the LastAddConfirmed..numOfEntries - 1 range 
with readUnconfirmedEntries + assertEquals(numOfEntries - rlh.getLastAddConfirmed(), + Collections + .list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)) + .size()); - // open ledger with fencing, this will repair the ledger and make the last entry readable - try (BookKeeper bkReader = new BookKeeper(clientConfiguration); - LedgerHandle rlh = bkReader.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 1)), "Expected LAC of rlh: " - + (numOfEntries - 1) + " actual LAC of rlh: " + rlh.getLastAddConfirmed()); - - assertFalse(writeLh.isClosed()); - - // without readUnconfirmedEntries we are not able to read all of the entries - Enumeration entries = rlh.readEntries(0, numOfEntries - 1); - int entryId = 0; - while (entries.hasMoreElements()) { - LedgerEntry entry = entries.nextElement(); - String entryString = new String(entry.getEntry()); - assertEquals(entryString, "foobar" + entryId, "Expected entry String: " + ("foobar" + entryId) - + " actual entry String: " + entryString); - entryId++; - } - } + // assert local LAC does not change after reads + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); - // should still be able to close as long as recovery closed the ledger - // with the same last entryId and length as in the write handle. 
- writeLh.close(); + try { + // read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries + // this is an error, we are going outside the range of existing entries + rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries); + fail("the read tried to access data for unexisting entry id " + numOfEntries); + } catch (BKException.BKNoSuchEntryException expected) { + // expecting a BKNoSuchEntryException, as the entry does not exist on bookies } - } - @Test - public void testReadWriteWithV2WireProtocol() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setUseV2WireProtocol(true); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numEntries = 100; - byte[] data = "foobar".getBytes(); - try (BookKeeper bkc = new BookKeeper(conf)) { - - // basic read/write - { - long ledgerId; - try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < numEntries; i++) { - lh.addEntry(data); - } - } - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertArrayEquals(data, entry.getEntry()); - } - } - } + try { + // read all entries within the LastAddConfirmed..numOfEntries range with readEntries + // this is an error, we are going outside the range of existing entries + rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries); + fail("the read tries to access data for unexisting entry id " + numOfEntries); + } catch (BKException.BKReadException expected) { + // expecting a BKReadException, as the client rejected the request to access entries + // after local LastAddConfirmed + } - // basic fencing - { - long ledgerId; - try (LedgerHandle lh2 = bkc.createLedger(digestType, 
"testPasswd".getBytes())) { - ledgerId = lh2.getId(); - lh2.addEntry(data); - try (LedgerHandle lh2Fence = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - } - try { - lh2.addEntry(data); - fail("ledger should be fenced"); - } catch (BKException.BKLedgerFencedException ex){ - } - } - } + } + + // open ledger with fencing, this will repair the ledger and make the last entry readable + try (BookKeeper bkReader = new BookKeeper(clientConfiguration); + LedgerHandle rlh = bkReader.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 1)), + "Expected LAC of rlh: " + (numOfEntries - 1) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + assertFalse(writeLh.isClosed()); + + // without readUnconfirmedEntries we are not able to read all of the entries + Enumeration entries = rlh.readEntries(0, numOfEntries - 1); + int entryId = 0; + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + String entryString = new String(entry.getEntry()); + assertEquals(entryString, "foobar" + entryId, + "Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + + entryString); + entryId++; } - } + } - @Test - public void testBatchReadFailBackToSingleRead1() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numEntries = 100; - byte[] data = "foobar".getBytes(); - try (BookKeeper bkc = new BookKeeper(conf)) { - // basic read/write - { - long ledgerId; - try (LedgerHandle lh = bkc.createLedger(2, 2, 2, - digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < numEntries; i++) { - lh.addEntry(data); - } - } - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - //V3 protocol not support batch read. 
In theory, it will throw UnsupportedOperationException. - try { - lh.batchReadEntries(0, numEntries, 5 * 1024 * 1024); - fail("Should throw UnsupportedOperationException."); - } catch (UnsupportedOperationException e) { - assertEquals("Unsupported batch read entry operation for v3 protocol.", e.getMessage()); - } - } - } + // should still be able to close as long as recovery closed the ledger + // with the same last entryId and length as in the write handle. + writeLh.close(); + } + } + + @Test + public void testReadWriteWithV2WireProtocol() throws Exception { + ClientConfiguration conf = new ClientConfiguration().setUseV2WireProtocol(true); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numEntries = 100; + byte[] data = "foobar".getBytes(); + try (BookKeeper bkc = new BookKeeper(conf)) { + + // basic read/write + { + long ledgerId; + try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); + } } + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + } + } + } - try (BookKeeper bkc = new BookKeeper(conf)) { - // basic read/write - { - long ledgerId; - try (LedgerHandle lh = bkc.createLedger(3, 2, 2, - digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < numEntries; i++) { - lh.addEntry(data); - } - } - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - //The ledger ensemble is not equals write quorum, so failback to single read, it also can - //read data successfully. 
- for (Enumeration readEntries = lh.batchReadEntries(0, numEntries, 5 * 1024 * 1024); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertArrayEquals(data, entry.getEntry()); - } - } - } + // basic fencing + { + long ledgerId; + try (LedgerHandle lh2 = bkc.createLedger(digestType, "testPasswd".getBytes())) { + ledgerId = lh2.getId(); + lh2.addEntry(data); + try (LedgerHandle lh2Fence = bkc + .openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + } + try { + lh2.addEntry(data); + fail("ledger should be fenced"); + } catch (BKException.BKLedgerFencedException ex) { + } } + } } - - @Test - public void testBatchReadFailBackToSingleRead2() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numEntries = 100; - byte[] data = "foobar".getBytes(); - try (BookKeeper bkc = new BookKeeper(conf)) { - // basic read/write - { - long ledgerId; - try (LedgerHandle lh = bkc.createLedger(2, 2, 2, - digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < numEntries; i++) { - lh.addEntry(data); - } - } - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - //V3 protocol not support batch read, it will throw UnsupportedOperationException. 
- try { - lh.batchReadEntries(0, numEntries, 5 * 1024 * 1024); - fail("Should throw UnsupportedOperationException."); - } catch (UnsupportedOperationException e) { - assertEquals("Unsupported batch read entry operation for v3 protocol.", e.getMessage()); - } - } - } + } + + @Test + public void testBatchReadFailBackToSingleRead1() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numEntries = 100; + byte[] data = "foobar".getBytes(); + try (BookKeeper bkc = new BookKeeper(conf)) { + // basic read/write + { + long ledgerId; + try (LedgerHandle lh = bkc.createLedger(2, 2, 2, digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); + } } + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + // The v3 protocol does not support batch read, so this is expected to throw UnsupportedOperationException. 
+ try { + lh.batchReadEntries(0, numEntries, 5 * 1024 * 1024); + fail("Should throw UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + assertEquals("Unsupported batch read entry operation for v3 protocol.", e.getMessage()); + } } + } } - @Test - public void testBatchReadWithV2Protocol() throws Exception { - ClientConfiguration conf = new ClientConfiguration().setUseV2WireProtocol(true); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numEntries = 100; - byte[] data = "foobar".getBytes(); - try (BookKeeper bkc = new BookKeeper(conf)) { - // basic read/write - { - long ledgerId; - try (LedgerHandle lh = bkc.createLedger(2, 2, 2, digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < numEntries; i++) { - lh.addEntry(data); - } - } - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - int entries = 0; - for (Enumeration readEntries = lh.batchReadEntries(0, numEntries, 5 * 1024 * 1024); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertArrayEquals(data, entry.getEntry()); - entries++; - } - assertEquals(numEntries, entries); - - //The maxCount is 0, the result is only limited by maxSize. - entries = 0; - for (Enumeration readEntries = lh.batchReadEntries(0, 0, 5 * 1024 * 1024); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertArrayEquals(data, entry.getEntry()); - entries++; - } - assertEquals(numEntries, entries); - - // one entry size = 8(ledgerId) + 8(entryId) + 8(lac) + 8(length) + 8(digest) + payload size - long entrySize = 8 + 8 + 8 + 8 + 8 + data.length; - //response header size. - int headerSize = 24 + 8 + 4; - //The maxCount is 0, the result is only limited by maxSize. 
- entries = 0; - int expectEntriesNum = 5; - for (Enumeration readEntries = lh.batchReadEntries(0, 0, - expectEntriesNum * entrySize + headerSize + (expectEntriesNum * 4)); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertArrayEquals(data, entry.getEntry()); - entries++; - } - assertEquals(expectEntriesNum, entries); - - //The maxCount is 100, the result entries reach maxSize limit. - entries = 0; - for (Enumeration readEntries = lh.batchReadEntries(0, 20, - expectEntriesNum * entrySize + headerSize + (expectEntriesNum * 4)); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertArrayEquals(data, entry.getEntry()); - entries++; - } - assertEquals(expectEntriesNum, entries); - } - } + try (BookKeeper bkc = new BookKeeper(conf)) { + // basic read/write + { + long ledgerId; + try (LedgerHandle lh = bkc.createLedger(3, 2, 2, digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); + } + } + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + // The ledger ensemble size does not equal the write quorum, so the client falls back to single + // reads and can still read the data successfully. 
+ for (Enumeration readEntries = lh + .batchReadEntries(0, numEntries, 5 * 1024 * 1024); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + } } + } } - - @SuppressWarnings("deprecation") - @Test - public void testReadEntryReleaseByteBufs() throws Exception { - ClientConfiguration confWriter = new ClientConfiguration(); - confWriter.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int numEntries = 10; - byte[] data = "foobar".getBytes(); + } + + @Test + public void testBatchReadFailBackToSingleRead2() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numEntries = 100; + byte[] data = "foobar".getBytes(); + try (BookKeeper bkc = new BookKeeper(conf)) { + // basic read/write + { long ledgerId; - try (BookKeeper bkc = new BookKeeper(confWriter)) { - try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) { - ledgerId = lh.getId(); - for (int i = 0; i < numEntries; i++) { - lh.addEntry(data); - } - } + try (LedgerHandle lh = bkc.createLedger(2, 2, 2, digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); + } } - - // v2 protocol, using pooled buffers - ClientConfiguration confReader1 = new ClientConfiguration() - .setUseV2WireProtocol(true) - .setNettyUsePooledBuffers(true) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - - try (BookKeeper bkc = new BookKeeper(confReader1)) { - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - try { - entry.data.release(); - } catch (IllegalReferenceCountException ok) { - fail("ByteBuf already released"); - } 
- } - } + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + // V3 protocol not support batch read, it will throw UnsupportedOperationException. + try { + lh.batchReadEntries(0, numEntries, 5 * 1024 * 1024); + fail("Should throw UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + assertEquals("Unsupported batch read entry operation for v3 protocol.", e.getMessage()); + } } + } + } - // v2 protocol, not using pooled buffers - ClientConfiguration confReader2 = new ClientConfiguration() - .setUseV2WireProtocol(true) - .setNettyUsePooledBuffers(false); - confReader2.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - - try (BookKeeper bkc = new BookKeeper(confReader2)) { - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - try { - entry.data.release(); - } catch (IllegalReferenceCountException e) { - fail("ByteBuf already released"); - } - } - } + conf.setBatchReadEnabled(false); + try (BookKeeper bkc = new BookKeeper(conf)) { + // basic read/write + { + long ledgerId; + try (LedgerHandle lh = bkc.createLedger(2, 2, 2, digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); + } } - - // v3 protocol, not using pooled buffers - ClientConfiguration confReader3 = new ClientConfiguration() - .setUseV2WireProtocol(false) - .setNettyUsePooledBuffers(false) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - try (BookKeeper bkc = new BookKeeper(confReader3)) { - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - for (Enumeration 
readEntries = lh.readEntries(0, numEntries - 1); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - assertTrue(entry.data.release(), - "Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt()); - try { - assertFalse(entry.data.release()); - fail("ByteBuf already released"); - } catch (IllegalReferenceCountException ok) { - } - } - } + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + // We config disable the batch read, so failback to single read, it also can + // read data successfully. + for (Enumeration readEntries = lh + .batchReadEntries(0, numEntries, 5 * 1024 * 1024); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + } } - - // v3 protocol, using pooled buffers - // v3 protocol from 4.5 always "wraps" buffers returned by protobuf - ClientConfiguration confReader4 = new ClientConfiguration() - .setUseV2WireProtocol(false) - .setNettyUsePooledBuffers(true) - .setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - - try (BookKeeper bkc = new BookKeeper(confReader4)) { - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - // ButeBufs not reference counter - assertTrue(entry.data.release(), - "Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt()); - try { - assertFalse(entry.data.release()); - fail("ByteBuf already released"); - } catch (IllegalReferenceCountException ok) { - } - } - } + } + } + } + + @Test + public void testBatchReadWithV2Protocol() throws Exception { + ClientConfiguration conf = new ClientConfiguration().setUseV2WireProtocol(true); + 
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numEntries = 100; + byte[] data = "foobar".getBytes(); + try (BookKeeper bkc = new BookKeeper(conf)) { + // basic read/write + { + long ledgerId; + try (LedgerHandle lh = bkc.createLedger(2, 2, 2, digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); + } } - - // cannot read twice an entry - ClientConfiguration confReader5 = new ClientConfiguration(); - confReader5.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - try (BookKeeper bkc = new BookKeeper(confReader5)) { - try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { - assertEquals(numEntries - 1, lh.readLastConfirmed()); - for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); - readEntries.hasMoreElements();) { - LedgerEntry entry = readEntries.nextElement(); - entry.getEntry(); - try { - entry.getEntry(); - fail("entry data accessed twice"); - } catch (IllegalStateException ok){ - } - try { - entry.getEntryInputStream(); - fail("entry data accessed twice"); - } catch (IllegalStateException ok){ - } - } - } + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + int entries = 0; + for (Enumeration readEntries = lh + .batchReadEntries(0, numEntries, 5 * 1024 * 1024); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + entries++; + } + assertEquals(numEntries, entries); + + // The maxCount is 0, the result is only limited by maxSize. 
+ entries = 0; + for (Enumeration readEntries = lh.batchReadEntries(0, 0, 5 * 1024 * 1024); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + entries++; + } + assertEquals(numEntries, entries); + + // one entry size = 8(ledgerId) + 8(entryId) + 8(lac) + 8(length) + 8(digest) + payload size + long entrySize = 8 + 8 + 8 + 8 + 8 + data.length; + // response header size. + int headerSize = 24 + 8 + 4; + // The maxCount is 0, the result is only limited by maxSize. + entries = 0; + int expectEntriesNum = 5; + for ( + Enumeration readEntries = lh.batchReadEntries(0, 0, + expectEntriesNum * entrySize + headerSize + (expectEntriesNum * 4)); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + entries++; + } + assertEquals(expectEntriesNum, entries); + + // The maxCount is 100, the result entries reach maxSize limit. + entries = 0; + for ( + Enumeration readEntries = lh.batchReadEntries(0, 20, + expectEntriesNum * entrySize + headerSize + (expectEntriesNum * 4)); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertArrayEquals(data, entry.getEntry()); + entries++; + } + assertEquals(expectEntriesNum, entries); } + } } - - /** - * Tests that issuing multiple reads for the same entry at the same time works as expected. 
- * - * @throws Exception - */ - @Test - public void testDoubleRead() throws Exception { - LedgerHandle lh = bkc.createLedger(digestType, "".getBytes()); - - lh.addEntry("test".getBytes()); - - // Read the same entry more times asynchronously - final int n = 10; - final CountDownLatch latch = new CountDownLatch(n); - for (int i = 0; i < n; i++) { - lh.asyncReadEntries(0, 0, new ReadCallback() { - public void readComplete(int rc, LedgerHandle lh, - Enumeration seq, Object ctx) { - if (rc == BKException.Code.OK) { - latch.countDown(); - } else { - fail("Read fail"); - } - } - }, null); + } + + @SuppressWarnings("deprecation") + @Test + public void testReadEntryReleaseByteBufs() throws Exception { + ClientConfiguration confWriter = new ClientConfiguration(); + confWriter.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int numEntries = 10; + byte[] data = "foobar".getBytes(); + long ledgerId; + try (BookKeeper bkc = new BookKeeper(confWriter)) { + try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) { + ledgerId = lh.getId(); + for (int i = 0; i < numEntries; i++) { + lh.addEntry(data); } - - latch.await(); + } } - /** - * Tests that issuing multiple reads for the same entry at the same time works as expected. 
- * - * @throws Exception - */ - @Test - public void testDoubleReadWithV2Protocol() throws Exception { - ClientConfiguration conf = new ClientConfiguration(baseClientConf); - conf.setUseV2WireProtocol(true); - BookKeeperTestClient bkc = new BookKeeperTestClient(conf); - LedgerHandle lh = bkc.createLedger(digestType, "".getBytes()); - - lh.addEntry("test".getBytes()); - - // Read the same entry more times asynchronously - final int n = 10; - final CountDownLatch latch = new CountDownLatch(n); - for (int i = 0; i < n; i++) { - lh.asyncReadEntries(0, 0, new ReadCallback() { - public void readComplete(int rc, LedgerHandle lh, - Enumeration seq, Object ctx) { - if (rc == BKException.Code.OK) { - latch.countDown(); - } else { - fail("Read fail"); - } - } - }, null); + // v2 protocol, using pooled buffers + ClientConfiguration confReader1 = new ClientConfiguration().setUseV2WireProtocol(true) + .setNettyUsePooledBuffers(true).setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + try (BookKeeper bkc = new BookKeeper(confReader1)) { + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertDoesNotThrow(() -> { + entry.data.release(); + }, "ByteBuf already released"); } - - latch.await(); - bkc.close(); + } } - @Test - public void testCannotUseWriteFlagsOnV2Protocol() throws Exception { - ClientConfiguration conf = new ClientConfiguration(baseClientConf); - conf.setUseV2WireProtocol(true); - try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) { - try (WriteHandle wh = result(bkc.newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(2) - .withPassword("".getBytes()) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute())) { - 
Assertions.assertThrows(BKException.BKIllegalOpException.class, - () -> result(wh.appendAsync("test".getBytes()))); - } + // v2 protocol, not using pooled buffers + ClientConfiguration confReader2 = + new ClientConfiguration().setUseV2WireProtocol(true).setNettyUsePooledBuffers(false); + confReader2.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + try (BookKeeper bkc = new BookKeeper(confReader2)) { + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + assertDoesNotThrow(() -> { + entry.data.release(); + }, "ByteBuf already released"); } + } } - @Test - public void testCannotUseForceOnV2Protocol() throws Exception { - ClientConfiguration conf = new ClientConfiguration(baseClientConf); - conf.setUseV2WireProtocol(true); - try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) { - try (WriteHandle wh = result(bkc.newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(2) - .withPassword("".getBytes()) - .withWriteFlags(WriteFlag.NONE) - .execute())) { - result(wh.appendAsync("".getBytes())); - Assertions.assertThrows(BKException.BKIllegalOpException.class, - () -> result(wh.force())); - } + // v3 protocol, not using pooled buffers + ClientConfiguration confReader3 = new ClientConfiguration().setUseV2WireProtocol(false) + .setNettyUsePooledBuffers(false).setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + try (BookKeeper bkc = new BookKeeper(confReader3)) { + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + 
assertTrue(entry.data.release(), + "Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt()); + try { + assertFalse(entry.data.release()); + fail("ByteBuf already released"); + } catch (IllegalReferenceCountException ok) { + } } + } } - class MockZooKeeperClient extends ZooKeeperClient { - class MockZooKeeper extends ZooKeeper { - public MockZooKeeper(String connectString, int sessionTimeout, Watcher watcher, boolean canBeReadOnly) - throws IOException { - super(connectString, sessionTimeout, watcher, canBeReadOnly); - } - - @Override - public void create(final String path, byte[] data, List acl, CreateMode createMode, StringCallback cb, - Object ctx) { - StringCallback injectedCallback = new StringCallback() { - @Override - public void processResult(int rc, String path, Object ctx, String name) { - /** - * if ledgerIdToInjectFailure matches with the path of - * the node, then throw CONNECTIONLOSS error and then - * reset it to INVALID_LEDGERID. - */ - if (path.contains(ledgerIdToInjectFailure.toString())) { - ledgerIdToInjectFailure.set(INVALID_LEDGERID); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, name); - } else { - cb.processResult(rc, path, ctx, name); - } - } - }; - super.create(path, data, acl, createMode, injectedCallback, ctx); - } + // v3 protocol, using pooled buffers + // v3 protocol from 4.5 always "wraps" buffers returned by protobuf + ClientConfiguration confReader4 = new ClientConfiguration().setUseV2WireProtocol(false) + .setNettyUsePooledBuffers(true).setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + try (BookKeeper bkc = new BookKeeper(confReader4)) { + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + // ByteBufs are reference counted: the first release() succeeds and a second one must throw + 
assertTrue(entry.data.release(), + "Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt()); + try { + assertFalse(entry.data.release()); + fail("ByteBuf already released"); + } catch (IllegalReferenceCountException ok) { + } } + } + } - private final String connectString; - private final int sessionTimeoutMs; - private final ZooKeeperWatcherBase watcherManager; - private final AtomicLong ledgerIdToInjectFailure; - - MockZooKeeperClient(String connectString, int sessionTimeoutMs, ZooKeeperWatcherBase watcher, - AtomicLong ledgerIdToInjectFailure) throws IOException { - /* - * in OperationalRetryPolicy maxRetries is > 0. So in case of any - * RecoverableException scenario, it will retry. - */ - super(connectString, sessionTimeoutMs, watcher, - new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, Integer.MAX_VALUE), - new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, 3), - NullStatsLogger.INSTANCE, 1, 0, false); - this.connectString = connectString; - this.sessionTimeoutMs = sessionTimeoutMs; - this.watcherManager = watcher; - this.ledgerIdToInjectFailure = ledgerIdToInjectFailure; + // cannot read twice an entry + ClientConfiguration confReader5 = new ClientConfiguration(); + confReader5.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + try (BookKeeper bkc = new BookKeeper(confReader5)) { + try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) { + assertEquals(numEntries - 1, lh.readLastConfirmed()); + for (Enumeration readEntries = lh.readEntries(0, numEntries - 1); + readEntries.hasMoreElements(); ) { + LedgerEntry entry = readEntries.nextElement(); + entry.getEntry(); + try { + entry.getEntry(); + fail("entry data accessed twice"); + } catch (IllegalStateException ok) { + } + try { + entry.getEntryInputStream(); + fail("entry data accessed twice"); + } catch (IllegalStateException ok) { + } } - - @Override - protected ZooKeeper createZooKeeper() throws 
IOException { - return new MockZooKeeper(this.connectString, this.sessionTimeoutMs, this.watcherManager, false); + } + } + } + + /** + * Tests that issuing multiple reads for the same entry at the same time works as expected. + * + * @throws Exception + */ + @Test + public void testDoubleRead() throws Exception { + LedgerHandle lh = bkc.createLedger(digestType, "".getBytes()); + + lh.addEntry("test".getBytes()); + + // Read the same entry more times asynchronously + final int n = 10; + final CountDownLatch latch = new CountDownLatch(n); + for (int i = 0; i < n; i++) { + lh.asyncReadEntries(0, 0, new ReadCallback() { + public void readComplete(int rc, LedgerHandle lh, Enumeration seq, + Object ctx) { + if (rc == BKException.Code.OK) { + latch.countDown(); + } else { + fail("Read fail"); + } } + }, null); } - @Test - public void testZKConnectionLossForLedgerCreation() throws Exception { - int zkSessionTimeOut = 10000; - AtomicLong ledgerIdToInjectFailure = new AtomicLong(INVALID_LEDGERID); - ZooKeeperWatcherBase zooKeeperWatcherBase = new ZooKeeperWatcherBase(zkSessionTimeOut, false, - NullStatsLogger.INSTANCE); - MockZooKeeperClient zkFaultInjectionWrapper = new MockZooKeeperClient(zkUtil.getZooKeeperConnectString(), - zkSessionTimeOut, zooKeeperWatcherBase, ledgerIdToInjectFailure); - zkFaultInjectionWrapper.waitForConnection(); - assertEquals(States.CONNECTED, zkFaultInjectionWrapper.getState(), - "zkFaultInjectionWrapper should be in connected state"); - BookKeeper bk = new BookKeeper(baseClientConf, zkFaultInjectionWrapper); - long oldZkInstanceSessionId = zkFaultInjectionWrapper.getSessionId(); - long ledgerId = 567L; - LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); - lh.close(); - - /* - * trigger Expired event so that MockZooKeeperClient would run - * 'clientCreator' and create new zk handle. In this case it would - * create MockZooKeeper. 
- */ - zooKeeperWatcherBase.process(new WatchedEvent(EventType.None, KeeperState.Expired, "")); - zkFaultInjectionWrapper.waitForConnection(); - for (int i = 0; i < 10; i++) { - if (zkFaultInjectionWrapper.getState() == States.CONNECTED) { - break; - } - Thread.sleep(200); + latch.await(); + } + + /** + * Tests that issuing multiple reads for the same entry at the same time works as expected. + * + * @throws Exception + */ + @Test + public void testDoubleReadWithV2Protocol() throws Exception { + ClientConfiguration conf = new ClientConfiguration(baseClientConf); + conf.setUseV2WireProtocol(true); + BookKeeperTestClient bkc = new BookKeeperTestClient(conf); + LedgerHandle lh = bkc.createLedger(digestType, "".getBytes()); + + lh.addEntry("test".getBytes()); + + // Read the same entry more times asynchronously + final int n = 10; + final CountDownLatch latch = new CountDownLatch(n); + for (int i = 0; i < n; i++) { + lh.asyncReadEntries(0, 0, new ReadCallback() { + public void readComplete(int rc, LedgerHandle lh, Enumeration seq, + Object ctx) { + if (rc == BKException.Code.OK) { + latch.countDown(); + } else { + fail("Read fail"); + } } - assertEquals(States.CONNECTED, zkFaultInjectionWrapper.getState(), - "zkFaultInjectionWrapper should be in connected state"); - assertNotEquals(oldZkInstanceSessionId, zkFaultInjectionWrapper.getSessionId(), - "Session Id of old and new ZK instance should be different"); - ledgerId++; - ledgerIdToInjectFailure.set(ledgerId); - /** - * ledgerIdToInjectFailure is set to 'ledgerId', so zookeeper.create - * would return CONNECTIONLOSS error for the first time and when it is - * retried, as expected it would return NODEEXISTS error. - * - * AbstractZkLedgerManager.createLedgerMetadata should deal with this - * scenario appropriately. 
- */ - lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); - lh.close(); - assertEquals(INVALID_LEDGERID, ledgerIdToInjectFailure.get(), - "injectZnodeCreationNoNodeFailure should have been reset it to INVALID_LEDGERID"); - lh = bk.openLedger(ledgerId, DigestType.CRC32, "".getBytes()); - lh.close(); - ledgerId++; - lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); - lh.close(); - bk.close(); + }, null); } - @Test - public void testLedgerDeletionIdempotency() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf); - long ledgerId = 789L; - LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); - lh.close(); - bk.deleteLedger(ledgerId); - bk.deleteLedger(ledgerId); - bk.close(); + latch.await(); + bkc.close(); + } + + @Test + public void testCannotUseWriteFlagsOnV2Protocol() throws Exception { + ClientConfiguration conf = new ClientConfiguration(baseClientConf); + conf.setUseV2WireProtocol(true); + try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) { + try (WriteHandle wh = result( + bkc.newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(3) + .withAckQuorumSize(2).withPassword("".getBytes()) + .withWriteFlags(WriteFlag.DEFERRED_SYNC).execute())) { + assertThrows(BKException.BKIllegalOpException.class, + () -> result(wh.appendAsync("test".getBytes()))); + } } - + } + + @Test + public void testCannotUseForceOnV2Protocol() throws Exception { + ClientConfiguration conf = new ClientConfiguration(baseClientConf); + conf.setUseV2WireProtocol(true); + try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) { + try (WriteHandle wh = result( + bkc.newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(3) + .withAckQuorumSize(2).withPassword("".getBytes()).withWriteFlags(WriteFlag.NONE) + .execute())) { + result(wh.appendAsync("".getBytes())); + assertThrows(BKException.BKIllegalOpException.class, () -> result(wh.force())); + } 
+ } + } + + @Test + public void testZKConnectionLossForLedgerCreation() throws Exception { + int zkSessionTimeOut = 10000; + AtomicLong ledgerIdToInjectFailure = new AtomicLong(INVALID_LEDGERID); + ZooKeeperWatcherBase zooKeeperWatcherBase = new ZooKeeperWatcherBase(zkSessionTimeOut, true); + MockZooKeeperClient zkFaultInjectionWrapper = new MockZooKeeperClient( + zkUtil.getZooKeeperConnectString(), + zkSessionTimeOut, zooKeeperWatcherBase, ledgerIdToInjectFailure); + zkFaultInjectionWrapper.waitForConnection(); + assertEquals(States.CONNECTED, zkFaultInjectionWrapper.getState(), + "zkFaultInjectionWrapper should be in connected state"); + BookKeeper bk = new BookKeeper(baseClientConf, zkFaultInjectionWrapper); + long oldZkInstanceSessionId = zkFaultInjectionWrapper.getSessionId(); + long ledgerId = 567L; + LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); + lh.close(); + + /* + * trigger Expired event so that MockZooKeeperClient would run + * 'clientCreator' and create new zk handle. In this case it would + * create MockZooKeeper. + */ + zooKeeperWatcherBase.process(new WatchedEvent(EventType.None, KeeperState.Expired, "")); + zkFaultInjectionWrapper.waitForConnection(); + for (int i = 0; i < 10; i++) { + if (zkFaultInjectionWrapper.getState() == States.CONNECTED) { + break; + } + Thread.sleep(200); + } + assertEquals(States.CONNECTED, zkFaultInjectionWrapper.getState(), + "zkFaultInjectionWrapper should be in connected state"); + assertNotEquals(oldZkInstanceSessionId, zkFaultInjectionWrapper.getSessionId(), + "Session Id of old and new ZK instance should be different"); + ledgerId++; + ledgerIdToInjectFailure.set(ledgerId); /** - * Mock of RackawareEnsemblePlacementPolicy. Overrides areAckedBookiesAdheringToPlacementPolicy to only return true - * when ackedBookies consists of writeQuorumSizeToUseForTesting bookies. 
+ * ledgerIdToInjectFailure is set to 'ledgerId', so zookeeper.create would return CONNECTIONLOSS error for the + * first time and when it is retried, as expected it would return NODEEXISTS error. + * + * AbstractZkLedgerManager.createLedgerMetadata should deal with this scenario appropriately. */ - public static class MockRackawareEnsemblePlacementPolicy extends RackawareEnsemblePlacementPolicy { - private int writeQuorumSizeToUseForTesting; - private CountDownLatch conditionFirstInvocationLatch; - - void setWriteQuorumSizeToUseForTesting(int writeQuorumSizeToUseForTesting) { - this.writeQuorumSizeToUseForTesting = writeQuorumSizeToUseForTesting; + lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); + lh.close(); + assertEquals(INVALID_LEDGERID, ledgerIdToInjectFailure.get(), + "injectZnodeCreationNoNodeFailure should have been reset it to INVALID_LEDGERID"); + lh = bk.openLedger(ledgerId, DigestType.CRC32, "".getBytes()); + lh.close(); + ledgerId++; + lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); + lh.close(); + bk.close(); + } + + @Test + public void testLedgerDeletionIdempotency() throws Exception { + BookKeeper bk = new BookKeeper(baseClientConf); + long ledgerId = 789L; + LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null); + lh.close(); + bk.deleteLedger(ledgerId); + bk.deleteLedger(ledgerId); + bk.close(); + } + + /** + * Test to verify that PendingAddOp waits for success condition from + * areAckedBookiesAdheringToPlacementPolicy before returning success to client. Also tests working + * of WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS and WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS + * counters. 
+ */ + @Test + public void testEnforceMinNumFaultDomainsForWrite() throws Exception { + byte[] data = "foobar".getBytes(); + byte[] password = "testPasswd".getBytes(); + + startNewBookie(); + startNewBookie(); + + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + conf.setEnsemblePlacementPolicy(MockRackawareEnsemblePlacementPolicy.class); + + conf.setAddEntryTimeout(2); + conf.setAddEntryQuorumTimeout(4); + conf.setEnforceMinNumFaultDomainsForWrite(true); + + TestStatsProvider statsProvider = new TestStatsProvider(); + + // Abnormal values for testing to prevent timeouts + BookKeeperTestClient bk = new BookKeeperTestClient(conf, statsProvider); + StatsLogger statsLogger = bk.getStatsLogger(); + + int ensembleSize = 3; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + + CountDownLatch countDownLatch = new CountDownLatch(1); + MockRackawareEnsemblePlacementPolicy currPlacementPolicy = + (MockRackawareEnsemblePlacementPolicy) bk.getPlacementPolicy(); + currPlacementPolicy.setConditionFirstInvocationLatch(countDownLatch); + currPlacementPolicy.setWriteQuorumSizeToUseForTesting(writeQuorumSize); + + BookieId bookieToSleep; + + try (LedgerHandle lh = bk + .createLedger(ensembleSize, writeQuorumSize, ackQuorumSize, digestType, password)) { + CountDownLatch sleepLatchCase1 = new CountDownLatch(1); + CountDownLatch sleepLatchCase2 = new CountDownLatch(1); + + // Put all non ensemble bookies to sleep + LOG.info("Putting all non ensemble bookies to sleep."); + for (BookieId addr : bookieAddresses()) { + try { + if (!lh.getCurrentEnsemble().contains(addr)) { + sleepBookie(addr, sleepLatchCase2); + } + } catch (UnknownHostException ignored) { } + } - void setConditionFirstInvocationLatch(CountDownLatch conditionFirstInvocationLatch) { - this.conditionFirstInvocationLatch = conditionFirstInvocationLatch; + Thread writeToLedger = new Thread(() -> { + try { + LOG.info("Initiating write for entry"); + long 
entryId = lh.addEntry(data); + LOG.info("Wrote entry with entryId = {}", entryId); + } catch (InterruptedException | BKException ignored) { } + }); - @Override - public boolean areAckedBookiesAdheringToPlacementPolicy(Set ackedBookies, - int writeQuorumSize, - int ackQuorumSize) { - conditionFirstInvocationLatch.countDown(); - return ackedBookies.size() == writeQuorumSizeToUseForTesting; - } - } + bookieToSleep = lh.getCurrentEnsemble().get(0); - /** - * Test to verify that PendingAddOp waits for success condition from areAckedBookiesAdheringToPlacementPolicy - * before returning success to client. Also tests working of WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS and - * WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS counters. - */ - @Test - public void testEnforceMinNumFaultDomainsForWrite() throws Exception { - byte[] data = "foobar".getBytes(); - byte[] password = "testPasswd".getBytes(); - - startNewBookie(); - startNewBookie(); - - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - conf.setEnsemblePlacementPolicy(MockRackawareEnsemblePlacementPolicy.class); - - conf.setAddEntryTimeout(2); - conf.setAddEntryQuorumTimeout(4); - conf.setEnforceMinNumFaultDomainsForWrite(true); - - TestStatsProvider statsProvider = new TestStatsProvider(); - - // Abnormal values for testing to prevent timeouts - BookKeeperTestClient bk = new BookKeeperTestClient(conf, statsProvider); - StatsLogger statsLogger = bk.getStatsLogger(); - - int ensembleSize = 3; - int writeQuorumSize = 3; - int ackQuorumSize = 2; - - CountDownLatch countDownLatch = new CountDownLatch(1); - MockRackawareEnsemblePlacementPolicy currPlacementPolicy = - (MockRackawareEnsemblePlacementPolicy) bk.getPlacementPolicy(); - currPlacementPolicy.setConditionFirstInvocationLatch(countDownLatch); - currPlacementPolicy.setWriteQuorumSizeToUseForTesting(writeQuorumSize); - - BookieId bookieToSleep; - - try (LedgerHandle lh = 
bk.createLedger(ensembleSize, writeQuorumSize, ackQuorumSize, digestType, password)) { - CountDownLatch sleepLatchCase1 = new CountDownLatch(1); - CountDownLatch sleepLatchCase2 = new CountDownLatch(1); - - // Put all non ensemble bookies to sleep - LOG.info("Putting all non ensemble bookies to sleep."); - for (BookieId addr : bookieAddresses()) { - try { - if (!lh.getCurrentEnsemble().contains(addr)) { - sleepBookie(addr, sleepLatchCase2); - } - } catch (UnknownHostException ignored) {} - } + LOG.info("Putting picked bookie to sleep"); + sleepBookie(bookieToSleep, sleepLatchCase1); - Thread writeToLedger = new Thread(() -> { - try { - LOG.info("Initiating write for entry"); - long entryId = lh.addEntry(data); - LOG.info("Wrote entry with entryId = {}", entryId); - } catch (InterruptedException | BKException ignored) { - } - }); + assertEquals( + statsLogger.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(), + 0); - bookieToSleep = lh.getCurrentEnsemble().get(0); + // Trying to write entry + writeToLedger.start(); - LOG.info("Putting picked bookie to sleep"); - sleepBookie(bookieToSleep, sleepLatchCase1); + // Waiting and checking to make sure that write has not succeeded + countDownLatch.await(conf.getAddEntryTimeout(), TimeUnit.SECONDS); + assertEquals(-1, lh.lastAddConfirmed, "Write succeeded but should not have"); - assertEquals(statsLogger - .getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS) - .get() - .longValue(), 0); + // Wake the bookie + sleepLatchCase1.countDown(); - // Trying to write entry - writeToLedger.start(); + // Waiting and checking to make sure that write has succeeded + writeToLedger.join(conf.getAddEntryTimeout() * 1000); + assertEquals(0, lh.lastAddConfirmed, "Write did not succeed but should have"); - // Waiting and checking to make sure that write has not succeeded - countDownLatch.await(conf.getAddEntryTimeout(), TimeUnit.SECONDS); - assertEquals(-1, lh.lastAddConfirmed, "Write succeeded but should not 
have"); + assertEquals( + statsLogger.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(), + 1); - // Wake the bookie - sleepLatchCase1.countDown(); + // AddEntry thread for second scenario + Thread writeToLedger2 = new Thread(() -> { + try { + LOG.info("Initiating write for entry"); + long entryId = lh.addEntry(data); + LOG.info("Wrote entry with entryId = {}", entryId); + } catch (InterruptedException | BKException ignored) { + } + }); - // Waiting and checking to make sure that write has succeeded - writeToLedger.join(conf.getAddEntryTimeout() * 1000); - assertEquals(0, lh.lastAddConfirmed, "Write did not succeed but should have"); + bookieToSleep = lh.getCurrentEnsemble().get(1); - assertEquals(statsLogger - .getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS) - .get() - .longValue(), 1); + LOG.info("Putting picked bookie to sleep"); + sleepBookie(bookieToSleep, sleepLatchCase2); - // AddEntry thread for second scenario - Thread writeToLedger2 = new Thread(() -> { - try { - LOG.info("Initiating write for entry"); - long entryId = lh.addEntry(data); - LOG.info("Wrote entry with entryId = {}", entryId); - } catch (InterruptedException | BKException ignored) { - } - }); + // Trying to write entry + writeToLedger2.start(); - bookieToSleep = lh.getCurrentEnsemble().get(1); + // Waiting and checking to make sure that write has failed + writeToLedger2.join((conf.getAddEntryQuorumTimeout() + 2) * 1000); + assertEquals(0, lh.lastAddConfirmed, "Write succeeded but should not have"); - LOG.info("Putting picked bookie to sleep"); - sleepBookie(bookieToSleep, sleepLatchCase2); + sleepLatchCase2.countDown(); - // Trying to write entry - writeToLedger2.start(); + assertEquals( + statsLogger.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(), + 2); - // Waiting and checking to make sure that write has failed - writeToLedger2.join((conf.getAddEntryQuorumTimeout() + 2) * 1000); - assertEquals(0, lh.lastAddConfirmed, "Write 
succeeded but should not have"); + assertEquals( + statsLogger.getCounter(WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(), + 1); + } + } - sleepLatchCase2.countDown(); + @Test + public void testBookieAddressResolverPassedToDNSToSwitchMapping() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - assertEquals(statsLogger.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(), - 2); + StaticDNSResolver tested = new StaticDNSResolver(); + try (BookKeeper bkc = BookKeeper.forConfig(conf).dnsResolver(tested).build()) { + bkc.createLedger(digestType, "testPasswd".getBytes()).close(); + assertSame(bkc.getBookieAddressResolver(), tested.getBookieAddressResolver()); + } + } - assertEquals(statsLogger.getCounter(WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(), - 1); - } + /** + * Mock of RackawareEnsemblePlacementPolicy. Overrides areAckedBookiesAdheringToPlacementPolicy to + * only return true when ackedBookies consists of writeQuorumSizeToUseForTesting bookies. 
+ */ + public static class MockRackawareEnsemblePlacementPolicy extends + RackawareEnsemblePlacementPolicy { + + private int writeQuorumSizeToUseForTesting; + private CountDownLatch conditionFirstInvocationLatch; + + void setWriteQuorumSizeToUseForTesting(int writeQuorumSizeToUseForTesting) { + this.writeQuorumSizeToUseForTesting = writeQuorumSizeToUseForTesting; } - @Test - public void testBookieAddressResolverPassedToDNSToSwitchMapping() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - - StaticDNSResolver tested = new StaticDNSResolver(); - try (BookKeeper bkc = BookKeeper - .forConfig(conf) - .dnsResolver(tested) - .build()) { - bkc.createLedger(digestType, "testPasswd".getBytes()).close(); - assertSame(bkc.getBookieAddressResolver(), tested.getBookieAddressResolver()); - } + void setConditionFirstInvocationLatch(CountDownLatch conditionFirstInvocationLatch) { + this.conditionFirstInvocationLatch = conditionFirstInvocationLatch; } - @Test - public void testBookieWatcher() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - - StaticDNSResolver tested = new StaticDNSResolver(); - try (BookKeeper bkc = BookKeeper - .forConfig(conf) - .dnsResolver(tested) - .build()) { - final Map bookieInfo = bkc.getBookieInfo(); - - // 1. check all bookies in client cache successfully. - bookieInfo.forEach((bookieId, info) -> { - final CompletableFuture> bookieServiceInfo = bkc.getMetadataClientDriver() - .getRegistrationClient().getBookieServiceInfo(bookieId); - assertTrue(bookieServiceInfo.isDone()); - assertFalse(bookieServiceInfo.isCompletedExceptionally()); - }); - - // 2. 
add a task to scheduler, blocking zk watch for bookies cache - bkc.getClientCtx().getScheduler().schedule(() -> { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException e) { - e.printStackTrace(); - } - }, 0, TimeUnit.MILLISECONDS); - - // 3. restart one bookie, so the client should update cache by WatchTask - restartBookie(bookieInfo.keySet().iterator().next()); - - // 4. after restart bookie, check again for the client cache - final CompletableFuture> bookieServiceInfo = - bkc.getMetadataClientDriver().getRegistrationClient() - .getBookieServiceInfo(bookieInfo.keySet().iterator().next()); - assertTrue(bookieServiceInfo.isDone()); - // 5. Previously, we used scheduler, and here getting bookie from client cache would fail. - // 6. After this PR, we introduced independent internal thread pool watchTaskScheduler, - // and here it will succeed. - assertFalse(bookieServiceInfo.isCompletedExceptionally()); - } + @Override + public boolean areAckedBookiesAdheringToPlacementPolicy(Set ackedBookies, + int writeQuorumSize, + int ackQuorumSize) { + conditionFirstInvocationLatch.countDown(); + return ackedBookies.size() == writeQuorumSizeToUseForTesting; + } + } + + class MockZooKeeperClient extends ZooKeeperClient { + + private final String connectString; + + private final int sessionTimeoutMs; + + private final ZooKeeperWatcherBase watcherManager; + + private final AtomicLong ledgerIdToInjectFailure; + + MockZooKeeperClient(String connectString, int sessionTimeoutMs, ZooKeeperWatcherBase watcher, + AtomicLong ledgerIdToInjectFailure) throws IOException { + /* + * in OperationalRetryPolicy maxRetries is > 0. So in case of any + * RecoverableException scenario, it will retry. 
+ */ + super(connectString, sessionTimeoutMs, watcher, + new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, + Integer.MAX_VALUE), + new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, 3), + NullStatsLogger.INSTANCE, + 1, 0, false); + this.connectString = connectString; + this.sessionTimeoutMs = sessionTimeoutMs; + this.watcherManager = watcher; + this.ledgerIdToInjectFailure = ledgerIdToInjectFailure; + } + + @Override + protected ZooKeeper createZooKeeper() throws IOException { + return new MockZooKeeper(this.connectString, this.sessionTimeoutMs, this.watcherManager, + false); + } + + class MockZooKeeper extends ZooKeeper { + + public MockZooKeeper(String connectString, int sessionTimeout, Watcher watcher, + boolean canBeReadOnly) + throws IOException { + super(connectString, sessionTimeout, watcher, canBeReadOnly); + } + + @Override + public void create(final String path, byte[] data, List acl, CreateMode createMode, + StringCallback cb, + Object ctx) { + StringCallback injectedCallback = new StringCallback() { + @Override + public void processResult(int rc, String path, Object ctx, String name) { + /** + * if ledgerIdToInjectFailure matches with the path of the node, then throw CONNECTIONLOSS error + * and then reset it to INVALID_LEDGERID. 
+ */ + if (path.contains(ledgerIdToInjectFailure.toString())) { + ledgerIdToInjectFailure.set(INVALID_LEDGERID); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, name); + } else { + cb.processResult(rc, path, ctx, name); + } + } + }; + super.create(path, data, acl, createMode, injectedCallback, ctx); + } } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTestClient.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTestClient.java index d9917c4ec96..5ec0533b74d 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTestClient.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTestClient.java @@ -36,109 +36,106 @@ import org.apache.zookeeper.ZooKeeper; /** - * Test BookKeeperClient which allows access to members we don't - * wish to expose in the public API. + * Test BookKeeperClient which allows access to members we don't wish to expose in the public API. */ @Slf4j public class BookKeeperTestClient extends BookKeeper { - TestStatsProvider statsProvider; - - public BookKeeperTestClient(ClientConfiguration conf, TestStatsProvider statsProvider) - throws IOException, InterruptedException, BKException { - super(conf, null, null, new UnpooledByteBufAllocator(false), - statsProvider == null ? 
NullStatsLogger.INSTANCE : statsProvider.getStatsLogger(""), - null, null, null); - this.statsProvider = statsProvider; - } - - public BookKeeperTestClient(ClientConfiguration conf, ZooKeeper zkc) - throws IOException, InterruptedException, BKException { - super(conf, zkc, null, new UnpooledByteBufAllocator(false), - NullStatsLogger.INSTANCE, null, null, null); - } - - public BookKeeperTestClient(ClientConfiguration conf) - throws InterruptedException, BKException, IOException { - this(conf, (TestStatsProvider) null); - } - - public ZooKeeper getZkHandle() { - return ((ZKMetadataClientDriver) metadataDriver).getZk(); - } - - public ClientConfiguration getConf() { - return super.getConf(); - } - - public BookieClient getBookieClient() { - return bookieClient; - } - - public Future waitForReadOnlyBookie(BookieId b) - throws Exception { - return waitForBookieInSet(b, false); - } - - public Future waitForWritableBookie(BookieId b) - throws Exception { - return waitForBookieInSet(b, true); - } - /** - * Wait for bookie to appear in either the writable set of bookies, - * or the read only set of bookies. Also ensure that it doesn't exist - * in the other set before completing. - */ - private Future waitForBookieInSet(BookieId b, - boolean writable) throws Exception { - log.info("Wait for {} to become {}", - b, writable ? "writable" : "readonly"); - - CompletableFuture readOnlyFuture = new CompletableFuture<>(); - CompletableFuture writableFuture = new CompletableFuture<>(); - - RegistrationListener readOnlyListener = (bookies) -> { - boolean contains = bookies.getValue().contains(b); - if ((!writable && contains) || (writable && !contains)) { - readOnlyFuture.complete(null); + TestStatsProvider statsProvider; + + public BookKeeperTestClient(ClientConfiguration conf, TestStatsProvider statsProvider) + throws IOException, InterruptedException, BKException { + super(conf, null, null, new UnpooledByteBufAllocator(false), + statsProvider == null ? 
NullStatsLogger.INSTANCE : statsProvider.getStatsLogger(""), null, + null, null); + this.statsProvider = statsProvider; + } + + public BookKeeperTestClient(ClientConfiguration conf, ZooKeeper zkc) + throws IOException, InterruptedException, BKException { + super(conf, zkc, null, new UnpooledByteBufAllocator(false), NullStatsLogger.INSTANCE, null, + null, null); + } + + public BookKeeperTestClient(ClientConfiguration conf) + throws InterruptedException, BKException, IOException { + this(conf, (TestStatsProvider) null); + } + + public ZooKeeper getZkHandle() { + return ((ZKMetadataClientDriver) metadataDriver).getZk(); + } + + public ClientConfiguration getConf() { + return super.getConf(); + } + + public BookieClient getBookieClient() { + return bookieClient; + } + + public Future waitForReadOnlyBookie(BookieId b) throws Exception { + return waitForBookieInSet(b, false); + } + + public Future waitForWritableBookie(BookieId b) throws Exception { + return waitForBookieInSet(b, true); + } + + /** + * Wait for bookie to appear in either the writable set of bookies, or the read only set of + * bookies. Also ensure that it doesn't exist in the other set before completing. + */ + private Future waitForBookieInSet(BookieId b, boolean writable) throws Exception { + log.info("Wait for {} to become {}", b, writable ? 
"writable" : "readonly"); + + CompletableFuture readOnlyFuture = new CompletableFuture<>(); + CompletableFuture writableFuture = new CompletableFuture<>(); + + RegistrationListener readOnlyListener = (bookies) -> { + boolean contains = bookies.getValue().contains(b); + if ((!writable && contains) || (writable && !contains)) { + readOnlyFuture.complete(null); + } + }; + RegistrationListener writableListener = (bookies) -> { + boolean contains = bookies.getValue().contains(b); + if ((writable && contains) || (!writable && !contains)) { + writableFuture.complete(null); + } + }; + + getMetadataClientDriver().getRegistrationClient().watchWritableBookies(writableListener); + getMetadataClientDriver().getRegistrationClient().watchReadOnlyBookies(readOnlyListener); + + if (writable) { + return writableFuture + .thenCompose( + ignored -> getMetadataClientDriver().getRegistrationClient().getReadOnlyBookies()) + .thenCompose(readonlyBookies -> { + if (readonlyBookies.getValue().contains(b)) { + // if the bookie still shows up at readonly path, wait for it to disappear + return readOnlyFuture; + } else { + return FutureUtils.Void(); } - }; - RegistrationListener writableListener = (bookies) -> { - boolean contains = bookies.getValue().contains(b); - if ((writable && contains) || (!writable && !contains)) { - writableFuture.complete(null); + }); + } else { + return readOnlyFuture + .thenCompose( + ignored -> getMetadataClientDriver().getRegistrationClient().getWritableBookies()) + .thenCompose(writableBookies -> { + if (writableBookies.getValue().contains(b)) { + // if the bookie still shows up at writable path, wait for it to disappear + return writableFuture; + } else { + return FutureUtils.Void(); } - }; - - getMetadataClientDriver().getRegistrationClient().watchWritableBookies(writableListener); - getMetadataClientDriver().getRegistrationClient().watchReadOnlyBookies(readOnlyListener); - - if (writable) { - return writableFuture - .thenCompose(ignored -> 
getMetadataClientDriver().getRegistrationClient().getReadOnlyBookies()) - .thenCompose(readonlyBookies -> { - if (readonlyBookies.getValue().contains(b)) { - // if the bookie still shows up at readonly path, wait for it to disappear - return readOnlyFuture; - } else { - return FutureUtils.Void(); - } - }); - } else { - return readOnlyFuture - .thenCompose(ignored -> getMetadataClientDriver().getRegistrationClient().getWritableBookies()) - .thenCompose(writableBookies -> { - if (writableBookies.getValue().contains(b)) { - // if the bookie still shows up at writable path, wait for it to disappear - return writableFuture; - } else { - return FutureUtils.Void(); - } - }); - } + }); } + } - public TestStatsProvider getTestStatsProvider() { - return statsProvider; - } + public TestStatsProvider getTestStatsProvider() { + return statsProvider; + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgerTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgerTest.java index ea0d1b56b49..2337dfaf403 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgerTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgerTest.java @@ -24,11 +24,12 @@ import static org.apache.bookkeeper.client.BookKeeperClientStats.ADD_OP_UR; import static org.apache.bookkeeper.client.BookKeeperClientStats.CLIENT_SCOPE; import static org.apache.bookkeeper.client.BookKeeperClientStats.READ_OP_DM; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import com.google.common.collect.Lists; import io.netty.buffer.AbstractByteBufAllocator; @@ -79,1549 +80,1614 @@ import org.apache.bookkeeper.util.BookKeeperConstants; import org.apache.commons.lang3.tuple.Pair; import org.awaitility.Awaitility; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Testing ledger write entry cases. */ -@RunWith(Parameterized.class) public class BookieWriteLedgerTest extends BookKeeperClusterTestCase implements AddCallback { - private static final Logger LOG = LoggerFactory - .getLogger(BookieWriteLedgerTest.class); + private static final Logger LOG = LoggerFactory + .getLogger(BookieWriteLedgerTest.class); - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - { true, true }, { true, false }, { false, true }, { false, false } - }); - } + public static Collection data() { + return Arrays.asList(new Object[][]{ + {true, true}, {true, false}, {false, true}, {false, false} + }); + } - @Parameterized.Parameter(0) - public boolean useV2; + public boolean useV2; + public boolean writeJournal; - @Parameterized.Parameter(1) - public boolean writeJournal; + byte[] ledgerPassword = "aaa".getBytes(); + LedgerHandle lh, lh2; + Enumeration ls; - byte[] ledgerPassword = "aaa".getBytes(); - LedgerHandle lh, lh2; - Enumeration ls; + // test related variables + int numEntriesToWrite = 100; + int maxInt = Integer.MAX_VALUE; + Random rng; // Random Number Generator + ArrayList entries1; // 
generated entries + ArrayList entries2; // generated entries - // test related variables - int numEntriesToWrite = 100; - int maxInt = Integer.MAX_VALUE; - Random rng; // Random Number Generator - ArrayList entries1; // generated entries - ArrayList entries2; // generated entries + private final DigestType digestType; - private final DigestType digestType; + private static class SyncObj { - private static class SyncObj { - volatile int counter; - volatile int rc; + volatile int counter; + volatile int rc; - public SyncObj() { - counter = 0; - } + public SyncObj() { + counter = 0; } - - @Override - @Before - public void setUp() throws Exception { - baseConf.setJournalWriteData(writeJournal); - baseClientConf.setUseV2WireProtocol(useV2); - - super.setUp(); - rng = new Random(0); // Initialize the Random - // Number Generator - entries1 = new ArrayList(); // initialize the entries list - entries2 = new ArrayList(); // initialize the entries list - } - - public BookieWriteLedgerTest() { - super(5, 180); - this.digestType = DigestType.CRC32; - String ledgerManagerFactory = "org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory"; - // set ledger manager - baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); - /* - * 'testLedgerCreateAdvWithLedgerIdInLoop2' testcase relies on skipListSizeLimit, - * so setting it to some small value for making that testcase lite. - */ - baseConf.setSkipListSizeLimit(4 * 1024 * 1024); - baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); - } - - /** - * Verify write when few bookie failures in last ensemble and forcing - * ensemble reformation. 
+ } + + @BeforeEach + public void setUp() throws Exception { + baseConf.setJournalWriteData(writeJournal); + baseClientConf.setUseV2WireProtocol(useV2); + + super.setUp(); + rng = new Random(0); // Initialize the Random + // Number Generator + entries1 = new ArrayList(); // initialize the entries list + entries2 = new ArrayList(); // initialize the entries list + } + + public BookieWriteLedgerTest() { + super(5, 180); + this.digestType = DigestType.CRC32; + String ledgerManagerFactory = "org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory"; + // set ledger manager + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + /* + * 'testLedgerCreateAdvWithLedgerIdInLoop2' testcase relies on skipListSizeLimit, + * so setting it to some small value for making that testcase lite. */ - @Test - public void testWithMultipleBookieFailuresInLastEnsemble() throws Exception { - // Create a ledger - lh = bkc.createLedger(5, 4, digestType, ledgerPassword); - LOG.info("Ledger ID: " + lh.getId()); - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - // Start three more bookies - startNewBookie(); - startNewBookie(); - startNewBookie(); - - // Shutdown three bookies in the last ensemble and continue writing - List ensemble = lh.getLedgerMetadata() - .getAllEnsembles().entrySet().iterator().next().getValue(); - killBookie(ensemble.get(0)); - killBookie(ensemble.get(1)); - killBookie(ensemble.get(2)); - - int i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 50; - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - readEntries(lh, entries1); - lh.close(); + baseConf.setSkipListSizeLimit(4 * 1024 * 1024); + 
baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + } + + /** + * Verify write when few bookie failures in last ensemble and forcing ensemble reformation. + */ + @MethodSource("data") + @ParameterizedTest + public void withMultipleBookieFailuresInLastEnsemble(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + lh = bkc.createLedger(5, 4, digestType, ledgerPassword); + LOG.info("Ledger ID: " + lh.getId()); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(entry.array()); } + // Start three more bookies + startNewBookie(); + startNewBookie(); + startNewBookie(); + + // Shutdown three bookies in the last ensemble and continue writing + List ensemble = lh.getLedgerMetadata() + .getAllEnsembles().entrySet().iterator().next().getValue(); + killBookie(ensemble.get(0)); + killBookie(ensemble.get(1)); + killBookie(ensemble.get(2)); + + int i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 50; + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(entry.array()); + } + readEntries(lh, entries1); + lh.close(); + } + + /** + * Verify write and Read durability stats. 
+ */ + @MethodSource("data") + @ParameterizedTest + public void writeAndReadStats(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); + + // write-batch-1 + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(entry.array()); + } + assertTrue( + bkc.getTestStatsProvider().getOpStatsLogger( + CLIENT_SCOPE + "." + ADD_OP) + .getSuccessCount() > 0, + "Stats should have captured a new writes"); - /** - * Verify write and Read durability stats. - */ - @Test - public void testWriteAndReadStats() throws Exception { - // Create a ledger - lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); - - // write-batch-1 - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - assertTrue( - "Stats should have captured a new writes", - bkc.getTestStatsProvider().getOpStatsLogger( - CLIENT_SCOPE + "." 
+ ADD_OP) - .getSuccessCount() > 0); - - CountDownLatch sleepLatch1 = new CountDownLatch(1); - CountDownLatch sleepLatch2 = new CountDownLatch(1); - List ensemble = lh.getLedgerMetadata() - .getAllEnsembles().entrySet().iterator().next().getValue(); - - sleepBookie(ensemble.get(0), sleepLatch1); + CountDownLatch sleepLatch1 = new CountDownLatch(1); + CountDownLatch sleepLatch2 = new CountDownLatch(1); + List ensemble = lh.getLedgerMetadata() + .getAllEnsembles().entrySet().iterator().next().getValue(); - int i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 50; + sleepBookie(ensemble.get(0), sleepLatch1); - // write-batch-2 + int i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 50; - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); + // write-batch-2 - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); - // Let the second bookie go to sleep. This forces write timeout and ensemble change - // Which will be enough time to receive delayed write failures on the write-batch-2 + entries1.add(entry.array()); + lh.addEntry(entry.array()); + } - sleepBookie(ensemble.get(1), sleepLatch2); - i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 50; + // Let the second bookie go to sleep. 
This forces write timeout and ensemble change + // Which will be enough time to receive delayed write failures on the write-batch-2 - // write-batch-3 + sleepBookie(ensemble.get(1), sleepLatch2); + i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 50; - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); + // write-batch-3 - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); - assertTrue( - "Stats should have captured a new UnderReplication during write", - bkc.getTestStatsProvider().getCounter( - CLIENT_SCOPE + "." + ADD_OP_UR) - .get() > 0); + entries1.add(entry.array()); + lh.addEntry(entry.array()); + } - sleepLatch1.countDown(); - sleepLatch2.countDown(); + assertTrue( + bkc.getTestStatsProvider().getCounter( + CLIENT_SCOPE + "." + ADD_OP_UR) + .get() > 0, + "Stats should have captured a new UnderReplication during write"); - // Replace the bookie with a fake bookie - ServerConfiguration conf = killBookie(ensemble.get(0)); - BookieWriteLedgerTest.CorruptReadBookie corruptBookie = new BookieWriteLedgerTest.CorruptReadBookie(conf); - startAndAddBookie(conf, corruptBookie); + sleepLatch1.countDown(); + sleepLatch2.countDown(); - i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 50; + // Replace the bookie with a fake bookie + ServerConfiguration conf = killBookie(ensemble.get(0)); + BookieWriteLedgerTest.CorruptReadBookie corruptBookie = new BookieWriteLedgerTest.CorruptReadBookie( + conf); + startAndAddBookie(conf, corruptBookie); - // write-batch-4 + i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 50; - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); + // write-batch-4 - 
entries1.add(entry.array()); - lh.addEntry(entry.array()); - } + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); - readEntries(lh, entries1); - assertTrue( - "Stats should have captured DigestMismatch on Read", - bkc.getTestStatsProvider().getCounter( - CLIENT_SCOPE + "." + READ_OP_DM) - .get() > 0); - lh.close(); + entries1.add(entry.array()); + lh.addEntry(entry.array()); } - /** - * Verty delayedWriteError causes ensemble changes. - */ - @Test - public void testDelayedWriteEnsembleChange() throws Exception { - // Create a ledger - lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); - baseClientConf.setAddEntryTimeout(1); - - int numEntriesToWrite = 10; - // write-batch-1 - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - CountDownLatch sleepLatch1 = new CountDownLatch(1); - - // get bookie at index-0 - BookieId bookie1 = lh.getCurrentEnsemble().get(0); - sleepBookie(bookie1, sleepLatch1); + readEntries(lh, entries1); + assertTrue( + bkc.getTestStatsProvider().getCounter( + CLIENT_SCOPE + "." + READ_OP_DM) + .get() > 0, + "Stats should have captured DigestMismatch on Read"); + lh.close(); + } + + /** + * Verty delayedWriteError causes ensemble changes. 
+ */ + @MethodSource("data") + @ParameterizedTest + public void delayedWriteEnsembleChange(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); + baseClientConf.setAddEntryTimeout(1); + + int numEntriesToWrite = 10; + // write-batch-1 + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(entry.array()); + } - int i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 10; + CountDownLatch sleepLatch1 = new CountDownLatch(1); - // write-batch-2 + // get bookie at index-0 + BookieId bookie1 = lh.getCurrentEnsemble().get(0); + sleepBookie(bookie1, sleepLatch1); - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); + int i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 10; - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - // Sleep to receive delayed error on the write directed to the sleeping bookie - Thread.sleep(baseClientConf.getAddEntryTimeout() * 1000 * 2); - assertTrue( - "Stats should have captured a new UnderReplication during write", - bkc.getTestStatsProvider().getCounter( - CLIENT_SCOPE + "." + ADD_OP_UR) - .get() > 0); - - i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 10; - - // write-batch-3 - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - - sleepLatch1.countDown(); - // get the bookie at index-0 again, this must be different. 
- BookieId bookie2 = lh.getCurrentEnsemble().get(0); - - assertFalse( - "Delayed write error must have forced ensemble change", - bookie1.equals(bookie2)); - lh.close(); - } - /** - * Verify the functionality Ledgers with different digests. - * - * @throws Exception - */ - @Test - public void testLedgerDigestTest() throws Exception { - for (DigestType type: DigestType.values()) { - lh = bkc.createLedger(5, 3, 2, type, ledgerPassword); - - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(entry.array()); - } - - readEntries(lh, entries1); - - long lid = lh.getId(); - lh.close(); - bkc.deleteLedger(lid); - entries1.clear(); - } - } + // write-batch-2 - /** - * Verify the functionality of Advanced Ledger which returns - * LedgerHandleAdv. LedgerHandleAdv takes entryId for addEntry, and let - * user manage entryId allocation. - * - * @throws Exception - */ - @Test - public void testLedgerCreateAdv() throws Exception { - // Create a ledger - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(i, entry.array()); - } - // Start one more bookies - startNewBookie(); - - // Shutdown one bookie in the last ensemble and continue writing - List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() - .getValue(); - killBookie(ensemble.get(0)); - - int i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 50; - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(i, entry.array()); - } + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = 
ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); - readEntries(lh, entries1); - lh.close(); + entries1.add(entry.array()); + lh.addEntry(entry.array()); } - - /** - * Verify that attempts to use addEntry() variant that does not require specifying entry id - * on LedgerHandleAdv results in error. - * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvAndWriteNonAdv() throws Exception { - long ledgerId = 0xABCDEF; - lh = bkc.createLedgerAdv(ledgerId, 3, 3, 2, digestType, ledgerPassword, null); - - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - try { - lh.addEntry(entry.array()); - fail("expected IllegalOpException"); - } catch (BKException.BKIllegalOpException e) { - // pass, expected - } finally { - lh.close(); - bkc.deleteLedger(ledgerId); - } + // Sleep to receive delayed error on the write directed to the sleeping bookie + Thread.sleep(baseClientConf.getAddEntryTimeout() * 1000 * 2); + assertTrue( + bkc.getTestStatsProvider().getCounter( + CLIENT_SCOPE + "." + ADD_OP_UR) + .get() > 0, + "Stats should have captured a new UnderReplication during write"); + + i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 10; + + // write-batch-3 + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(entry.array()); } - /** - * Verify that LedgerHandleAdv cannot handle addEntry without the entryId. - * - * @throws Exception - */ - @Test - public void testNoAddEntryLedgerCreateAdv() throws Exception { - + sleepLatch1.countDown(); + // get the bookie at index-0 again, this must be different. + BookieId bookie2 = lh.getCurrentEnsemble().get(0); + + assertNotEquals(bookie1, bookie2, "Delayed write error must have forced ensemble change"); + lh.close(); + } + + /** + * Verify the functionality Ledgers with different digests. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerDigestTest(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + for (DigestType type : DigestType.values()) { + lh = bkc.createLedger(5, 3, 2, type, ledgerPassword); + + for (int i = 0; i < numEntriesToWrite; i++) { ByteBuffer entry = ByteBuffer.allocate(4); entry.putInt(rng.nextInt(maxInt)); entry.position(0); - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - assertTrue(lh instanceof LedgerHandleAdv); - - try { - lh.addEntry(entry.array()); - fail("using LedgerHandleAdv addEntry without entryId is forbidden"); - } catch (BKException e) { - assertEquals(e.getCode(), BKException.Code.IllegalOpException); - } - - try { - lh.addEntry(entry.array(), 0, 4); - fail("using LedgerHandleAdv addEntry without entryId is forbidden"); - } catch (BKException e) { - assertEquals(e.getCode(), BKException.Code.IllegalOpException); - } - - try { - CompletableFuture done = new CompletableFuture<>(); - lh.asyncAddEntry(Unpooled.wrappedBuffer(entry.array()), - (int rc, LedgerHandle lh1, long entryId, Object ctx) -> { - SyncCallbackUtils.finish(rc, null, done); - }, null); - done.get(); - } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof BKException); - BKException e = (BKException) ee.getCause(); - assertEquals(e.getCode(), BKException.Code.IllegalOpException); - } + entries1.add(entry.array()); + lh.addEntry(entry.array()); + } - try { - CompletableFuture done = new CompletableFuture<>(); - lh.asyncAddEntry(entry.array(), - (int rc, LedgerHandle lh1, long entryId, Object ctx) -> { - SyncCallbackUtils.finish(rc, null, done); - }, null); - done.get(); - } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof BKException); - BKException e = (BKException) ee.getCause(); - assertEquals(e.getCode(), BKException.Code.IllegalOpException); - } + readEntries(lh, entries1); - try { - 
CompletableFuture done = new CompletableFuture<>(); - lh.asyncAddEntry(entry.array(), 0, 4, - (int rc, LedgerHandle lh1, long entryId, Object ctx) -> { - SyncCallbackUtils.finish(rc, null, done); - }, null); - done.get(); - } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof BKException); - BKException e = (BKException) ee.getCause(); - assertEquals(e.getCode(), BKException.Code.IllegalOpException); - } - lh.close(); + long lid = lh.getId(); + lh.close(); + bkc.deleteLedger(lid); + entries1.clear(); } - - /** - * Verify the functionality of Advanced Ledger which accepts ledgerId as input and returns - * LedgerHandleAdv. LedgerHandleAdv takes entryId for addEntry, and let - * user manage entryId allocation. - * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvWithLedgerId() throws Exception { - // Create a ledger - long ledgerId = 0xABCDEF; - lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(i, entry.array()); - } - // Start one more bookies - startNewBookie(); - - // Shutdown one bookie in the last ensemble and continue writing - List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() - .getValue(); - killBookie(ensemble.get(0)); - - int i = numEntriesToWrite; - numEntriesToWrite = numEntriesToWrite + 50; - for (; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(i, entry.array()); - } - - readEntries(lh, entries1); - lh.close(); - bkc.deleteLedger(ledgerId); + } + + /** + * Verify the functionality of Advanced Ledger which returns LedgerHandleAdv. LedgerHandleAdv + * takes entryId for addEntry, and let user manage entryId allocation. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdv(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(i, entry.array()); + } + // Start one more bookies + startNewBookie(); + + // Shutdown one bookie in the last ensemble and continue writing + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() + .getValue(); + killBookie(ensemble.get(0)); + + int i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 50; + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(i, entry.array()); } - /** - * Verify the functionality of Ledger create which accepts customMetadata as input. - * Also verifies that the data written is read back properly. - * - * @throws Exception - */ - @Test - public void testLedgerCreateWithCustomMetadata() throws Exception { - // Create a ledger - long ledgerId; - int maxLedgers = 10; - for (int i = 0; i < maxLedgers; i++) { - Map inputCustomMetadataMap = new HashMap(); - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - // each ledger has different number of key, value pairs. 
- for (int j = 0; j < i; j++) { - inputCustomMetadataMap.put("key" + j, UUID.randomUUID().toString().getBytes()); - } - - if (i < maxLedgers / 2) { - // 0 to 4 test with createLedger interface - lh = bkc.createLedger(5, 3, 2, digestType, ledgerPassword, inputCustomMetadataMap); - ledgerId = lh.getId(); - lh.addEntry(entry.array()); - } else { - // 5 to 9 test with createLedgerAdv interface - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword, inputCustomMetadataMap); - ledgerId = lh.getId(); - lh.addEntry(0, entry.array()); - } - lh.close(); - - // now reopen the ledger; this should fetch all the metadata stored on zk - // and the customMetadata written and read should match - lh = bkc.openLedger(ledgerId, digestType, ledgerPassword); - Map outputCustomMetadataMap = lh.getCustomMetadata(); - assertTrue("Can't retrieve proper Custom Data", - areByteArrayValMapsEqual(inputCustomMetadataMap, outputCustomMetadataMap)); - lh.close(); - bkc.deleteLedger(ledgerId); - } + readEntries(lh, entries1); + lh.close(); + } + + /** + * Verify that attempts to use addEntry() variant that does not require specifying entry id on + * LedgerHandleAdv results in error. + * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvAndWriteNonAdv(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + long ledgerId = 0xABCDEF; + lh = bkc.createLedgerAdv(ledgerId, 3, 3, 2, digestType, ledgerPassword, null); + + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + try { + lh.addEntry(entry.array()); + fail("expected IllegalOpException"); + } catch (BKException.BKIllegalOpException e) { + // pass, expected + } finally { + lh.close(); + bkc.deleteLedger(ledgerId); + } + } + + /** + * Verify that LedgerHandleAdv cannot handle addEntry without the entryId. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void noAddEntryLedgerCreateAdv(boolean useV2, boolean writeJournal) throws Exception { + + initBookieWriteLedgerTest(useV2, writeJournal); + + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + assertTrue(lh instanceof LedgerHandleAdv); + + try { + lh.addEntry(entry.array()); + fail("using LedgerHandleAdv addEntry without entryId is forbidden"); + } catch (BKException e) { + assertEquals(BKException.Code.IllegalOpException, e.getCode()); } - /** - * Routine to compare two {@code Map}; Since the values in the map are {@code byte[]}, we can't use - * {@code Map.equals}. - * @param first - * The first map - * @param second - * The second map to compare with - * @return true if the 2 maps contain the exact set of {@code } pairs. - */ - public static boolean areByteArrayValMapsEqual(Map first, Map second) { - if (first == null && second == null) { - return true; - } + try { + lh.addEntry(entry.array(), 0, 4); + fail("using LedgerHandleAdv addEntry without entryId is forbidden"); + } catch (BKException e) { + assertEquals(BKException.Code.IllegalOpException, e.getCode()); + } - // above check confirms that both are not null; - // if one is null the other isn't; so they must - // be different - if (first == null || second == null) { - return false; - } + try { + CompletableFuture done = new CompletableFuture<>(); + lh.asyncAddEntry(Unpooled.wrappedBuffer(entry.array()), + (int rc, LedgerHandle lh1, long entryId, Object ctx) -> { + SyncCallbackUtils.finish(rc, null, done); + }, null); + done.get(); + } catch (ExecutionException ee) { + assertTrue(ee.getCause() instanceof BKException); + BKException e = (BKException) ee.getCause(); + assertEquals(BKException.Code.IllegalOpException, e.getCode()); + } - if (first.size() != second.size()) { - return false; - } - for 
(Map.Entry entry : first.entrySet()) { - if (!Arrays.equals(entry.getValue(), second.get(entry.getKey()))) { - return false; - } - } - return true; + try { + CompletableFuture done = new CompletableFuture<>(); + lh.asyncAddEntry(entry.array(), + (int rc, LedgerHandle lh1, long entryId, Object ctx) -> { + SyncCallbackUtils.finish(rc, null, done); + }, null); + done.get(); + } catch (ExecutionException ee) { + assertTrue(ee.getCause() instanceof BKException); + BKException e = (BKException) ee.getCause(); + assertEquals(BKException.Code.IllegalOpException, e.getCode()); } - /* - * Verify the functionality of Advanced Ledger which accepts ledgerId as - * input and returns LedgerHandleAdv. LedgerHandleAdv takes entryId for - * addEntry, and let user manage entryId allocation. - * This testcase is mainly added for covering missing code coverage branches - * in LedgerHandleAdv - * - * @throws Exception - */ - @Test - public void testLedgerHandleAdvFunctionality() throws Exception { - // Create a ledger - long ledgerId = 0xABCDEF; - lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); - numEntriesToWrite = 3; + try { + CompletableFuture done = new CompletableFuture<>(); + lh.asyncAddEntry(entry.array(), 0, 4, + (int rc, LedgerHandle lh1, long entryId, Object ctx) -> { + SyncCallbackUtils.finish(rc, null, done); + }, null); + done.get(); + } catch (ExecutionException ee) { + assertTrue(ee.getCause() instanceof BKException); + BKException e = (BKException) ee.getCause(); + assertEquals(BKException.Code.IllegalOpException, e.getCode()); + } + lh.close(); + } + + /** + * Verify the functionality of Advanced Ledger which accepts ledgerId as input and returns + * LedgerHandleAdv. LedgerHandleAdv takes entryId for addEntry, and let user manage entryId + * allocation. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithLedgerId(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + long ledgerId = 0xABCDEF; + lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(i, entry.array()); + } + // Start one more bookies + startNewBookie(); + + // Shutdown one bookie in the last ensemble and continue writing + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() + .getValue(); + killBookie(ensemble.get(0)); + + int i = numEntriesToWrite; + numEntriesToWrite = numEntriesToWrite + 50; + for (; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(i, entry.array()); + } - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(entry.array()); + readEntries(lh, entries1); + lh.close(); + bkc.deleteLedger(ledgerId); + } + + /** + * Verify the functionality of Ledger create which accepts customMetadata as input. Also verifies + * that the data written is read back properly. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateWithCustomMetadata(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + long ledgerId; + int maxLedgers = 10; + for (int i = 0; i < maxLedgers; i++) { + Map inputCustomMetadataMap = new HashMap(); + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + // each ledger has different number of key, value pairs. + for (int j = 0; j < i; j++) { + inputCustomMetadataMap.put("key" + j, UUID.randomUUID().toString().getBytes()); + } + + if (i < maxLedgers / 2) { + // 0 to 4 test with createLedger interface + lh = bkc.createLedger(5, 3, 2, digestType, ledgerPassword, inputCustomMetadataMap); + ledgerId = lh.getId(); + lh.addEntry(entry.array()); + } else { + // 5 to 9 test with createLedgerAdv interface + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword, inputCustomMetadataMap); + ledgerId = lh.getId(); lh.addEntry(0, entry.array()); + } + lh.close(); + + // now reopen the ledger; this should fetch all the metadata stored on zk + // and the customMetadata written and read should match + lh = bkc.openLedger(ledgerId, digestType, ledgerPassword); + Map outputCustomMetadataMap = lh.getCustomMetadata(); + assertTrue(areByteArrayValMapsEqual(inputCustomMetadataMap, outputCustomMetadataMap), + "Can't retrieve proper Custom Data"); + lh.close(); + bkc.deleteLedger(ledgerId); + } + } + + /** + * Routine to compare two {@code Map}; Since the values in the map are {@code + * byte[]}, we can't use {@code Map.equals}. + * + * @param first The first map + * @param second The second map to compare with + * @return true if the 2 maps contain the exact set of {@code } pairs. 
+ */ + public static boolean areByteArrayValMapsEqual(Map first, + Map second) { + if (first == null && second == null) { + return true; + } - // here asyncAddEntry(final long entryId, final byte[] data, final - // AddCallback cb, final Object ctx) method is - // called which is not covered in any other testcase - entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(entry.array()); - CountDownLatch latch = new CountDownLatch(1); - final int[] returnedRC = new int[1]; - lh.asyncAddEntry(1, entry.array(), new AddCallback() { - @Override - public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { - CountDownLatch latch = (CountDownLatch) ctx; - returnedRC[0] = rc; - latch.countDown(); - } - }, latch); - latch.await(); - assertTrue("Returned code is expected to be OK", returnedRC[0] == BKException.Code.OK); - - // here addEntry is called with incorrect offset and length - entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - try { - lh.addEntry(2, entry.array(), -3, 9); - fail("AddEntry is called with negative offset and incorrect length," - + "so it is expected to throw RuntimeException/IndexOutOfBoundsException"); - } catch (RuntimeException exception) { - // expected RuntimeException/IndexOutOfBoundsException - } - - // here addEntry is called with corrected offset and length and it is - // supposed to succeed - entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(entry.array()); - lh.addEntry(2, entry.array()); - - // LedgerHandle is closed for write - lh.close(); + // above check confirms that both are not null; + // if one is null the other isn't; so they must + // be different + if (first == null || second == null) { + return false; + } - // here addEntry is called even after the close of the LedgerHandle, so - // it is expected to throw exception - entry = ByteBuffer.allocate(4); - 
entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(entry.array()); - try { - lh.addEntry(3, entry.array()); - fail("AddEntry is called after the close of LedgerHandle," - + "so it is expected to throw BKLedgerClosedException"); - } catch (BKLedgerClosedException exception) { - } + if (first.size() != second.size()) { + return false; + } + for (Map.Entry entry : first.entrySet()) { + if (!Arrays.equals(entry.getValue(), second.get(entry.getKey()))) { + return false; + } + } + return true; + } + + /* + * Verify the functionality of Advanced Ledger which accepts ledgerId as + * input and returns LedgerHandleAdv. LedgerHandleAdv takes entryId for + * addEntry, and let user manage entryId allocation. + * This testcase is mainly added for covering missing code coverage branches + * in LedgerHandleAdv + * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerHandleAdvFunctionality(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + long ledgerId = 0xABCDEF; + lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); + numEntriesToWrite = 3; + + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(entry.array()); + lh.addEntry(0, entry.array()); + + // here asyncAddEntry(final long entryId, final byte[] data, final + // AddCallback cb, final Object ctx) method is + // called which is not covered in any other testcase + entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(entry.array()); + CountDownLatch latch = new CountDownLatch(1); + final int[] returnedRC = new int[1]; + lh.asyncAddEntry(1, entry.array(), new AddCallback() { + @Override + public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { + CountDownLatch latch = (CountDownLatch) ctx; + returnedRC[0] = rc; + latch.countDown(); + 
} + }, latch); + latch.await(); + assertEquals(BKException.Code.OK, returnedRC[0], "Returned code is expected to be OK"); + + // here addEntry is called with incorrect offset and length + entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + try { + lh.addEntry(2, entry.array(), -3, 9); + fail("AddEntry is called with negative offset and incorrect length," + + "so it is expected to throw RuntimeException/IndexOutOfBoundsException"); + } catch (RuntimeException exception) { + // expected RuntimeException/IndexOutOfBoundsException + } - readEntries(lh, entries1); - bkc.deleteLedger(ledgerId); + // here addEntry is called with corrected offset and length and it is + // supposed to succeed + entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(entry.array()); + lh.addEntry(2, entry.array()); + + // LedgerHandle is closed for write + lh.close(); + + // here addEntry is called even after the close of the LedgerHandle, so + // it is expected to throw exception + entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(entry.array()); + try { + lh.addEntry(3, entry.array()); + fail("AddEntry is called after the close of LedgerHandle," + + "so it is expected to throw BKLedgerClosedException"); + } catch (BKLedgerClosedException exception) { } - /** - * In a loop create/write/delete the ledger with same ledgerId through - * the functionality of Advanced Ledger which accepts ledgerId as input. 
- * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvWithLedgerIdInLoop() throws Exception { - int ledgerCount = 40; - - long maxId = 9999999999L; - if (baseConf.getLedgerManagerFactoryClass().equals(LongHierarchicalLedgerManagerFactory.class)) { - // since LongHierarchicalLedgerManager supports ledgerIds of decimal length upto 19 digits but other - // LedgerManagers only upto 10 decimals - maxId = Long.MAX_VALUE; - } + readEntries(lh, entries1); + bkc.deleteLedger(ledgerId); + } + + /** + * In a loop create/write/delete the ledger with same ledgerId through the functionality of + * Advanced Ledger which accepts ledgerId as input. + * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithLedgerIdInLoop(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + int ledgerCount = 40; + + long maxId = 9999999999L; + if (baseConf.getLedgerManagerFactoryClass() + .equals(LongHierarchicalLedgerManagerFactory.class)) { + // since LongHierarchicalLedgerManager supports ledgerIds of decimal length upto 19 digits but other + // LedgerManagers only upto 10 decimals + maxId = Long.MAX_VALUE; + } - rng.longs(ledgerCount, 0, maxId) // generate a stream of ledger ids - .mapToObj(ledgerId -> { // create a ledger for each ledger id - LOG.info("Creating adv ledger with id {}", ledgerId); - return bkc.newCreateLedgerOp() - .withEnsembleSize(1).withWriteQuorumSize(1).withAckQuorumSize(1) - .withDigestType(org.apache.bookkeeper.client.api.DigestType.CRC32) - .withPassword(ledgerPassword).makeAdv().withLedgerId(ledgerId) - .execute() - .thenCompose(writer -> { // Add entries to ledger when created - LOG.info("Writing stream of {} entries to {}", - numEntriesToWrite, ledgerId); - List entries = rng.ints(numEntriesToWrite, 0, maxInt) - .mapToObj(i -> { - ByteBuf entry = Unpooled.buffer(4); - entry.retain(); - entry.writeInt(i); - return entry; - }) - 
.collect(Collectors.toList()); - CompletableFuture lastRequest = null; - int i = 0; - for (ByteBuf entry : entries) { - long entryId = i++; - LOG.info("Writing {}:{} as {}", - ledgerId, entryId, entry.slice().readInt()); - lastRequest = writer.writeAsync(entryId, entry); - } - return lastRequest - .thenApply(___ -> Pair.of(writer, entries)); - }); - }) - .parallel().map(CompletableFuture::join) // wait for all creations and adds in parallel - .forEach(e -> { // check that each set of adds succeeded - try { - WriteAdvHandle handle = e.getLeft(); - List entries = e.getRight(); - // Read and verify - LOG.info("Read entries for ledger: {}", handle.getId()); - readEntries(handle, entries); - entries.forEach(ByteBuf::release); - handle.close(); - bkc.deleteLedger(handle.getId()); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - Assert.fail("Test interrupted"); - } catch (Exception ex) { - LOG.info("Readback failed with exception", ex); - Assert.fail("Readback failed " + ex.getMessage()); - } - }); - } - - /** - * In a loop create/write/read/delete the ledger with ledgerId through the - * functionality of Advanced Ledger which accepts ledgerId as input. 
- * In this testcase (other testcases don't cover these conditions, hence new - * testcase is added), we create entries which are greater than - * SKIP_LIST_MAX_ALLOC_ENTRY size and tried to addEntries so that the total - * length of data written in this testcase is much greater than - * SKIP_LIST_SIZE_LIMIT, so that entries will be flushed from EntryMemTable - * to persistent storage - * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvWithLedgerIdInLoop2() throws Exception { - - assertTrue("Here we are expecting Bookies are configured to use SortedLedgerStorage", - baseConf.getSortedLedgerStorageEnabled()); - - long ledgerId; - int ledgerCount = 10; - - List> entryList = new ArrayList>(); - LedgerHandle[] lhArray = new LedgerHandle[ledgerCount]; - long skipListSizeLimit = baseConf.getSkipListSizeLimit(); - int skipListArenaMaxAllocSize = baseConf.getSkipListArenaMaxAllocSize(); - - List tmpEntry; - for (int lc = 0; lc < ledgerCount; lc++) { - tmpEntry = new ArrayList(); - - ledgerId = rng.nextLong(); - ledgerId &= Long.MAX_VALUE; - if (!baseConf.getLedgerManagerFactoryClass().equals(LongHierarchicalLedgerManagerFactory.class)) { - // since LongHierarchicalLedgerManager supports ledgerIds of - // decimal length upto 19 digits but other - // LedgerManagers only upto 10 decimals - ledgerId %= 9999999999L; - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Iteration: {} LedgerId: {}", lc, ledgerId); - } - lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); - lhArray[lc] = lh; - - long ledgerLength = 0; - int i = 0; - while (ledgerLength < ((4 * skipListSizeLimit) / ledgerCount)) { - int length; - if (rng.nextBoolean()) { - length = Math.abs(rng.nextInt()) % (skipListArenaMaxAllocSize); - } else { - // here we want length to be random no. 
in the range of skipListArenaMaxAllocSize and - // 4*skipListArenaMaxAllocSize - length = (Math.abs(rng.nextInt()) % (skipListArenaMaxAllocSize * 3)) + skipListArenaMaxAllocSize; + rng.longs(ledgerCount, 0, maxId) // generate a stream of ledger ids + .mapToObj(ledgerId -> { // create a ledger for each ledger id + LOG.info("Creating adv ledger with id {}", ledgerId); + return bkc.newCreateLedgerOp() + .withEnsembleSize(1).withWriteQuorumSize(1).withAckQuorumSize(1) + .withDigestType(org.apache.bookkeeper.client.api.DigestType.CRC32) + .withPassword(ledgerPassword).makeAdv().withLedgerId(ledgerId) + .execute() + .thenCompose(writer -> { // Add entries to ledger when created + LOG.info("Writing stream of {} entries to {}", + numEntriesToWrite, ledgerId); + List entries = rng.ints(numEntriesToWrite, 0, maxInt) + .mapToObj(i -> { + ByteBuf entry = Unpooled.buffer(4); + entry.retain(); + entry.writeInt(i); + return entry; + }) + .collect(Collectors.toList()); + CompletableFuture lastRequest = null; + int i = 0; + for (ByteBuf entry : entries) { + long entryId = i++; + LOG.info("Writing {}:{} as {}", + ledgerId, entryId, entry.slice().readInt()); + lastRequest = writer.writeAsync(entryId, entry); } - byte[] data = new byte[length]; - rng.nextBytes(data); - tmpEntry.add(data); - lh.addEntry(i, data); - ledgerLength += length; - i++; - } - entryList.add(tmpEntry); - } - for (int lc = 0; lc < ledgerCount; lc++) { + return lastRequest + .thenApply(___ -> Pair.of(writer, entries)); + }); + }) + .parallel().map(CompletableFuture::join) // wait for all creations and adds in parallel + .forEach(e -> { // check that each set of adds succeeded + try { + WriteAdvHandle handle = e.getLeft(); + List entries = e.getRight(); // Read and verify - long lid = lhArray[lc].getId(); - if (LOG.isDebugEnabled()) { - LOG.debug("readEntries for lc: {} ledgerId: {} ", lc, lhArray[lc].getId()); - } - readEntriesAndValidateDataArray(lhArray[lc], entryList.get(lc)); - lhArray[lc].close(); - 
bkc.deleteLedger(lid); - } + LOG.info("Read entries for ledger: {}", handle.getId()); + readEntries(handle, entries); + entries.forEach(ByteBuf::release); + handle.close(); + bkc.deleteLedger(handle.getId()); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + fail("Test interrupted"); + } catch (Exception ex) { + LOG.info("Readback failed with exception", ex); + fail("Readback failed " + ex.getMessage()); + } + }); + } + + /** + * In a loop create/write/read/delete the ledger with ledgerId through the functionality of + * Advanced Ledger which accepts ledgerId as input. In this testcase (other testcases don't cover + * these conditions, hence new testcase is added), we create entries which are greater than + * SKIP_LIST_MAX_ALLOC_ENTRY size and tried to addEntries so that the total length of data written + * in this testcase is much greater than SKIP_LIST_SIZE_LIMIT, so that entries will be flushed + * from EntryMemTable to persistent storage + * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithLedgerIdInLoop2(boolean useV2, boolean writeJournal) + throws Exception { + + initBookieWriteLedgerTest(useV2, writeJournal); + + assertTrue(baseConf.getSortedLedgerStorageEnabled(), + "Here we are expecting Bookies are configured to use SortedLedgerStorage"); + + long ledgerId; + int ledgerCount = 10; + + List> entryList = new ArrayList>(); + LedgerHandle[] lhArray = new LedgerHandle[ledgerCount]; + long skipListSizeLimit = baseConf.getSkipListSizeLimit(); + int skipListArenaMaxAllocSize = baseConf.getSkipListArenaMaxAllocSize(); + + List tmpEntry; + for (int lc = 0; lc < ledgerCount; lc++) { + tmpEntry = new ArrayList(); + + ledgerId = rng.nextLong(); + ledgerId &= Long.MAX_VALUE; + if (!baseConf.getLedgerManagerFactoryClass() + .equals(LongHierarchicalLedgerManagerFactory.class)) { + // since LongHierarchicalLedgerManager supports ledgerIds of + // decimal length upto 19 digits but other + 
// LedgerManagers only upto 10 decimals + ledgerId %= 9999999999L; + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Iteration: {} LedgerId: {}", lc, ledgerId); + } + lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); + lhArray[lc] = lh; + + long ledgerLength = 0; + int i = 0; + while (ledgerLength < ((4 * skipListSizeLimit) / ledgerCount)) { + int length; + if (rng.nextBoolean()) { + length = Math.abs(rng.nextInt()) % (skipListArenaMaxAllocSize); + } else { + // here we want length to be random no. in the range of skipListArenaMaxAllocSize and + // 4*skipListArenaMaxAllocSize + length = (Math.abs(rng.nextInt()) % (skipListArenaMaxAllocSize * 3)) + + skipListArenaMaxAllocSize; + } + byte[] data = new byte[length]; + rng.nextBytes(data); + tmpEntry.add(data); + lh.addEntry(i, data); + ledgerLength += length; + i++; + } + entryList.add(tmpEntry); } - - /** - * Verify asynchronous writing when few bookie failures in last ensemble. - */ - @Test - public void testAsyncWritesWithMultipleFailuresInLastEnsemble() - throws Exception { - // Create ledgers - lh = bkc.createLedger(5, 4, digestType, ledgerPassword); - lh2 = bkc.createLedger(5, 4, digestType, ledgerPassword); - - LOG.info("Ledger ID-1: " + lh.getId()); - LOG.info("Ledger ID-2: " + lh2.getId()); - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - entries2.add(entry.array()); - lh.addEntry(entry.array()); - lh2.addEntry(entry.array()); - } - // Start three more bookies - startNewBookie(); - startNewBookie(); - startNewBookie(); - - // Shutdown three bookies in the last ensemble and continue writing - List ensemble = lh.getLedgerMetadata() - .getAllEnsembles().entrySet().iterator().next().getValue(); - killBookie(ensemble.get(0)); - killBookie(ensemble.get(1)); - killBookie(ensemble.get(2)); - - // adding one more entry to both the ledgers async after 
multiple bookie - // failures. This will do asynchronously modifying the ledger metadata - // simultaneously. - numEntriesToWrite++; - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(entry.array()); - entries2.add(entry.array()); - - SyncObj syncObj1 = new SyncObj(); - SyncObj syncObj2 = new SyncObj(); - lh.asyncAddEntry(entry.array(), this, syncObj1); - lh2.asyncAddEntry(entry.array(), this, syncObj2); - - // wait for all entries to be acknowledged for the first ledger - synchronized (syncObj1) { - while (syncObj1.counter < 1) { - if (LOG.isDebugEnabled()) { - LOG.debug("Entries counter = " + syncObj1.counter); - } - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } - // wait for all entries to be acknowledged for the second ledger - synchronized (syncObj2) { - while (syncObj2.counter < 1) { - if (LOG.isDebugEnabled()) { - LOG.debug("Entries counter = " + syncObj2.counter); - } - syncObj2.wait(); - } - assertEquals(BKException.Code.OK, syncObj2.rc); - } - - // reading ledger till the last entry - readEntries(lh, entries1); - readEntries(lh2, entries2); - lh.close(); - lh2.close(); + for (int lc = 0; lc < ledgerCount; lc++) { + // Read and verify + long lid = lhArray[lc].getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("readEntries for lc: {} ledgerId: {} ", lc, lhArray[lc].getId()); + } + readEntriesAndValidateDataArray(lhArray[lc], entryList.get(lc)); + lhArray[lc].close(); + bkc.deleteLedger(lid); + } + } + + /** + * Verify asynchronous writing when few bookie failures in last ensemble. 
+ */ + @MethodSource("data") + @ParameterizedTest + public void asyncWritesWithMultipleFailuresInLastEnsemble(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create ledgers + lh = bkc.createLedger(5, 4, digestType, ledgerPassword); + lh2 = bkc.createLedger(5, 4, digestType, ledgerPassword); + + LOG.info("Ledger ID-1: " + lh.getId()); + LOG.info("Ledger ID-2: " + lh2.getId()); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + entries2.add(entry.array()); + lh.addEntry(entry.array()); + lh2.addEntry(entry.array()); + } + // Start three more bookies + startNewBookie(); + startNewBookie(); + startNewBookie(); + + // Shutdown three bookies in the last ensemble and continue writing + List ensemble = lh.getLedgerMetadata() + .getAllEnsembles().entrySet().iterator().next().getValue(); + killBookie(ensemble.get(0)); + killBookie(ensemble.get(1)); + killBookie(ensemble.get(2)); + + // adding one more entry to both the ledgers async after multiple bookie + // failures. This will do asynchronously modifying the ledger metadata + // simultaneously. 
+ numEntriesToWrite++; + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(entry.array()); + entries2.add(entry.array()); + + SyncObj syncObj1 = new SyncObj(); + SyncObj syncObj2 = new SyncObj(); + lh.asyncAddEntry(entry.array(), this, syncObj1); + lh2.asyncAddEntry(entry.array(), this, syncObj2); + + // wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < 1) { + if (LOG.isDebugEnabled()) { + LOG.debug("Entries counter = " + syncObj1.counter); + } + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); + } + // wait for all entries to be acknowledged for the second ledger + synchronized (syncObj2) { + while (syncObj2.counter < 1) { + if (LOG.isDebugEnabled()) { + LOG.debug("Entries counter = " + syncObj2.counter); + } + syncObj2.wait(); + } + assertEquals(BKException.Code.OK, syncObj2.rc); } - /** - * Verify Advanced asynchronous writing with entryIds in reverse order. 
- */ - @Test - public void testLedgerCreateAdvWithAsyncWritesWithBookieFailures() throws Exception { - // Create ledgers - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - lh2 = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - - LOG.info("Ledger ID-1: " + lh.getId()); - LOG.info("Ledger ID-2: " + lh2.getId()); - SyncObj syncObj1 = new SyncObj(); - SyncObj syncObj2 = new SyncObj(); - for (int i = numEntriesToWrite - 1; i >= 0; i--) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - try { - entries1.add(0, entry.array()); - entries2.add(0, entry.array()); - } catch (Exception e) { - e.printStackTrace(); - } - lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); - lh2.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj2); - } - // Start One more bookie and shutdown one from last ensemble before reading - startNewBookie(); - List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() - .getValue(); - killBookie(ensemble.get(0)); - - // Wait for all entries to be acknowledged for the first ledger - synchronized (syncObj1) { - while (syncObj1.counter < numEntriesToWrite) { - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } - // Wait for all entries to be acknowledged for the second ledger - synchronized (syncObj2) { - while (syncObj2.counter < numEntriesToWrite) { - syncObj2.wait(); - } - assertEquals(BKException.Code.OK, syncObj2.rc); - } - - // Reading ledger till the last entry - readEntries(lh, entries1); - readEntries(lh2, entries2); - lh.close(); - lh2.close(); + // reading ledger till the last entry + readEntries(lh, entries1); + readEntries(lh2, entries2); + lh.close(); + lh2.close(); + } + + /** + * Verify Advanced asynchronous writing with entryIds in reverse order. 
+ */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithAsyncWritesWithBookieFailures(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create ledgers + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + lh2 = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + + LOG.info("Ledger ID-1: " + lh.getId()); + LOG.info("Ledger ID-2: " + lh2.getId()); + SyncObj syncObj1 = new SyncObj(); + SyncObj syncObj2 = new SyncObj(); + for (int i = numEntriesToWrite - 1; i >= 0; i--) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + try { + entries1.add(0, entry.array()); + entries2.add(0, entry.array()); + } catch (Exception e) { + e.printStackTrace(); + } + lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); + lh2.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj2); + } + // Start One more bookie and shutdown one from last ensemble before reading + startNewBookie(); + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() + .getValue(); + killBookie(ensemble.get(0)); + + // Wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < numEntriesToWrite) { + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); + } + // Wait for all entries to be acknowledged for the second ledger + synchronized (syncObj2) { + while (syncObj2.counter < numEntriesToWrite) { + syncObj2.wait(); + } + assertEquals(BKException.Code.OK, syncObj2.rc); } - /** - * LedgerHandleAdv out of order writers with ensemble changes. - * Verify that entry that was written to old ensemble will be - * written to new enseble too after ensemble change. 
- * - * @throws Exception - */ - @Test - public void testLedgerHandleAdvOutOfOrderWriteAndFrocedEnsembleChange() throws Exception { - // Create a ledger - long ledgerId = 0xABCDEF; - SyncObj syncObj1 = new SyncObj(); - ByteBuffer entry; - lh = bkc.createLedgerAdv(ledgerId, 3, 3, 3, digestType, ledgerPassword, null); - entry = ByteBuffer.allocate(4); - // Add entries 0-4 - for (int i = 0; i < 5; i++) { - entry.rewind(); - entry.putInt(rng.nextInt(maxInt)); - lh.addEntry(i, entry.array()); - } + // Reading ledger till the last entry + readEntries(lh, entries1); + readEntries(lh2, entries2); + lh.close(); + lh2.close(); + } + + /** + * LedgerHandleAdv out of order writers with ensemble changes. Verify that entry that was written + * to old ensemble will be written to new enseble too after ensemble change. + * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerHandleAdvOutOfOrderWriteAndFrocedEnsembleChange(boolean useV2, + boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + long ledgerId = 0xABCDEF; + SyncObj syncObj1 = new SyncObj(); + ByteBuffer entry; + lh = bkc.createLedgerAdv(ledgerId, 3, 3, 3, digestType, ledgerPassword, null); + entry = ByteBuffer.allocate(4); + // Add entries 0-4 + for (int i = 0; i < 5; i++) { + entry.rewind(); + entry.putInt(rng.nextInt(maxInt)); + lh.addEntry(i, entry.array()); + } - // Add 10 as Async Entry, which goes to first ensemble - ByteBuffer entry1 = ByteBuffer.allocate(4); - entry1.putInt(rng.nextInt(maxInt)); - lh.asyncAddEntry(10, entry1.array(), 0, entry1.capacity(), this, syncObj1); - - // Make sure entry-10 goes to the bookies and gets response. 
- java.util.Queue myPendingAddOps = lh.getPendingAddOps(); - PendingAddOp addOp = null; - boolean pendingAddOpReceived = false; - - while (!pendingAddOpReceived) { - addOp = myPendingAddOps.peek(); - if (addOp.entryId == 10 && addOp.completed) { - pendingAddOpReceived = true; - } else { - Thread.sleep(1000); - } - } + // Add 10 as Async Entry, which goes to first ensemble + ByteBuffer entry1 = ByteBuffer.allocate(4); + entry1.putInt(rng.nextInt(maxInt)); + lh.asyncAddEntry(10, entry1.array(), 0, entry1.capacity(), this, syncObj1); + + // Make sure entry-10 goes to the bookies and gets response. + java.util.Queue myPendingAddOps = lh.getPendingAddOps(); + PendingAddOp addOp = null; + boolean pendingAddOpReceived = false; + + while (!pendingAddOpReceived) { + addOp = myPendingAddOps.peek(); + if (addOp.entryId == 10 && addOp.completed) { + pendingAddOpReceived = true; + } else { + Thread.sleep(1000); + } + } - CountDownLatch sleepLatch1 = new CountDownLatch(1); - List ensemble; - - ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue(); - - // Put all 3 bookies to sleep and start 3 new ones - sleepBookie(ensemble.get(0), sleepLatch1); - sleepBookie(ensemble.get(1), sleepLatch1); - sleepBookie(ensemble.get(2), sleepLatch1); - startNewBookie(); - startNewBookie(); - startNewBookie(); - - // Original bookies are in sleep, new bookies added. - // Now add entries 5-9 which forces ensemble changes - // So at this point entries 0-4, 10 went to first - // ensemble, 5-9 will go to new ensemble. 
- for (int i = 5; i < 10; i++) { - entry.rewind(); - entry.putInt(rng.nextInt(maxInt)); - lh.addEntry(i, entry.array()); - } + CountDownLatch sleepLatch1 = new CountDownLatch(1); + List ensemble; + + ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue(); + + // Put all 3 bookies to sleep and start 3 new ones + sleepBookie(ensemble.get(0), sleepLatch1); + sleepBookie(ensemble.get(1), sleepLatch1); + sleepBookie(ensemble.get(2), sleepLatch1); + startNewBookie(); + startNewBookie(); + startNewBookie(); + + // Original bookies are in sleep, new bookies added. + // Now add entries 5-9 which forces ensemble changes + // So at this point entries 0-4, 10 went to first + // ensemble, 5-9 will go to new ensemble. + for (int i = 5; i < 10; i++) { + entry.rewind(); + entry.putInt(rng.nextInt(maxInt)); + lh.addEntry(i, entry.array()); + } - // Wakeup all 3 bookies that went to sleep - sleepLatch1.countDown(); + // Wakeup all 3 bookies that went to sleep + sleepLatch1.countDown(); - // Wait for all entries to be acknowledged for the first ledger - synchronized (syncObj1) { - while (syncObj1.counter < 1) { - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } + // Wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < 1) { + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); + } - // Close write handle - lh.close(); + // Close write handle + lh.close(); - // Open read handle - lh = bkc.openLedger(ledgerId, digestType, ledgerPassword); + // Open read handle + lh = bkc.openLedger(ledgerId, digestType, ledgerPassword); - // Make sure to read all 10 entries. - for (int i = 0; i < 11; i++) { - lh.readEntries(i, i); - } - lh.close(); - bkc.deleteLedger(ledgerId); + // Make sure to read all 10 entries. 
+ for (int i = 0; i < 11; i++) { + lh.readEntries(i, i); + } + lh.close(); + bkc.deleteLedger(ledgerId); + } + + /** + * Verify Advanced asynchronous writing with entryIds in pseudo random order with bookie failures + * between writes. + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithRandomAsyncWritesWithBookieFailuresBetweenWrites(boolean useV2, + boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create ledgers + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + lh2 = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + + LOG.info("Ledger ID-1: " + lh.getId()); + LOG.info("Ledger ID-2: " + lh2.getId()); + SyncObj syncObj1 = new SyncObj(); + SyncObj syncObj2 = new SyncObj(); + int batchSize = 5; + int i, j; + + // Fill the result buffers first + for (i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + try { + entries1.add(0, entry.array()); + entries2.add(0, entry.array()); + } catch (Exception e) { + e.printStackTrace(); + } } - /** - * Verify Advanced asynchronous writing with entryIds in pseudo random order with bookie failures between writes. 
- */ - @Test - public void testLedgerCreateAdvWithRandomAsyncWritesWithBookieFailuresBetweenWrites() throws Exception { - // Create ledgers - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - lh2 = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - - LOG.info("Ledger ID-1: " + lh.getId()); - LOG.info("Ledger ID-2: " + lh2.getId()); - SyncObj syncObj1 = new SyncObj(); - SyncObj syncObj2 = new SyncObj(); - int batchSize = 5; - int i, j; - - // Fill the result buffers first - for (i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - try { - entries1.add(0, entry.array()); - entries2.add(0, entry.array()); - } catch (Exception e) { - e.printStackTrace(); - } - } - - for (i = 0; i < batchSize; i++) { - for (j = i; j < numEntriesToWrite; j = j + batchSize) { - byte[] entry1 = entries1.get(j); - byte[] entry2 = entries2.get(j); - lh.asyncAddEntry(j, entry1, 0, entry1.length, this, syncObj1); - lh2.asyncAddEntry(j, entry2, 0, entry2.length, this, syncObj2); - if (j == numEntriesToWrite / 2) { - // Start One more bookie and shutdown one from last ensemble at half-way - startNewBookie(); - List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet() - .iterator().next().getValue(); - killBookie(ensemble.get(0)); - } - } - } + for (i = 0; i < batchSize; i++) { + for (j = i; j < numEntriesToWrite; j = j + batchSize) { + byte[] entry1 = entries1.get(j); + byte[] entry2 = entries2.get(j); + lh.asyncAddEntry(j, entry1, 0, entry1.length, this, syncObj1); + lh2.asyncAddEntry(j, entry2, 0, entry2.length, this, syncObj2); + if (j == numEntriesToWrite / 2) { + // Start One more bookie and shutdown one from last ensemble at half-way + startNewBookie(); + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet() + .iterator().next().getValue(); + killBookie(ensemble.get(0)); + } + } + } - // Wait for all entries to be acknowledged for the first ledger - 
synchronized (syncObj1) { - while (syncObj1.counter < numEntriesToWrite) { - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } - // Wait for all entries to be acknowledged for the second ledger - synchronized (syncObj2) { - while (syncObj2.counter < numEntriesToWrite) { - syncObj2.wait(); - } - assertEquals(BKException.Code.OK, syncObj2.rc); - } + // Wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < numEntriesToWrite) { + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); + } + // Wait for all entries to be acknowledged for the second ledger + synchronized (syncObj2) { + while (syncObj2.counter < numEntriesToWrite) { + syncObj2.wait(); + } + assertEquals(BKException.Code.OK, syncObj2.rc); + } - // Reading ledger till the last entry - readEntries(lh, entries1); - readEntries(lh2, entries2); - lh.close(); - lh2.close(); + // Reading ledger till the last entry + readEntries(lh, entries1); + readEntries(lh2, entries2); + lh.close(); + lh2.close(); + } + + /** + * Verify Advanced asynchronous writing with entryIds in pseudo random order. 
+ */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithRandomAsyncWritesWithBookieFailures(boolean useV2, + boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create ledgers + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + lh2 = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + + LOG.info("Ledger ID-1: " + lh.getId()); + LOG.info("Ledger ID-2: " + lh2.getId()); + SyncObj syncObj1 = new SyncObj(); + SyncObj syncObj2 = new SyncObj(); + int batchSize = 5; + int i, j; + + // Fill the result buffers first + for (i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + try { + entries1.add(0, entry.array()); + entries2.add(0, entry.array()); + } catch (Exception e) { + e.printStackTrace(); + } } - /** - * Verify Advanced asynchronous writing with entryIds in pseudo random order. - */ - @Test - public void testLedgerCreateAdvWithRandomAsyncWritesWithBookieFailures() throws Exception { - // Create ledgers - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - lh2 = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - - LOG.info("Ledger ID-1: " + lh.getId()); - LOG.info("Ledger ID-2: " + lh2.getId()); - SyncObj syncObj1 = new SyncObj(); - SyncObj syncObj2 = new SyncObj(); - int batchSize = 5; - int i, j; - - // Fill the result buffers first - for (i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - try { - entries1.add(0, entry.array()); - entries2.add(0, entry.array()); - } catch (Exception e) { - e.printStackTrace(); - } - } + for (i = 0; i < batchSize; i++) { + for (j = i; j < numEntriesToWrite; j = j + batchSize) { + byte[] entry1 = entries1.get(j); + byte[] entry2 = entries2.get(j); + lh.asyncAddEntry(j, entry1, 0, entry1.length, this, syncObj1); + lh2.asyncAddEntry(j, 
entry2, 0, entry2.length, this, syncObj2); + } + } + // Start One more bookie and shutdown one from last ensemble before reading + startNewBookie(); + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() + .getValue(); + killBookie(ensemble.get(0)); + + // Wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < numEntriesToWrite) { + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); + } + // Wait for all entries to be acknowledged for the second ledger + synchronized (syncObj2) { + while (syncObj2.counter < numEntriesToWrite) { + syncObj2.wait(); + } + assertEquals(BKException.Code.OK, syncObj2.rc); + } - for (i = 0; i < batchSize; i++) { - for (j = i; j < numEntriesToWrite; j = j + batchSize) { - byte[] entry1 = entries1.get(j); - byte[] entry2 = entries2.get(j); - lh.asyncAddEntry(j, entry1, 0, entry1.length, this, syncObj1); - lh2.asyncAddEntry(j, entry2, 0, entry2.length, this, syncObj2); - } - } - // Start One more bookie and shutdown one from last ensemble before reading - startNewBookie(); - List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() - .getValue(); - killBookie(ensemble.get(0)); - - // Wait for all entries to be acknowledged for the first ledger - synchronized (syncObj1) { - while (syncObj1.counter < numEntriesToWrite) { - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } - // Wait for all entries to be acknowledged for the second ledger + // Reading ledger till the last entry + readEntries(lh, entries1); + readEntries(lh2, entries2); + lh.close(); + lh2.close(); + } + + /** + * Skips few entries before closing the ledger and assert that the lastAddConfirmed is right + * before our skipEntryId. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvWithSkipEntries(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + long ledgerId; + SyncObj syncObj1 = new SyncObj(); + + // Create a ledger + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + // Save ledgerId to reopen the ledger + ledgerId = lh.getId(); + LOG.info("Ledger ID: " + ledgerId); + int skipEntryId = rng.nextInt(numEntriesToWrite - 1); + for (int i = numEntriesToWrite - 1; i >= 0; i--) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + try { + entries1.add(0, entry.array()); + } catch (Exception e) { + e.printStackTrace(); + } + if (i == skipEntryId) { + LOG.info("Skipping entry:{}", skipEntryId); + continue; + } + lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); + } + // wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < skipEntryId) { + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); + } + // Close the ledger + lh.close(); + // Open the ledger + lh = bkc.openLedger(ledgerId, digestType, ledgerPassword); + assertEquals(lh.lastAddConfirmed, skipEntryId - 1); + lh.close(); + } + + /** + * Verify the functionality LedgerHandleAdv addEntry with duplicate entryIds. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvSyncAddDuplicateEntryIds(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + // Create a ledger + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + LOG.info("Ledger ID: " + lh.getId()); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + + entries1.add(entry.array()); + lh.addEntry(i, entry.array()); + entry.position(0); + } + readEntries(lh, entries1); + + int dupEntryId = rng.nextInt(numEntriesToWrite - 1); + + try { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + lh.addEntry(dupEntryId, entry.array()); + fail("Expected exception not thrown"); + } catch (BKException e) { + // This test expects DuplicateEntryIdException + assertEquals(BKException.Code.DuplicateEntryIdException, e.getCode()); + } + lh.close(); + } + + /** + * Verify the functionality LedgerHandleAdv asyncAddEntry with duplicate entryIds. 
+ * + * @throws Exception + */ + @MethodSource("data") + @ParameterizedTest + public void ledgerCreateAdvSyncAsyncAddDuplicateEntryIds(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + long ledgerId; + SyncObj syncObj1 = new SyncObj(); + SyncObj syncObj2 = new SyncObj(); + + // Create a ledger + lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); + // Save ledgerId to reopen the ledger + ledgerId = lh.getId(); + LOG.info("Ledger ID: " + ledgerId); + for (int i = numEntriesToWrite - 1; i >= 0; i--) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + try { + entries1.add(0, entry.array()); + } catch (Exception e) { + e.printStackTrace(); + } + lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); + if (rng.nextBoolean()) { + // Attempt to write the same entry + lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj2); synchronized (syncObj2) { - while (syncObj2.counter < numEntriesToWrite) { - syncObj2.wait(); - } - assertEquals(BKException.Code.OK, syncObj2.rc); + while (syncObj2.counter < 1) { + syncObj2.wait(); + } + assertEquals(BKException.Code.DuplicateEntryIdException, syncObj2.rc); } - - // Reading ledger till the last entry - readEntries(lh, entries1); - readEntries(lh2, entries2); - lh.close(); - lh2.close(); + } } - - /** - * Skips few entries before closing the ledger and assert that the - * lastAddConfirmed is right before our skipEntryId. 
- * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvWithSkipEntries() throws Exception { - long ledgerId; - SyncObj syncObj1 = new SyncObj(); - - // Create a ledger - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - // Save ledgerId to reopen the ledger - ledgerId = lh.getId(); - LOG.info("Ledger ID: " + ledgerId); - int skipEntryId = rng.nextInt(numEntriesToWrite - 1); - for (int i = numEntriesToWrite - 1; i >= 0; i--) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - try { - entries1.add(0, entry.array()); - } catch (Exception e) { - e.printStackTrace(); - } - if (i == skipEntryId) { - LOG.info("Skipping entry:{}", skipEntryId); - continue; - } - lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); - } - // wait for all entries to be acknowledged for the first ledger - synchronized (syncObj1) { - while (syncObj1.counter < skipEntryId) { - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } - // Close the ledger - lh.close(); - // Open the ledger - lh = bkc.openLedger(ledgerId, digestType, ledgerPassword); - assertEquals(lh.lastAddConfirmed, skipEntryId - 1); - lh.close(); - } - - /** - * Verify the functionality LedgerHandleAdv addEntry with duplicate entryIds. 
- * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvSyncAddDuplicateEntryIds() throws Exception { - // Create a ledger - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - LOG.info("Ledger ID: " + lh.getId()); - for (int i = 0; i < numEntriesToWrite; i++) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - - entries1.add(entry.array()); - lh.addEntry(i, entry.array()); - entry.position(0); - } - readEntries(lh, entries1); - - int dupEntryId = rng.nextInt(numEntriesToWrite - 1); - - try { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - lh.addEntry(dupEntryId, entry.array()); - fail("Expected exception not thrown"); - } catch (BKException e) { - // This test expects DuplicateEntryIdException - assertEquals(e.getCode(), BKException.Code.DuplicateEntryIdException); - } - lh.close(); + // Wait for all entries to be acknowledged for the first ledger + synchronized (syncObj1) { + while (syncObj1.counter < numEntriesToWrite) { + syncObj1.wait(); + } + assertEquals(BKException.Code.OK, syncObj1.rc); } - - /** - * Verify the functionality LedgerHandleAdv asyncAddEntry with duplicate - * entryIds. 
- * - * @throws Exception - */ - @Test - public void testLedgerCreateAdvSyncAsyncAddDuplicateEntryIds() throws Exception { - long ledgerId; - SyncObj syncObj1 = new SyncObj(); - SyncObj syncObj2 = new SyncObj(); - - // Create a ledger - lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword); - // Save ledgerId to reopen the ledger - ledgerId = lh.getId(); - LOG.info("Ledger ID: " + ledgerId); - for (int i = numEntriesToWrite - 1; i >= 0; i--) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - try { - entries1.add(0, entry.array()); - } catch (Exception e) { - e.printStackTrace(); - } - lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); - if (rng.nextBoolean()) { - // Attempt to write the same entry - lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj2); - synchronized (syncObj2) { - while (syncObj2.counter < 1) { - syncObj2.wait(); - } - assertEquals(BKException.Code.DuplicateEntryIdException, syncObj2.rc); - } - } - } - // Wait for all entries to be acknowledged for the first ledger - synchronized (syncObj1) { - while (syncObj1.counter < numEntriesToWrite) { - syncObj1.wait(); - } - assertEquals(BKException.Code.OK, syncObj1.rc); - } - // Close the ledger - lh.close(); - } - - @Test - @SuppressWarnings("unchecked") - public void testLedgerCreateAdvByteBufRefCnt() throws Exception { - long ledgerId = rng.nextLong(); - ledgerId &= Long.MAX_VALUE; - if (!baseConf.getLedgerManagerFactoryClass().equals(LongHierarchicalLedgerManagerFactory.class)) { - // since LongHierarchicalLedgerManager supports ledgerIds of - // decimal length upto 19 digits but other - // LedgerManagers only upto 10 decimals - ledgerId %= 9999999999L; - } - - final LedgerHandle lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); - - final List allocs = Lists.newArrayList( - new PooledByteBufAllocator(true), - new PooledByteBufAllocator(false), - new 
UnpooledByteBufAllocator(true), - new UnpooledByteBufAllocator(false)); - - long entryId = 0; - for (AbstractByteBufAllocator alloc: allocs) { - final ByteBuf data = alloc.buffer(10); - data.writeBytes(("fragment0" + entryId).getBytes()); - assertEquals("ref count on ByteBuf should be 1", 1, data.refCnt()); - - CompletableFuture cf = new CompletableFuture<>(); - lh.asyncAddEntry(entryId, data, (rc, handle, eId, qwcLatency, ctx) -> { - CompletableFuture future = (CompletableFuture) ctx; - future.complete(rc); - }, cf); - - int rc = cf.get(); - assertEquals("rc code is OK", BKException.Code.OK, rc); - - for (int i = 0; i < 10; i++) { - if (data.refCnt() == 0) { - break; - } - TimeUnit.MILLISECONDS.sleep(250); // recycler runs asynchronously - } - assertEquals("writing entry with id " + entryId + ", ref count on ByteBuf should be 0 ", - 0, data.refCnt()); - - org.apache.bookkeeper.client.api.LedgerEntry e = lh.read(entryId, entryId).getEntry(entryId); - assertEquals("entry data is correct", "fragment0" + entryId, new String(e.getEntryBytes())); - entryId++; - } - - bkc.deleteLedger(lh.ledgerId); + // Close the ledger + lh.close(); + } + + @MethodSource("data") + @ParameterizedTest + @SuppressWarnings("unchecked") + public void ledgerCreateAdvByteBufRefCnt(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + long ledgerId = rng.nextLong(); + ledgerId &= Long.MAX_VALUE; + if (!baseConf.getLedgerManagerFactoryClass() + .equals(LongHierarchicalLedgerManagerFactory.class)) { + // since LongHierarchicalLedgerManager supports ledgerIds of + // decimal length upto 19 digits but other + // LedgerManagers only upto 10 decimals + ledgerId %= 9999999999L; } - @Test - @SuppressWarnings("unchecked") - public void testLedgerCreateByteBufRefCnt() throws Exception { - final LedgerHandle lh = bkc.createLedger(5, 3, 2, digestType, ledgerPassword, null); - - final List allocs = Lists.newArrayList( - new PooledByteBufAllocator(true), 
- new PooledByteBufAllocator(false), - new UnpooledByteBufAllocator(true), - new UnpooledByteBufAllocator(false)); - - int entryId = 0; - for (AbstractByteBufAllocator alloc: allocs) { - final ByteBuf data = alloc.buffer(10); - data.writeBytes(("fragment0" + entryId).getBytes()); - assertEquals("ref count on ByteBuf should be 1", 1, data.refCnt()); - - CompletableFuture cf = new CompletableFuture<>(); - lh.asyncAddEntry(data, (rc, handle, eId, ctx) -> { - CompletableFuture future = (CompletableFuture) ctx; - future.complete(rc); - }, cf); - - int rc = cf.get(); - assertEquals("rc code is OK", BKException.Code.OK, rc); - - for (int i = 0; i < 10; i++) { - if (data.refCnt() == 0) { - break; - } - TimeUnit.MILLISECONDS.sleep(250); // recycler runs asynchronously - } - assertEquals("writing entry with id " + entryId + ", ref count on ByteBuf should be 0 ", - 0, data.refCnt()); - - org.apache.bookkeeper.client.api.LedgerEntry e = lh.read(entryId, entryId).getEntry(entryId); - assertEquals("entry data is correct", "fragment0" + entryId, new String(e.getEntryBytes())); - entryId++; - } - - bkc.deleteLedger(lh.ledgerId); + final LedgerHandle lh = bkc + .createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null); + + final List allocs = Lists.newArrayList( + new PooledByteBufAllocator(true), + new PooledByteBufAllocator(false), + new UnpooledByteBufAllocator(true), + new UnpooledByteBufAllocator(false)); + + long entryId = 0; + for (AbstractByteBufAllocator alloc : allocs) { + final ByteBuf data = alloc.buffer(10); + data.writeBytes(("fragment0" + entryId).getBytes()); + assertEquals(1, data.refCnt(), "ref count on ByteBuf should be 1"); + + CompletableFuture cf = new CompletableFuture<>(); + lh.asyncAddEntry(entryId, data, (rc, handle, eId, qwcLatency, ctx) -> { + CompletableFuture future = (CompletableFuture) ctx; + future.complete(rc); + }, cf); + + int rc = cf.get(); + assertEquals(BKException.Code.OK, rc, "rc code is OK"); + + for (int i = 0; i < 10; i++) { + 
if (data.refCnt() == 0) { + break; + } + TimeUnit.MILLISECONDS.sleep(250); // recycler runs asynchronously + } + assertEquals(0, data.refCnt(), + "writing entry with id " + entryId + ", ref count on ByteBuf should be 0 "); + + org.apache.bookkeeper.client.api.LedgerEntry e = lh.read(entryId, entryId).getEntry(entryId); + assertEquals("fragment0" + entryId, new String(e.getEntryBytes()), "entry data is correct"); + entryId++; } - @Test - public void testReadLacNotSameWithMetadataLedgerReplication() throws Exception { - lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); - for (int i = 0; i < 10; ++i) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - lh.addEntry(entry.array()); - } - - List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue(); - assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); - killBookie(ensemble.get(1)); - - try { - lh.ensembleChangeLoop(ensemble, Collections.singletonMap(1, ensemble.get(1))); - } catch (Exception e) { - fail(); - } - - LedgerHandle lh1 = bkc.openLedgerNoRecovery(lh.ledgerId, digestType, ledgerPassword); - assertEquals(2, lh1.getLedgerMetadata().getAllEnsembles().size()); - List firstEnsemble = lh1.getLedgerMetadata().getAllEnsembles().firstEntry().getValue(); - - long entryId = lh1.getLedgerMetadata().getAllEnsembles().lastEntry().getKey() - 1; - try { - lh1.readAsync(entryId, entryId).get(); - fail(); - } catch (Exception e) { - LOG.info("Failed to read entry: {} ", entryId, e); - } - - MetadataBookieDriver driver = MetadataDrivers.getBookieDriver( - URI.create(baseConf.getMetadataServiceUri())); - driver.initialize( - baseConf, - NullStatsLogger.INSTANCE); - // initialize urReplicationManager - LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); - LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); - 
baseConf.setOpenLedgerRereplicationGracePeriod(String.valueOf(30)); - - - ReplicationWorker replicationWorker = new ReplicationWorker(baseConf); - replicationWorker.start(); - String basePath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseClientConf) + '/' - + BookKeeperConstants.UNDER_REPLICATION_NODE - + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; - - try { - underReplicationManager.markLedgerUnderreplicated(lh1.getId(), ensemble.get(1).toString()); - - Awaitility.waitAtMost(30, TimeUnit.SECONDS).untilAsserted(() -> - assertFalse(ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh1.getId(), basePath)) - ); - - assertNotEquals(firstEnsemble, lh1.getLedgerMetadata().getAllEnsembles().firstEntry().getValue()); - } finally { - replicationWorker.shutdown(); - } + bkc.deleteLedger(lh.ledgerId); + } + + @MethodSource("data") + @ParameterizedTest + @SuppressWarnings("unchecked") + public void ledgerCreateByteBufRefCnt(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + final LedgerHandle lh = bkc.createLedger(5, 3, 2, digestType, ledgerPassword, null); + + final List allocs = Lists.newArrayList( + new PooledByteBufAllocator(true), + new PooledByteBufAllocator(false), + new UnpooledByteBufAllocator(true), + new UnpooledByteBufAllocator(false)); + + int entryId = 0; + for (AbstractByteBufAllocator alloc : allocs) { + final ByteBuf data = alloc.buffer(10); + data.writeBytes(("fragment0" + entryId).getBytes()); + assertEquals(1, data.refCnt(), "ref count on ByteBuf should be 1"); + + CompletableFuture cf = new CompletableFuture<>(); + lh.asyncAddEntry(data, (rc, handle, eId, ctx) -> { + CompletableFuture future = (CompletableFuture) ctx; + future.complete(rc); + }, cf); + + int rc = cf.get(); + assertEquals(BKException.Code.OK, rc, "rc code is OK"); + + for (int i = 0; i < 10; i++) { + if (data.refCnt() == 0) { + break; + } + TimeUnit.MILLISECONDS.sleep(250); // recycler runs asynchronously + } + 
assertEquals(0, data.refCnt(), + "writing entry with id " + entryId + ", ref count on ByteBuf should be 0 "); + + org.apache.bookkeeper.client.api.LedgerEntry e = lh.read(entryId, entryId).getEntry(entryId); + assertEquals("fragment0" + entryId, new String(e.getEntryBytes()), "entry data is correct"); + entryId++; } - @Test - public void testLedgerMetadataTest() throws Exception { - baseClientConf.setLedgerMetadataFormatVersion(LedgerMetadataSerDe.METADATA_FORMAT_VERSION_2); - BookKeeperTestClient bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); - // Create a ledger - lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); - assertEquals(lh.getLedgerMetadata().getMetadataFormatVersion(), LedgerMetadataSerDe.METADATA_FORMAT_VERSION_2); - lh.close(); - } - - private void readEntries(LedgerHandle lh, List entries) throws InterruptedException, BKException { - ls = lh.readEntries(0, numEntriesToWrite - 1); - int index = 0; - while (ls.hasMoreElements()) { - ByteBuffer origbb = ByteBuffer.wrap(entries.get(index++)); - Integer origEntry = origbb.getInt(); - ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry()); - Integer retrEntry = result.getInt(); - if (LOG.isDebugEnabled()) { - LOG.debug("Length of result: " + result.capacity()); - LOG.debug("Original entry: " + origEntry); - LOG.debug("Retrieved entry: " + retrEntry); - } - assertTrue("Checking entry " + index + " for equality", origEntry - .equals(retrEntry)); - } + bkc.deleteLedger(lh.ledgerId); + } + + @MethodSource("data") + @ParameterizedTest + public void readLacNotSameWithMetadataLedgerReplication(boolean useV2, boolean writeJournal) + throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + lh = bkc.createLedger(3, 3, 2, digestType, ledgerPassword); + for (int i = 0; i < 10; ++i) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + lh.addEntry(entry.array()); } - private void readEntries(ReadHandle 
reader, List entries) throws Exception { - assertEquals("Not enough entries in ledger " + reader.getId(), - reader.getLastAddConfirmed(), entries.size() - 1); - try (LedgerEntries readEntries = reader.read(0, reader.getLastAddConfirmed())) { - int i = 0; - for (org.apache.bookkeeper.client.api.LedgerEntry e : readEntries) { - int entryId = i++; - ByteBuf origEntry = entries.get(entryId); - ByteBuf readEntry = e.getEntryBuffer(); - assertEquals("Unexpected contents in " + reader.getId() + ":" + entryId, origEntry, readEntry); - } - } + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next() + .getValue(); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + killBookie(ensemble.get(1)); + + Assertions.assertDoesNotThrow(() -> { + lh.ensembleChangeLoop(ensemble, Collections.singletonMap(1, ensemble.get(1))); + }); + + LedgerHandle lh1 = bkc.openLedgerNoRecovery(lh.ledgerId, digestType, ledgerPassword); + assertEquals(2, lh1.getLedgerMetadata().getAllEnsembles().size()); + List firstEnsemble = lh1.getLedgerMetadata().getAllEnsembles().firstEntry() + .getValue(); + + long entryId = lh1.getLedgerMetadata().getAllEnsembles().lastEntry().getKey() - 1; + try { + lh1.readAsync(entryId, entryId).get(); + fail(); + } catch (Exception e) { + LOG.info("Failed to read entry: {} ", entryId, e); } - private void readEntriesAndValidateDataArray(LedgerHandle lh, List entries) - throws InterruptedException, BKException { - ls = lh.readEntries(0, entries.size() - 1); - int index = 0; - while (ls.hasMoreElements()) { - byte[] originalData = entries.get(index++); - byte[] receivedData = ls.nextElement().getEntry(); - if (LOG.isDebugEnabled()) { - LOG.debug("Length of originalData: {}", originalData.length); - LOG.debug("Length of receivedData: {}", receivedData.length); - } - assertEquals( - String.format("LedgerID: %d EntryID: %d OriginalDataLength: %d ReceivedDataLength: %d", lh.getId(), - (index - 1), originalData.length, 
receivedData.length), - originalData.length, receivedData.length); - Assert.assertArrayEquals( - String.format("Checking LedgerID: %d EntryID: %d for equality", lh.getId(), (index - 1)), - originalData, receivedData); - } + MetadataBookieDriver driver = MetadataDrivers.getBookieDriver( + URI.create(baseConf.getMetadataServiceUri())); + driver.initialize( + baseConf, + NullStatsLogger.INSTANCE); + // initialize urReplicationManager + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager underReplicationManager = mFactory + .newLedgerUnderreplicationManager(); + baseConf.setOpenLedgerRereplicationGracePeriod(String.valueOf(30)); + + ReplicationWorker replicationWorker = new ReplicationWorker(baseConf); + replicationWorker.start(); + String basePath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseClientConf) + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; + + try { + underReplicationManager.markLedgerUnderreplicated(lh1.getId(), ensemble.get(1).toString()); + + Awaitility.waitAtMost(30, TimeUnit.SECONDS).untilAsserted(() -> + assertFalse(ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh1.getId(), basePath)) + ); + + assertNotEquals(firstEnsemble, + lh1.getLedgerMetadata().getAllEnsembles().firstEntry().getValue()); + } finally { + replicationWorker.shutdown(); } - - @Override - public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { - SyncObj x = (SyncObj) ctx; - synchronized (x) { - x.rc = rc; - x.counter++; - x.notify(); - } + } + + @MethodSource("data") + @ParameterizedTest + public void ledgerMetadataTest(boolean useV2, boolean writeJournal) throws Exception { + initBookieWriteLedgerTest(useV2, writeJournal); + baseClientConf.setLedgerMetadataFormatVersion(LedgerMetadataSerDe.METADATA_FORMAT_VERSION_2); + BookKeeperTestClient bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + // Create a ledger + lh = 
bkc.createLedger(3, 3, 2, digestType, ledgerPassword); + assertEquals(LedgerMetadataSerDe.METADATA_FORMAT_VERSION_2, + lh.getLedgerMetadata().getMetadataFormatVersion()); + lh.close(); + } + + private void readEntries(LedgerHandle lh, List entries) + throws InterruptedException, BKException { + ls = lh.readEntries(0, numEntriesToWrite - 1); + int index = 0; + while (ls.hasMoreElements()) { + ByteBuffer origbb = ByteBuffer.wrap(entries.get(index++)); + Integer origEntry = origbb.getInt(); + ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry()); + Integer retrEntry = result.getInt(); + if (LOG.isDebugEnabled()) { + LOG.debug("Length of result: " + result.capacity()); + LOG.debug("Original entry: " + origEntry); + LOG.debug("Retrieved entry: " + retrEntry); + } + assertEquals(origEntry, retrEntry, "Checking entry " + index + " for equality"); + } + } + + private void readEntries(ReadHandle reader, List entries) throws Exception { + assertEquals(reader.getLastAddConfirmed(), entries.size() - 1, + "Not enough entries in ledger " + reader.getId()); + try (LedgerEntries readEntries = reader.read(0, reader.getLastAddConfirmed())) { + int i = 0; + for (org.apache.bookkeeper.client.api.LedgerEntry e : readEntries) { + int entryId = i++; + ByteBuf origEntry = entries.get(entryId); + ByteBuf readEntry = e.getEntryBuffer(); + assertEquals(origEntry, readEntry, + "Unexpected contents in " + reader.getId() + ":" + entryId); + } + } + } + + private void readEntriesAndValidateDataArray(LedgerHandle lh, List entries) + throws InterruptedException, BKException { + ls = lh.readEntries(0, entries.size() - 1); + int index = 0; + while (ls.hasMoreElements()) { + byte[] originalData = entries.get(index++); + byte[] receivedData = ls.nextElement().getEntry(); + if (LOG.isDebugEnabled()) { + LOG.debug("Length of originalData: {}", originalData.length); + LOG.debug("Length of receivedData: {}", receivedData.length); + } + assertEquals( + originalData.length, receivedData.length, 
String + .format("LedgerID: %d EntryID: %d OriginalDataLength: %d ReceivedDataLength: %d", + lh.getId(), + (index - 1), originalData.length, receivedData.length)); + assertArrayEquals( + originalData, receivedData, String + .format("Checking LedgerID: %d EntryID: %d for equality", lh.getId(), (index - 1))); + } + } + + @Override + public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { + SyncObj x = (SyncObj) ctx; + synchronized (x) { + x.rc = rc; + x.counter++; + x.notify(); } + } - static class CorruptReadBookie extends TestBookieImpl { + static class CorruptReadBookie extends TestBookieImpl { - static final Logger LOG = LoggerFactory.getLogger(CorruptReadBookie.class); - ByteBuf localBuf; + static final Logger LOG = LoggerFactory.getLogger(CorruptReadBookie.class); + ByteBuf localBuf; - public CorruptReadBookie(ServerConfiguration conf) - throws Exception { - super(conf); - } + public CorruptReadBookie(ServerConfiguration conf) + throws Exception { + super(conf); + } - @Override - public ByteBuf readEntry(long ledgerId, long entryId) throws IOException, NoLedgerException, BookieException { - localBuf = super.readEntry(ledgerId, entryId); + @Override + public ByteBuf readEntry(long ledgerId, long entryId) + throws IOException, NoLedgerException, BookieException { + localBuf = super.readEntry(ledgerId, entryId); + + int capacity = 0; + while (capacity < localBuf.capacity()) { + localBuf.setByte(capacity, 0); + capacity++; + } + return localBuf; + } - int capacity = 0; - while (capacity < localBuf.capacity()) { - localBuf.setByte(capacity, 0); - capacity++; - } - return localBuf; - } + } - } + public void initBookieWriteLedgerTest(boolean useV2, boolean writeJournal) { + this.useV2 = useV2; + this.writeJournal = writeJournal; + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgersWithDifferentDigestsTest.java 
b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgersWithDifferentDigestsTest.java index c93d65e40aa..1cb10094a56 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgersWithDifferentDigestsTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookieWriteLedgersWithDifferentDigestsTest.java @@ -21,9 +21,9 @@ package org.apache.bookkeeper.client; import static org.apache.bookkeeper.bookie.BookieException.Code.OK; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -33,175 +33,180 @@ import java.util.Random; import org.apache.bookkeeper.client.BookKeeper.DigestType; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Verify reads from ledgers with different digest types. - * This can happen as result of clients using different settings - * yet reading each other data or configuration change roll out. + * Verify reads from ledgers with different digest types. This can happen as result of clients using + * different settings yet reading each other data or configuration change roll out. 
*/ -@RunWith(Parameterized.class) public class BookieWriteLedgersWithDifferentDigestsTest extends BookKeeperClusterTestCase implements AsyncCallback.AddCallbackWithLatency { - private static final Logger LOG = LoggerFactory - .getLogger(BookieWriteLedgersWithDifferentDigestsTest.class); - - byte[] ledgerPassword = "aaa".getBytes(); - LedgerHandle lh; - Enumeration ls; - - // test related variables - final int numEntriesToWrite = 20; - int maxInt = Integer.MAX_VALUE; - Random rng; - ArrayList entries1; // generated entries - ArrayList entries2; // generated entries - - private final DigestType digestType; - private final DigestType otherDigestType; + private static final Logger LOG = LoggerFactory + .getLogger(BookieWriteLedgersWithDifferentDigestsTest.class); + // test related variables + final int numEntriesToWrite = 20; + byte[] ledgerPassword = "aaa".getBytes(); + LedgerHandle lh; + Enumeration ls; + int maxInt = Integer.MAX_VALUE; + Random rng; + ArrayList entries1; // generated entries + ArrayList entries2; // generated entries + + private DigestType digestType; + private DigestType otherDigestType; + + public BookieWriteLedgersWithDifferentDigestsTest() { + super(3); + } + + public static Collection configs() { + return Arrays.asList(new Object[][]{{DigestType.MAC}, {DigestType.CRC32}, {DigestType.CRC32C}}); + } + + @BeforeEach + public void setUp() throws Exception { + super.setUp(); + rng = new Random(System.currentTimeMillis()); // Initialize the Random + // Number Generator + entries1 = new ArrayList(); // initialize the entries list + entries2 = new ArrayList(); // initialize the entries list + } + + public void initBookieWriteLedgersWithDifferentDigestsTest(DigestType digestType) { + this.digestType = digestType; + this.otherDigestType = digestType == DigestType.CRC32 ? 
DigestType.MAC : DigestType.CRC32; + String ledgerManagerFactory = "org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory"; + // set ledger manager + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + } + + @MethodSource("configs") + @ParameterizedTest + public void ledgersWithDifferentDigestTypesNoAutodetection(DigestType digestType) + throws Exception { + initBookieWriteLedgersWithDifferentDigestsTest(digestType); + bkc.conf.setEnableDigestTypeAutodetection(false); + // Create ledgers + lh = bkc.createLedgerAdv(3, 2, 2, digestType, ledgerPassword); + + final long id = lh.ledgerId; + + LOG.info("Ledger ID: {}, digestType: {}", lh.getId(), digestType); + SyncObj syncObj1 = new SyncObj(); + for (int i = numEntriesToWrite - 1; i >= 0; i--) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(0, entry.array()); + lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); + } - private static class SyncObj { - volatile int counter; - volatile int rc; + // Wait for all entries to be acknowledged + waitForEntriesAddition(syncObj1, numEntriesToWrite); - public SyncObj() { - counter = 0; - } - } + // Reads here work ok because ledger uses digest type set during create + readEntries(lh, entries1); + lh.close(); - @Parameterized.Parameters - public static Collection configs() { - return Arrays.asList(new Object[][] { {DigestType.MAC }, {DigestType.CRC32}, {DigestType.CRC32C} }); + try { + bkc.openLedgerNoRecovery(id, otherDigestType, ledgerPassword).close(); + fail("digest mismatch error is expected"); + } catch (BKException bke) { + // expected } - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - rng = new Random(System.currentTimeMillis()); // Initialize the Random - // Number Generator - entries1 = new ArrayList(); // initialize the entries list - entries2 = new 
ArrayList(); // initialize the entries list + } + + @MethodSource("configs") + @ParameterizedTest + public void ledgersWithDifferentDigestTypesWithAutodetection(DigestType digestType) + throws Exception { + initBookieWriteLedgersWithDifferentDigestsTest(digestType); + bkc.conf.setEnableDigestTypeAutodetection(true); + // Create ledgers + lh = bkc.createLedgerAdv(3, 2, 2, digestType, ledgerPassword); + + final long id = lh.ledgerId; + + LOG.info("Ledger ID-1: " + lh.getId()); + SyncObj syncObj1 = new SyncObj(); + for (int i = numEntriesToWrite - 1; i >= 0; i--) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(maxInt)); + entry.position(0); + entries1.add(0, entry.array()); + lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); } - public BookieWriteLedgersWithDifferentDigestsTest(DigestType digestType) { - super(3); - this.digestType = digestType; - this.otherDigestType = digestType == DigestType.CRC32 ? DigestType.MAC : DigestType.CRC32; - String ledgerManagerFactory = "org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory"; - // set ledger manager - baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); - baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + // Wait for all entries to be acknowledged + waitForEntriesAddition(syncObj1, numEntriesToWrite); + + // Reads here work ok because ledger uses digest type set during create + readEntries(lh, entries1); + lh.close(); + + // open here would fail if provided digest type is used + // it passes because ledger just uses digest type from its metadata/autodetects it + lh = bkc.openLedgerNoRecovery(id, otherDigestType, ledgerPassword); + readEntries(lh, entries1); + lh.close(); + } + + private void waitForEntriesAddition(SyncObj syncObj, int numEntriesToWrite) + throws InterruptedException { + synchronized (syncObj) { + while (syncObj.counter < numEntriesToWrite) { + syncObj.wait(); + } + assertEquals(BKException.Code.OK, syncObj.rc); } - 
- @Test - public void testLedgersWithDifferentDigestTypesNoAutodetection() throws Exception { - bkc.conf.setEnableDigestTypeAutodetection(false); - // Create ledgers - lh = bkc.createLedgerAdv(3, 2, 2, digestType, ledgerPassword); - - final long id = lh.ledgerId; - - LOG.info("Ledger ID: {}, digestType: {}", lh.getId(), digestType); - SyncObj syncObj1 = new SyncObj(); - for (int i = numEntriesToWrite - 1; i >= 0; i--) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(0, entry.array()); - lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); - } - - // Wait for all entries to be acknowledged - waitForEntriesAddition(syncObj1, numEntriesToWrite); - - // Reads here work ok because ledger uses digest type set during create - readEntries(lh, entries1); - lh.close(); - - try { - bkc.openLedgerNoRecovery(id, otherDigestType, ledgerPassword).close(); - fail("digest mismatch error is expected"); - } catch (BKException bke) { - // expected - } + } + + private void readEntries(LedgerHandle lh, ArrayList entries) + throws InterruptedException, BKException { + ls = lh.readEntries(0, numEntriesToWrite - 1); + int index = 0; + while (ls.hasMoreElements()) { + ByteBuffer origbb = ByteBuffer.wrap(entries.get(index++)); + Integer origEntry = origbb.getInt(); + ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry()); + Integer retrEntry = result.getInt(); + if (LOG.isDebugEnabled()) { + LOG.debug("Length of result: " + result.capacity()); + LOG.debug("Original entry: " + origEntry); + LOG.debug("Retrieved entry: " + retrEntry); + } + assertEquals(origEntry, retrEntry, "Checking entry " + index + " for equality"); } - - @Test - public void testLedgersWithDifferentDigestTypesWithAutodetection() throws Exception { - bkc.conf.setEnableDigestTypeAutodetection(true); - // Create ledgers - lh = bkc.createLedgerAdv(3, 2, 2, digestType, ledgerPassword); - - final long id = lh.ledgerId; - - 
LOG.info("Ledger ID-1: " + lh.getId()); - SyncObj syncObj1 = new SyncObj(); - for (int i = numEntriesToWrite - 1; i >= 0; i--) { - ByteBuffer entry = ByteBuffer.allocate(4); - entry.putInt(rng.nextInt(maxInt)); - entry.position(0); - entries1.add(0, entry.array()); - lh.asyncAddEntry(i, entry.array(), 0, entry.capacity(), this, syncObj1); - } - - // Wait for all entries to be acknowledged - waitForEntriesAddition(syncObj1, numEntriesToWrite); - - // Reads here work ok because ledger uses digest type set during create - readEntries(lh, entries1); - lh.close(); - - // open here would fail if provided digest type is used - // it passes because ledger just uses digest type from its metadata/autodetects it - lh = bkc.openLedgerNoRecovery(id, otherDigestType, ledgerPassword); - readEntries(lh, entries1); - lh.close(); + } + + @Override + public void addCompleteWithLatency(int rc, LedgerHandle lh, long entryId, long qwcLatency, + Object ctx) { + SyncObj x = (SyncObj) ctx; + captureThrowable(() -> { + assertTrue(rc != OK || qwcLatency > 0, "Successful write should have non-zero latency"); + }); + synchronized (x) { + x.rc = rc; + x.counter++; + x.notify(); } + } - private void waitForEntriesAddition(SyncObj syncObj, int numEntriesToWrite) throws InterruptedException { - synchronized (syncObj) { - while (syncObj.counter < numEntriesToWrite) { - syncObj.wait(); - } - assertEquals(BKException.Code.OK, syncObj.rc); - } - } + private static class SyncObj { - private void readEntries(LedgerHandle lh, ArrayList entries) throws InterruptedException, BKException { - ls = lh.readEntries(0, numEntriesToWrite - 1); - int index = 0; - while (ls.hasMoreElements()) { - ByteBuffer origbb = ByteBuffer.wrap(entries.get(index++)); - Integer origEntry = origbb.getInt(); - ByteBuffer result = ByteBuffer.wrap(ls.nextElement().getEntry()); - Integer retrEntry = result.getInt(); - if (LOG.isDebugEnabled()) { - LOG.debug("Length of result: " + result.capacity()); - LOG.debug("Original entry: " + 
origEntry); - LOG.debug("Retrieved entry: " + retrEntry); - } - assertTrue("Checking entry " + index + " for equality", origEntry - .equals(retrEntry)); - } - } + volatile int counter; + volatile int rc; - @Override - public void addCompleteWithLatency(int rc, LedgerHandle lh, long entryId, long qwcLatency, Object ctx) { - SyncObj x = (SyncObj) ctx; - captureThrowable(() -> { - assertTrue("Successful write should have non-zero latency", rc != OK || qwcLatency > 0); - }); - synchronized (x) { - x.rc = rc; - x.counter++; - x.notify(); - } + public SyncObj() { + counter = 0; } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ClientUtil.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ClientUtil.java index 3f8af53c133..488ebe71c28 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ClientUtil.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ClientUtil.java @@ -35,53 +35,60 @@ * Client utilities. */ public class ClientUtil { - public static final org.apache.bookkeeper.client.api.DigestType DIGEST_TYPE = - org.apache.bookkeeper.client.api.DigestType.CRC32C; - public static final byte[] PASSWD = "foobar".getBytes(UTF_8); - public static ByteBuf generatePacket(long ledgerId, long entryId, long lastAddConfirmed, - long length, byte[] data) throws GeneralSecurityException { - return generatePacket(ledgerId, entryId, lastAddConfirmed, length, data, 0, data.length); - } + public static final org.apache.bookkeeper.client.api.DigestType DIGEST_TYPE = + org.apache.bookkeeper.client.api.DigestType.CRC32C; + public static final byte[] PASSWD = "foobar".getBytes(UTF_8); - public static ByteBuf generatePacket(long ledgerId, long entryId, long lastAddConfirmed, long length, byte[] data, - int offset, int len) throws GeneralSecurityException { - DigestManager dm = DigestManager.instantiate(ledgerId, new byte[2], DigestType.CRC32, - UnpooledByteBufAllocator.DEFAULT, true); - return 
MockBookieClient.copyDataWithSkipHeader(dm.computeDigestAndPackageForSending(entryId, lastAddConfirmed, + public static ByteBuf generatePacket(long ledgerId, long entryId, long lastAddConfirmed, + long length, byte[] data) + throws GeneralSecurityException { + return generatePacket(ledgerId, entryId, lastAddConfirmed, length, data, 0, data.length); + } + + public static ByteBuf generatePacket(long ledgerId, long entryId, long lastAddConfirmed, + long length, byte[] data, + int offset, int len) throws GeneralSecurityException { + DigestManager dm = + DigestManager + .instantiate(ledgerId, new byte[2], DigestType.CRC32, UnpooledByteBufAllocator.DEFAULT, + true); + return MockBookieClient + .copyDataWithSkipHeader(dm.computeDigestAndPackageForSending(entryId, lastAddConfirmed, length, Unpooled.wrappedBuffer(data, offset, len), new byte[20], 0)); - } + } - /** - * Returns that whether ledger is in open state. - */ - public static boolean isLedgerOpen(LedgerHandle handle) { - return !handle.getLedgerMetadata().isClosed(); - } + /** + * Returns that whether ledger is in open state. 
+ */ + public static boolean isLedgerOpen(LedgerHandle handle) { + return !handle.getLedgerMetadata().isClosed(); + } - public static Versioned setupLedger(ClientContext clientCtx, long ledgerId, - LedgerMetadataBuilder builder) throws Exception { - return setupLedger(clientCtx.getLedgerManager(), ledgerId, builder); - } + public static Versioned setupLedger(ClientContext clientCtx, long ledgerId, + LedgerMetadataBuilder builder) throws Exception { + return setupLedger(clientCtx.getLedgerManager(), ledgerId, builder); + } - public static Versioned setupLedger(LedgerManager ledgerManager, long ledgerId, - LedgerMetadataBuilder builder) throws Exception { - LedgerMetadata md = builder.withPassword(PASSWD).withDigestType(DIGEST_TYPE).withId(ledgerId).build(); - return ledgerManager.createLedgerMetadata(ledgerId, md).get(); - } + public static Versioned setupLedger(LedgerManager ledgerManager, long ledgerId, + LedgerMetadataBuilder builder) throws Exception { + LedgerMetadata md = builder.withPassword(PASSWD).withDigestType(DIGEST_TYPE).withId(ledgerId) + .build(); + return ledgerManager.createLedgerMetadata(ledgerId, md).get(); + } - public static Versioned transformMetadata(ClientContext clientCtx, long ledgerId, - Function transform) - throws Exception { - return transformMetadata(clientCtx.getLedgerManager(), ledgerId, transform); - } + public static Versioned transformMetadata(ClientContext clientCtx, long ledgerId, + Function transform) throws Exception { + return transformMetadata(clientCtx.getLedgerManager(), ledgerId, transform); + } - public static Versioned transformMetadata(LedgerManager ledgerManager, long ledgerId, - Function transform) - throws Exception { - Versioned current = ledgerManager.readLedgerMetadata(ledgerId).get(); - return ledgerManager.writeLedgerMetadata(ledgerId, transform.apply(current.getValue()), - current.getVersion()).get(); - } + public static Versioned transformMetadata(LedgerManager ledgerManager, + long ledgerId, + Function 
transform) throws Exception { + Versioned current = ledgerManager.readLedgerMetadata(ledgerId).get(); + return ledgerManager + .writeLedgerMetadata(ledgerId, transform.apply(current.getValue()), current.getVersion()) + .get(); + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ConcurrentV2RecoveryTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ConcurrentV2RecoveryTest.java index 2a8a57735f8..c628c273050 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ConcurrentV2RecoveryTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ConcurrentV2RecoveryTest.java @@ -20,6 +20,8 @@ */ package org.apache.bookkeeper.client; +import static org.junit.jupiter.api.Assertions.assertEquals; + import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -33,78 +35,73 @@ import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Tests concurrent attempts to open and recovery a ledger with V2 protocol. 
*/ -public class ConcurrentV2RecoveryTest extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(ConcurrentV2RecoveryTest.class); - private final DigestType digestType; +public class ConcurrentV2RecoveryTest extends BookKeeperClusterTestCase { - public ConcurrentV2RecoveryTest() { - super(4); - this.digestType = DigestType.CRC32; - } + private static final Logger LOG = LoggerFactory.getLogger(ConcurrentV2RecoveryTest.class); + private final DigestType digestType; - @Test - public void testConcurrentOpen() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()) - .setNumChannelsPerBookie(16) - .setUseV2WireProtocol(true) - .setZkTimeout(20000) - .setAddEntryTimeout(30) - .setReadEntryTimeout(30) - .setSpeculativeReadTimeout(0) - .setThrottleValue(0) - .setLedgerManagerFactoryClassName(HierarchicalLedgerManagerFactory.class.getName()); + public ConcurrentV2RecoveryTest() { + super(4); + this.digestType = DigestType.CRC32; + } - BookKeeper bkc = new BookKeeper(conf); + @Test + public void testConcurrentOpen() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setNumChannelsPerBookie(16) + .setUseV2WireProtocol(true).setZkTimeout(20000).setAddEntryTimeout(30) + .setReadEntryTimeout(30) + .setSpeculativeReadTimeout(0).setThrottleValue(0) + .setLedgerManagerFactoryClassName(HierarchicalLedgerManagerFactory.class.getName()); - for (int j = 0; j < 10; j++) { - LedgerHandle lh = bkc.createLedger(DigestType.CRC32, "testPasswd".getBytes()); - lh.addEntry("foobar".getBytes()); + BookKeeper bkc = new BookKeeper(conf); - long ledgerId = lh.getId(); - final long finalLedgerId = ledgerId; - ExecutorService executor = Executors.newFixedThreadPool(10); - List> futures = new ArrayList<>(); - CountDownLatch latch = new CountDownLatch(1); - for (int i = 0; i < 5; i++) { - final 
CompletableFuture future = new CompletableFuture<>(); - executor.submit(() -> { - latch.await(); + for (int j = 0; j < 10; j++) { + LedgerHandle lh = bkc.createLedger(DigestType.CRC32, "testPasswd".getBytes()); + lh.addEntry("foobar".getBytes()); - bkc.asyncOpenLedger(finalLedgerId, - DigestType.CRC32, "testPasswd".getBytes(), - (rc, handle, ctx) -> { - if (rc != BKException.Code.OK) { - future.completeExceptionally(BKException.create(rc)); - } else { - future.complete(handle); - } - }, null); - return future; - }); - futures.add(future); - } + long ledgerId = lh.getId(); + final long finalLedgerId = ledgerId; + ExecutorService executor = Executors.newFixedThreadPool(10); + List> futures = new ArrayList<>(); + CountDownLatch latch = new CountDownLatch(1); + for (int i = 0; i < 5; i++) { + final CompletableFuture future = new CompletableFuture<>(); + executor.submit(() -> { + latch.await(); - latch.countDown(); - for (Future f : futures) { - try { - f.get(10, TimeUnit.SECONDS); - } catch (ExecutionException ee) { - // also fine, recovery can currently fail because of metadata conflicts. - // We should fix this at some point by making the metadata immutable, - // and restarting the entire operation - Assert.assertEquals(ee.getCause().getClass(), BKException.BKLedgerRecoveryException.class); + bkc.asyncOpenLedger(finalLedgerId, DigestType.CRC32, "testPasswd".getBytes(), + (rc, handle, ctx) -> { + if (rc != BKException.Code.OK) { + future.completeExceptionally(BKException.create(rc)); + } else { + future.complete(handle); } - } + }, null); + return future; + }); + futures.add(future); + } + + latch.countDown(); + for (Future f : futures) { + try { + f.get(10, TimeUnit.SECONDS); + } catch (ExecutionException ee) { + // also fine, recovery can currently fail because of metadata conflicts. 
+ // We should fix this at some point by making the metadata immutable, + // and restarting the entire operation + assertEquals(BKException.BKLedgerRecoveryException.class, ee.getCause().getClass()); } - bkc.close(); + } } + bkc.close(); + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/DeferredSyncTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/DeferredSyncTest.java index a49b5775945..7c847e52862 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/DeferredSyncTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/DeferredSyncTest.java @@ -18,10 +18,11 @@ package org.apache.bookkeeper.client; import static org.apache.bookkeeper.common.concurrent.FutureUtils.result; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; @@ -30,205 +31,178 @@ import org.apache.bookkeeper.client.api.WriteFlag; import org.apache.bookkeeper.client.api.WriteHandle; import org.apache.bookkeeper.net.BookieId; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Client side tests on deferred sync write flag. 
*/ -public class DeferredSyncTest extends MockBookKeeperTestCase { - - static final byte[] PASSWORD = "password".getBytes(); - static final ByteBuf DATA = Unpooled.wrappedBuffer("foobar".getBytes()); - static final int NUM_ENTRIES = 100; - - @Test - public void testAddEntryLastAddConfirmedDoesNotAdvance() throws Exception { - try (WriteHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(2) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute())) { - for (int i = 0; i < NUM_ENTRIES - 1; i++) { - result(wh.appendAsync(DATA.retainedDuplicate())); - } - long lastEntryID = result(wh.appendAsync(DATA.retainedDuplicate())); - assertEquals(NUM_ENTRIES - 1, lastEntryID); - assertEquals(NUM_ENTRIES - 1, wh.getLastAddPushed()); - assertEquals(-1, wh.getLastAddConfirmed()); - } +class DeferredSyncTest extends MockBookKeeperTestCase { + + static final byte[] PASSWORD = "password".getBytes(); + static final ByteBuf DATA = Unpooled.wrappedBuffer("foobar".getBytes()); + static final int NUM_ENTRIES = 100; + + @Test + void addEntryLastAddConfirmedDoesNotAdvance() throws Exception { + try (WriteHandle wh = result( + newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) + .withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC).execute())) { + for (int i = 0; i < NUM_ENTRIES - 1; i++) { + result(wh.appendAsync(DATA.retainedDuplicate())); + } + long lastEntryID = result(wh.appendAsync(DATA.retainedDuplicate())); + assertEquals(NUM_ENTRIES - 1, lastEntryID); + assertEquals(NUM_ENTRIES - 1, wh.getLastAddPushed()); + assertEquals(-1, wh.getLastAddConfirmed()); } - - @Test - public void testAddEntryLastAddConfirmedAdvanceWithForce() throws Exception { - try (WriteHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(2) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute())) { - for (int i 
= 0; i < NUM_ENTRIES - 1; i++) { - result(wh.appendAsync(DATA.retainedDuplicate())); - } - long lastEntryID = result(wh.appendAsync(DATA.retainedDuplicate())); - assertEquals(NUM_ENTRIES - 1, lastEntryID); - assertEquals(NUM_ENTRIES - 1, wh.getLastAddPushed()); - assertEquals(-1, wh.getLastAddConfirmed()); - result(wh.force()); - assertEquals(NUM_ENTRIES - 1, wh.getLastAddConfirmed()); - } + } + + @Test + void addEntryLastAddConfirmedAdvanceWithForce() throws Exception { + try (WriteHandle wh = result( + newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) + .withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC).execute())) { + for (int i = 0; i < NUM_ENTRIES - 1; i++) { + result(wh.appendAsync(DATA.retainedDuplicate())); + } + long lastEntryID = result(wh.appendAsync(DATA.retainedDuplicate())); + assertEquals(NUM_ENTRIES - 1, lastEntryID); + assertEquals(NUM_ENTRIES - 1, wh.getLastAddPushed()); + assertEquals(-1, wh.getLastAddConfirmed()); + result(wh.force()); + assertEquals(NUM_ENTRIES - 1, wh.getLastAddConfirmed()); } - - @Test - public void testForceOnWriteAdvHandle() throws Exception { - try (WriteAdvHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(2) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .makeAdv() - .execute())) { - CompletableFuture w0 = wh.writeAsync(0, DATA.retainedDuplicate()); - CompletableFuture w2 = wh.writeAsync(2, DATA.retainedDuplicate()); - CompletableFuture w3 = wh.writeAsync(3, DATA.retainedDuplicate()); - result(w0); - result(wh.force()); - assertEquals(0, wh.getLastAddConfirmed()); - CompletableFuture w1 = wh.writeAsync(1, DATA.retainedDuplicate()); - result(w3); - assertTrue(w1.isDone()); - assertTrue(w2.isDone()); - CompletableFuture w5 = wh.writeAsync(5, DATA.retainedDuplicate()); - result(wh.force()); - assertEquals(3, wh.getLastAddConfirmed()); - wh.writeAsync(4, DATA.retainedDuplicate()); - result(w5); - 
result(wh.force()); - assertEquals(5, wh.getLastAddConfirmed()); - } + } + + @Test + void forceOnWriteAdvHandle() throws Exception { + try (WriteAdvHandle wh = result(newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(3) + .withAckQuorumSize(2).withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC) + .makeAdv().execute())) { + CompletableFuture w0 = wh.writeAsync(0, DATA.retainedDuplicate()); + CompletableFuture w2 = wh.writeAsync(2, DATA.retainedDuplicate()); + CompletableFuture w3 = wh.writeAsync(3, DATA.retainedDuplicate()); + result(w0); + result(wh.force()); + assertEquals(0, wh.getLastAddConfirmed()); + CompletableFuture w1 = wh.writeAsync(1, DATA.retainedDuplicate()); + result(w3); + assertTrue(w1.isDone()); + assertTrue(w2.isDone()); + CompletableFuture w5 = wh.writeAsync(5, DATA.retainedDuplicate()); + result(wh.force()); + assertEquals(3, wh.getLastAddConfirmed()); + wh.writeAsync(4, DATA.retainedDuplicate()); + result(w5); + result(wh.force()); + assertEquals(5, wh.getLastAddConfirmed()); } - - @Test - public void testForceRequiresFullEnsemble() throws Exception { - try (WriteHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(2) - .withAckQuorumSize(2) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute())) { - for (int i = 0; i < NUM_ENTRIES - 1; i++) { - result(wh.appendAsync(DATA.retainedDuplicate())); - } - long lastEntryID = result(wh.appendAsync(DATA.retainedDuplicate())); - assertEquals(NUM_ENTRIES - 1, lastEntryID); - assertEquals(NUM_ENTRIES - 1, wh.getLastAddPushed()); - assertEquals(-1, wh.getLastAddConfirmed()); - - BookieId bookieAddress = wh.getLedgerMetadata().getEnsembleAt(wh.getLastAddPushed()).get(0); - killBookie(bookieAddress); - - // write should succeed (we still have 2 bookies out of 3) - result(wh.appendAsync(DATA.retainedDuplicate())); - - // force cannot go, it must be acknowledged by all of the bookies in the ensamble - try { - result(wh.force()); - 
} catch (BKException.BKBookieException failed) { - } - // bookie comes up again, force must succeed - startKilledBookie(bookieAddress); - result(wh.force()); - } - } - - @Test - public void testForceWillAdvanceLacOnlyUpToLastAcknowledgedWrite() throws Exception { - try (WriteHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(3) - .withWriteQuorumSize(3) - .withAckQuorumSize(3) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute())) { - for (int i = 0; i < NUM_ENTRIES - 1; i++) { - result(wh.appendAsync(DATA.retainedDuplicate())); - } - long lastEntryIdBeforeSuspend = result(wh.appendAsync(DATA.retainedDuplicate())); - assertEquals(NUM_ENTRIES - 1, lastEntryIdBeforeSuspend); - assertEquals(-1, wh.getLastAddConfirmed()); - - // one bookie will stop sending acks for forceLedger - BookieId bookieAddress = wh.getLedgerMetadata().getEnsembleAt(wh.getLastAddPushed()).get(0); - suspendBookieForceLedgerAcks(bookieAddress); - - // start and complete a force, lastAddConfirmed cannot be "lastAddPushedAfterSuspendedWrite" - // because the write has not yet been acknowledged by AckQuorumSize Bookies - CompletableFuture forceResult = wh.force(); - assertEquals(-1, wh.getLastAddConfirmed()); - - // send an entry and receive ack - long lastEntry = wh.append(DATA.retainedDuplicate()); - - // receive the ack for forceLedger - resumeBookieWriteAcks(bookieAddress); - result(forceResult); - - // now LastAddConfirmed will be equals to the last confirmed entry - // before force() started - assertEquals(lastEntryIdBeforeSuspend, wh.getLastAddConfirmed()); - - result(wh.force()); - assertEquals(lastEntry, wh.getLastAddConfirmed()); - } + } + + @Test + void forceRequiresFullEnsemble() throws Exception { + try (WriteHandle wh = result( + newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(2).withAckQuorumSize(2) + .withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC).execute())) { + for (int i = 0; i < NUM_ENTRIES - 1; i++) { + 
result(wh.appendAsync(DATA.retainedDuplicate())); + } + long lastEntryID = result(wh.appendAsync(DATA.retainedDuplicate())); + assertEquals(NUM_ENTRIES - 1, lastEntryID); + assertEquals(NUM_ENTRIES - 1, wh.getLastAddPushed()); + assertEquals(-1, wh.getLastAddConfirmed()); + + BookieId bookieAddress = wh.getLedgerMetadata().getEnsembleAt(wh.getLastAddPushed()).get(0); + killBookie(bookieAddress); + + // write should succeed (we still have 2 bookies out of 3) + result(wh.appendAsync(DATA.retainedDuplicate())); + + // force cannot go, it must be acknowledged by all of the bookies in the ensamble + try { + result(wh.force()); + } catch (BKException.BKBookieException failed) { + } + // bookie comes up again, force must succeed + startKilledBookie(bookieAddress); + result(wh.force()); } - - @Test - public void testForbiddenEnsembleChange() throws Exception { - try (WriteHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(1) - .withWriteQuorumSize(1) - .withAckQuorumSize(1) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute())) { - for (int i = 0; i < NUM_ENTRIES - 1; i++) { - wh.append(DATA.retainedDuplicate()); - } - - assertEquals(1, availableBookies.size()); - // kill the only bookie in the ensamble - killBookie(wh.getLedgerMetadata().getEnsembleAt(wh.getLastAddPushed()).get(0)); - assertEquals(0, availableBookies.size()); - startNewBookie(); - assertEquals(1, availableBookies.size()); - - try { - // we cannot switch to the new bookie with DEFERRED_SYNC - wh.append(DATA.retainedDuplicate()); - fail("since ensemble change is disable we cannot be able to write any more"); - } catch (BKException.BKWriteException ex) { - // expected - } - LedgerHandle lh = (LedgerHandle) wh; - assertFalse(lh.hasDelayedWriteFailedBookies()); - } + } + + @Test + void forceWillAdvanceLacOnlyUpToLastAcknowledgedWrite() throws Exception { + try (WriteHandle wh = result( + newCreateLedgerOp().withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) + 
.withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC).execute())) { + for (int i = 0; i < NUM_ENTRIES - 1; i++) { + result(wh.appendAsync(DATA.retainedDuplicate())); + } + long lastEntryIdBeforeSuspend = result(wh.appendAsync(DATA.retainedDuplicate())); + assertEquals(NUM_ENTRIES - 1, lastEntryIdBeforeSuspend); + assertEquals(-1, wh.getLastAddConfirmed()); + + // one bookie will stop sending acks for forceLedger + BookieId bookieAddress = wh.getLedgerMetadata().getEnsembleAt(wh.getLastAddPushed()).get(0); + suspendBookieForceLedgerAcks(bookieAddress); + + // start and complete a force, lastAddConfirmed cannot be "lastAddPushedAfterSuspendedWrite" + // because the write has not yet been acknowledged by AckQuorumSize Bookies + CompletableFuture forceResult = wh.force(); + assertEquals(-1, wh.getLastAddConfirmed()); + + // send an entry and receive ack + long lastEntry = wh.append(DATA.retainedDuplicate()); + + // receive the ack for forceLedger + resumeBookieWriteAcks(bookieAddress); + result(forceResult); + + // now LastAddConfirmed will be equals to the last confirmed entry + // before force() started + assertEquals(lastEntryIdBeforeSuspend, wh.getLastAddConfirmed()); + + result(wh.force()); + assertEquals(lastEntry, wh.getLastAddConfirmed()); } - - @Test(expected = BKException.BKLedgerClosedException.class) - public void testCannotIssueForceOnClosedLedgerHandle() throws Exception { - WriteHandle wh = result(newCreateLedgerOp() - .withEnsembleSize(1) - .withWriteQuorumSize(1) - .withAckQuorumSize(1) - .withPassword(PASSWORD) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .execute()); - wh.close(); - result(wh.force()); + } + + @Test + void forbiddenEnsembleChange() throws Exception { + try (WriteHandle wh = result( + newCreateLedgerOp().withEnsembleSize(1).withWriteQuorumSize(1).withAckQuorumSize(1) + .withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC).execute())) { + for (int i = 0; i < NUM_ENTRIES - 1; i++) { + 
wh.append(DATA.retainedDuplicate()); + } + + assertEquals(1, availableBookies.size()); + // kill the only bookie in the ensamble + killBookie(wh.getLedgerMetadata().getEnsembleAt(wh.getLastAddPushed()).get(0)); + assertEquals(0, availableBookies.size()); + startNewBookie(); + assertEquals(1, availableBookies.size()); + + try { + // we cannot switch to the new bookie with DEFERRED_SYNC + wh.append(DATA.retainedDuplicate()); + fail("since ensemble change is disable we cannot be able to write any more"); + } catch (BKException.BKWriteException ex) { + // expected + } + LedgerHandle lh = (LedgerHandle) wh; + assertFalse(lh.hasDelayedWriteFailedBookies()); } + } + + @Test + void cannotIssueForceOnClosedLedgerHandle() throws Exception { + assertThrows(BKException.BKLedgerClosedException.class, () -> { + WriteHandle wh = result( + newCreateLedgerOp().withEnsembleSize(1).withWriteQuorumSize(1).withAckQuorumSize(1) + .withPassword(PASSWORD).withWriteFlags(WriteFlag.DEFERRED_SYNC).execute()); + wh.close(); + result(wh.force()); + }); + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ExplicitLacTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ExplicitLacTest.java index 42d1aebf6ac..a4fd62a2c00 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ExplicitLacTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/ExplicitLacTest.java @@ -18,10 +18,9 @@ */ package org.apache.bookkeeper.client; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.util.Arrays; import java.util.Collection; @@ -35,320 +34,330 @@ import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; 
import org.apache.bookkeeper.util.TestUtils; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; /** * Test cases for `Explicit Lac` feature. */ -@RunWith(Parameterized.class) public class ExplicitLacTest extends BookKeeperClusterTestCase { - private final DigestType digestType; - - public ExplicitLacTest(Class storageClass) { - super(1); - this.digestType = DigestType.CRC32; - baseConf.setLedgerStorageClass(storageClass.getName()); - /* - * to persist explicitLac, journalFormatVersionToWrite should be atleast - * V6 and fileInfoFormatVersionToWrite should be atleast V1 - */ - baseConf.setJournalFormatVersionToWrite(6); - baseConf.setFileInfoFormatVersionToWrite(1); + private DigestType digestType; + + public ExplicitLacTest() { + super(1); + } + + public static Collection configs() { + return Arrays.asList( + new Object[][]{{InterleavedLedgerStorage.class}, {SortedLedgerStorage.class}, + {DbLedgerStorage.class}, }); + } + + public void initExplicitLacTest(Class storageClass) { + this.digestType = DigestType.CRC32; + baseConf.setLedgerStorageClass(storageClass.getName()); + /* + * to persist explicitLac, journalFormatVersionToWrite should be atleast + * V6 and fileInfoFormatVersionToWrite should be atleast V1 + */ + baseConf.setJournalFormatVersionToWrite(6); + baseConf.setFileInfoFormatVersionToWrite(1); + } + + @MethodSource("configs") + @ParameterizedTest + public void readHandleWithNoExplicitLAC(Class storageClass) + throws Exception { + initExplicitLacTest(storageClass); + ClientConfiguration confWithNoExplicitLAC = new ClientConfiguration(); + confWithNoExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + confWithNoExplicitLAC.setExplictLacInterval(0); + + BookKeeper bkcWithNoExplicitLAC = new BookKeeper(confWithNoExplicitLAC); + + 
LedgerHandle wlh = bkcWithNoExplicitLAC + .createLedger(1, 1, 1, digestType, "testPasswd".getBytes()); + long ledgerId = wlh.getId(); + int numOfEntries = 5; + for (int i = 0; i < numOfEntries; i++) { + wlh.addEntry(("foobar" + i).getBytes()); } - @Parameters - public static Collection configs() { - return Arrays.asList(new Object[][] { - { InterleavedLedgerStorage.class }, - { SortedLedgerStorage.class }, - { DbLedgerStorage.class }, - }); + LedgerHandle rlh = bkcWithNoExplicitLAC + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + Enumeration entries = rlh.readEntries(0, numOfEntries - 2); + int entryId = 0; + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + String entryString = new String(entry.getEntry()); + assertEquals(entryString, "foobar" + entryId, + "Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + + entryString); + entryId++; } - @Test - public void testReadHandleWithNoExplicitLAC() throws Exception { - ClientConfiguration confWithNoExplicitLAC = new ClientConfiguration(); - confWithNoExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - confWithNoExplicitLAC.setExplictLacInterval(0); - - BookKeeper bkcWithNoExplicitLAC = new BookKeeper(confWithNoExplicitLAC); - - LedgerHandle wlh = bkcWithNoExplicitLAC.createLedger( - 1, 1, 1, - digestType, "testPasswd".getBytes()); - long ledgerId = wlh.getId(); - int numOfEntries = 5; - for (int i = 0; i < numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - LedgerHandle rlh = bkcWithNoExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); - assertTrue( - "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (numOfEntries - 2))); - - 
Enumeration entries = rlh.readEntries(0, numOfEntries - 2); - int entryId = 0; - while (entries.hasMoreElements()) { - LedgerEntry entry = entries.nextElement(); - String entryString = new String(entry.getEntry()); - assertTrue("Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + entryString, - entryString.equals("foobar" + entryId)); - entryId++; - } - - for (int i = numOfEntries; i < 2 * numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - TestUtils.waitUntilLacUpdated(rlh, numOfEntries - 2); - - assertTrue( - "Expected LAC of wlh: " + (2 * numOfEntries - 1) + " actual LAC of rlh: " + wlh.getLastAddConfirmed(), - (wlh.getLastAddConfirmed() == (2 * numOfEntries - 1))); - assertTrue( - "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (numOfEntries - 2))); - - // since explicitlacflush policy is not enabled for writeledgerhandle, when we try - // to read explicitlac for rlh, it will be reading up to the piggyback value. 
- long explicitlac = rlh.readExplicitLastConfirmed(); - assertTrue( - "Expected Explicit LAC of rlh: " + (numOfEntries - 2) + " actual ExplicitLAC of rlh: " + explicitlac, - (explicitlac == (2 * numOfEntries - 2))); - - try { - rlh.readEntries(2 * numOfEntries - 1, 2 * numOfEntries - 1); - fail("rlh readEntries beyond " + (2 * numOfEntries - 2) + " should fail with ReadException"); - } catch (BKException.BKReadException readException) { - } - - rlh.close(); - wlh.close(); - bkcWithNoExplicitLAC.close(); + for (int i = numOfEntries; i < 2 * numOfEntries; i++) { + wlh.addEntry(("foobar" + i).getBytes()); } - @Test - public void testExplicitLACIsPersisted() throws Exception { - ClientConfiguration confWithNoExplicitLAC = new ClientConfiguration(); - confWithNoExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - // enable explicitLacFlush by setting non-zero value for - // explictLacInterval - confWithNoExplicitLAC.setExplictLacInterval(50); - - BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithNoExplicitLAC); - - LedgerHandle wlh = bkcWithExplicitLAC.createLedger(1, 1, 1, digestType, "testPasswd".getBytes()); - long ledgerId = wlh.getId(); - int numOfEntries = 5; - for (int i = 0; i < numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - LedgerHandle rlh = bkcWithExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); - assertEquals("LAC of rlh", (long) numOfEntries - 2, rlh.getLastAddConfirmed()); - - for (int i = numOfEntries; i < 2 * numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - assertEquals("LAC of wlh", (2 * numOfEntries - 1), wlh.getLastAddConfirmed()); - assertEquals("LAC of rlh", (long) numOfEntries - 2, rlh.getLastAddConfirmed()); - assertEquals("Read LAC of rlh", (2 * numOfEntries - 2), rlh.readLastAddConfirmed()); - assertEquals("Read explicit LAC of rlh", (2 * numOfEntries - 2), rlh.readExplicitLastConfirmed()); - - // we need to wait for atleast 2 
explicitlacintervals, - // since in writehandle for the first call - // lh.getExplicitLastAddConfirmed() will be < - // lh.getPiggyBackedLastAddConfirmed(), - // so it wont make explicit writelac in the first run - long readExplicitLastConfirmed = TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 1); - assertEquals("Read explicit LAC of rlh after wait for explicitlacflush", (2 * numOfEntries - 1), - readExplicitLastConfirmed); - - // bookies have to be restarted - restartBookies(); - - /* - * since explicitLac is persisted we should be able to read explicitLac - * from the bookies. - */ - LedgerHandle rlh2 = bkcWithExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); - assertEquals("Read explicit LAC of rlh2 after bookies restart", (2 * numOfEntries - 1), - rlh2.readExplicitLastConfirmed()); - bkcWithExplicitLAC.close(); + TestUtils.waitUntilLacUpdated(rlh, numOfEntries - 2); + + assertTrue((wlh.getLastAddConfirmed() == (2 * numOfEntries - 1)), + "Expected LAC of wlh: " + (2 * numOfEntries - 1) + " actual LAC of rlh: " + wlh + .getLastAddConfirmed()); + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + // since explicitlacflush policy is not enabled for writeledgerhandle, when we try + // to read explicitlac for rlh, it will be reading up to the piggyback value. 
+ long explicitlac = rlh.readExplicitLastConfirmed(); + assertTrue((explicitlac == (2 * numOfEntries - 2)), + "Expected Explicit LAC of rlh: " + (numOfEntries - 2) + " actual ExplicitLAC of rlh: " + + explicitlac); + + try { + rlh.readEntries(2 * numOfEntries - 1, 2 * numOfEntries - 1); + fail("rlh readEntries beyond " + (2 * numOfEntries - 2) + " should fail with ReadException"); + } catch (BKException.BKReadException readException) { } - @Test - public void testReadHandleWithExplicitLAC() throws Exception { - ClientConfiguration confWithExplicitLAC = new ClientConfiguration(); - confWithExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int explicitLacIntervalMillis = 1000; - confWithExplicitLAC.setExplictLacInterval(explicitLacIntervalMillis); - - BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithExplicitLAC); - - LedgerHandle wlh = bkcWithExplicitLAC.createLedger( - 1, 1, 1, - digestType, "testPasswd".getBytes()); - long ledgerId = wlh.getId(); - int numOfEntries = 5; - for (int i = 0; i < numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - LedgerHandle rlh = bkcWithExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); - - assertTrue( - "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (numOfEntries - 2))); - - for (int i = numOfEntries; i < 2 * numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - // we need to wait for atleast 2 explicitlacintervals, - // since in writehandle for the first call - // lh.getExplicitLastAddConfirmed() will be < - // lh.getPiggyBackedLastAddConfirmed(), - // so it wont make explicit writelac in the first run - TestUtils.waitUntilLacUpdated(rlh, 2 * numOfEntries - 2); - - assertTrue( - "Expected LAC of wlh: " + (2 * numOfEntries - 1) + " actual LAC of wlh: " + wlh.getLastAddConfirmed(), - (wlh.getLastAddConfirmed() == (2 * numOfEntries - 1))); - // readhandle's 
lastaddconfirmed wont be updated until readExplicitLastConfirmed call is made - assertTrue( - "Expected LAC of rlh: " + (2 * numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (2 * numOfEntries - 2))); - - long explicitlac = TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 1); - assertTrue("Expected Explicit LAC of rlh: " + (2 * numOfEntries - 1) - + " actual ExplicitLAC of rlh: " + explicitlac, - (explicitlac == (2 * numOfEntries - 1))); - // readExplicitLastConfirmed updates the lac of rlh. - assertTrue( - "Expected LAC of rlh: " + (2 * numOfEntries - 1) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (2 * numOfEntries - 1))); - - Enumeration entries = rlh.readEntries(numOfEntries, 2 * numOfEntries - 1); - int entryId = numOfEntries; - while (entries.hasMoreElements()) { - LedgerEntry entry = entries.nextElement(); - String entryString = new String(entry.getEntry()); - assertTrue("Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + entryString, - entryString.equals("foobar" + entryId)); - entryId++; - } - - rlh.close(); - wlh.close(); - bkcWithExplicitLAC.close(); + rlh.close(); + wlh.close(); + bkcWithNoExplicitLAC.close(); + } + + @MethodSource("configs") + @ParameterizedTest + public void explicitLACIsPersisted(Class storageClass) throws Exception { + initExplicitLacTest(storageClass); + ClientConfiguration confWithNoExplicitLAC = new ClientConfiguration(); + confWithNoExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + // enable explicitLacFlush by setting non-zero value for + // explictLacInterval + confWithNoExplicitLAC.setExplictLacInterval(50); + + BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithNoExplicitLAC); + + LedgerHandle wlh = bkcWithExplicitLAC + .createLedger(1, 1, 1, digestType, "testPasswd".getBytes()); + long ledgerId = wlh.getId(); + int numOfEntries = 5; + for (int i = 0; i < numOfEntries; 
i++) { + wlh.addEntry(("foobar" + i).getBytes()); } - @Test - public void testReadHandleWithExplicitLACAndDeferredSync() throws Exception { - ClientConfiguration confWithExplicitLAC = new ClientConfiguration(); - confWithExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - int explicitLacIntervalMillis = 1000; - confWithExplicitLAC.setExplictLacInterval(explicitLacIntervalMillis); - - BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithExplicitLAC); - - LedgerHandle wlh = (LedgerHandle) bkcWithExplicitLAC.newCreateLedgerOp() - .withEnsembleSize(1) - .withWriteQuorumSize(1) - .withAckQuorumSize(1) - .withWriteFlags(WriteFlag.DEFERRED_SYNC) - .withDigestType(digestType.toApiDigestType()) - .withPassword("testPasswd".getBytes()) - .execute() - .get(); - long ledgerId = wlh.getId(); - - // start like testReadHandleWithExplicitLAC - int numOfEntries = 5; - for (int i = 0; i < numOfEntries; i++) { - // if you perform force() + addEntry() you will piggy back LAC as usual - wlh.force().get(); - wlh.addEntry(("foobar" + i).getBytes()); - } - - LedgerHandle rlh = bkcWithExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); - - assertTrue( - "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (numOfEntries - 2))); - - for (int i = numOfEntries; i < 2 * numOfEntries; i++) { - wlh.addEntry(("foobar" + i).getBytes()); - } - - // running a force() will update local LAC on the writer - // ExplicitLAC timer will send the value even without writes - wlh.force().get(); - - // wait for explicit lac to be sent to bookies - TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 2); - - // we need to wait for atleast 2 explicitlacintervals, - // since in writehandle for the first call - // lh.getExplicitLastAddConfirmed() will be < - // lh.getPiggyBackedLastAddConfirmed(), - // so it wont make explicit writelac in the first run - 
TestUtils.waitUntilLacUpdated(rlh, 2 * numOfEntries - 2); - - assertTrue( - "Expected LAC of wlh: " + (2 * numOfEntries - 1) + " actual LAC of wlh: " + wlh.getLastAddConfirmed(), - (wlh.getLastAddConfirmed() == (2 * numOfEntries - 1))); - - long explicitlac = TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 1); - assertTrue("Expected Explicit LAC of rlh: " + (2 * numOfEntries - 1) - + " actual ExplicitLAC of rlh: " + explicitlac, - (explicitlac == (2 * numOfEntries - 1))); - // readExplicitLastConfirmed updates the lac of rlh. - assertTrue( - "Expected LAC of rlh: " + (2 * numOfEntries - 1) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(), - (rlh.getLastAddConfirmed() == (2 * numOfEntries - 1))); - - Enumeration entries = rlh.readEntries(numOfEntries, 2 * numOfEntries - 1); - int entryId = numOfEntries; - while (entries.hasMoreElements()) { - LedgerEntry entry = entries.nextElement(); - String entryString = new String(entry.getEntry()); - assertTrue("Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + entryString, - entryString.equals("foobar" + entryId)); - entryId++; - } - - rlh.close(); - wlh.close(); - bkcWithExplicitLAC.close(); + LedgerHandle rlh = bkcWithExplicitLAC + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); + assertEquals((long) numOfEntries - 2, rlh.getLastAddConfirmed(), "LAC of rlh"); + + for (int i = numOfEntries; i < 2 * numOfEntries; i++) { + wlh.addEntry(("foobar" + i).getBytes()); } - @Test - public void fallbackV3() throws Exception { - ClientConfiguration v2Conf = new ClientConfiguration(); - v2Conf.setUseV2WireProtocol(true); - v2Conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - v2Conf.setExplictLacInterval(10); - - BookKeeper bookKeeper = new BookKeeper(v2Conf); - LedgerHandle write = (LedgerHandle) bookKeeper.createLedger(1, - 1, - 1, - DigestType.MAC, - "pass".getBytes()); - write.addEntry("test".getBytes()); - TestUtils.waitUntilExplicitLacUpdated(write, 0); - 
long lac = write.readExplicitLastConfirmed(); - assertEquals(0, lac); - write.close(); - bookKeeper.close(); + assertEquals((2 * numOfEntries - 1), wlh.getLastAddConfirmed(), "LAC of wlh"); + assertEquals((long) numOfEntries - 2, rlh.getLastAddConfirmed(), "LAC of rlh"); + assertEquals((2 * numOfEntries - 2), rlh.readLastAddConfirmed(), "Read LAC of rlh"); + assertEquals((2 * numOfEntries - 2), rlh.readExplicitLastConfirmed(), + "Read explicit LAC of rlh"); + + // we need to wait for atleast 2 explicitlacintervals, + // since in writehandle for the first call + // lh.getExplicitLastAddConfirmed() will be < + // lh.getPiggyBackedLastAddConfirmed(), + // so it wont make explicit writelac in the first run + long readExplicitLastConfirmed = TestUtils + .waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 1); + assertEquals((2 * numOfEntries - 1), readExplicitLastConfirmed, + "Read explicit LAC of rlh after wait for explicitlacflush"); + + // bookies have to be restarted + restartBookies(); + + /* + * since explicitLac is persisted we should be able to read explicitLac + * from the bookies. 
+ */ + LedgerHandle rlh2 = bkcWithExplicitLAC + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); + assertEquals((2 * numOfEntries - 1), rlh2.readExplicitLastConfirmed(), + "Read explicit LAC of rlh2 after bookies restart"); + bkcWithExplicitLAC.close(); + } + + @MethodSource("configs") + @ParameterizedTest + public void readHandleWithExplicitLAC(Class storageClass) + throws Exception { + initExplicitLacTest(storageClass); + ClientConfiguration confWithExplicitLAC = new ClientConfiguration(); + confWithExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int explicitLacIntervalMillis = 1000; + confWithExplicitLAC.setExplictLacInterval(explicitLacIntervalMillis); + + BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithExplicitLAC); + + LedgerHandle wlh = bkcWithExplicitLAC + .createLedger(1, 1, 1, digestType, "testPasswd".getBytes()); + long ledgerId = wlh.getId(); + int numOfEntries = 5; + for (int i = 0; i < numOfEntries; i++) { + wlh.addEntry(("foobar" + i).getBytes()); } + LedgerHandle rlh = bkcWithExplicitLAC + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); + + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + for (int i = numOfEntries; i < 2 * numOfEntries; i++) { + wlh.addEntry(("foobar" + i).getBytes()); + } + + // we need to wait for atleast 2 explicitlacintervals, + // since in writehandle for the first call + // lh.getExplicitLastAddConfirmed() will be < + // lh.getPiggyBackedLastAddConfirmed(), + // so it wont make explicit writelac in the first run + TestUtils.waitUntilLacUpdated(rlh, 2 * numOfEntries - 2); + + assertTrue((wlh.getLastAddConfirmed() == (2 * numOfEntries - 1)), + "Expected LAC of wlh: " + (2 * numOfEntries - 1) + " actual LAC of wlh: " + wlh + .getLastAddConfirmed()); + // readhandle's lastaddconfirmed wont be updated until readExplicitLastConfirmed call is 
made + assertTrue((rlh.getLastAddConfirmed() == (2 * numOfEntries - 2)), + "Expected LAC of rlh: " + (2 * numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + long explicitlac = TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 1); + assertTrue((explicitlac == (2 * numOfEntries - 1)), + "Expected Explicit LAC of rlh: " + (2 * numOfEntries - 1) + " actual ExplicitLAC of rlh: " + + explicitlac); + // readExplicitLastConfirmed updates the lac of rlh. + assertTrue((rlh.getLastAddConfirmed() == (2 * numOfEntries - 1)), + "Expected LAC of rlh: " + (2 * numOfEntries - 1) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + Enumeration entries = rlh.readEntries(numOfEntries, 2 * numOfEntries - 1); + int entryId = numOfEntries; + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + String entryString = new String(entry.getEntry()); + assertEquals(entryString, "foobar" + entryId, + "Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + + entryString); + entryId++; + } + + rlh.close(); + wlh.close(); + bkcWithExplicitLAC.close(); + } + + @MethodSource("configs") + @ParameterizedTest + public void readHandleWithExplicitLACAndDeferredSync(Class storageClass) + throws Exception { + initExplicitLacTest(storageClass); + ClientConfiguration confWithExplicitLAC = new ClientConfiguration(); + confWithExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + int explicitLacIntervalMillis = 1000; + confWithExplicitLAC.setExplictLacInterval(explicitLacIntervalMillis); + + BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithExplicitLAC); + + LedgerHandle wlh = (LedgerHandle) bkcWithExplicitLAC.newCreateLedgerOp().withEnsembleSize(1) + .withWriteQuorumSize(1).withAckQuorumSize(1).withWriteFlags(WriteFlag.DEFERRED_SYNC) + .withDigestType(digestType.toApiDigestType()).withPassword("testPasswd".getBytes()) + .execute().get(); + long ledgerId = wlh.getId(); + + // start like 
testReadHandleWithExplicitLAC + int numOfEntries = 5; + for (int i = 0; i < numOfEntries; i++) { + // if you perform force() + addEntry() you will piggy back LAC as usual + wlh.force().get(); + wlh.addEntry(("foobar" + i).getBytes()); + } + + LedgerHandle rlh = bkcWithExplicitLAC + .openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes()); + + assertTrue((rlh.getLastAddConfirmed() == (numOfEntries - 2)), + "Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + for (int i = numOfEntries; i < 2 * numOfEntries; i++) { + wlh.addEntry(("foobar" + i).getBytes()); + } + + // running a force() will update local LAC on the writer + // ExplicitLAC timer will send the value even without writes + wlh.force().get(); + + // wait for explicit lac to be sent to bookies + TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 2); + + // we need to wait for atleast 2 explicitlacintervals, + // since in writehandle for the first call + // lh.getExplicitLastAddConfirmed() will be < + // lh.getPiggyBackedLastAddConfirmed(), + // so it wont make explicit writelac in the first run + TestUtils.waitUntilLacUpdated(rlh, 2 * numOfEntries - 2); + + assertTrue((wlh.getLastAddConfirmed() == (2 * numOfEntries - 1)), + "Expected LAC of wlh: " + (2 * numOfEntries - 1) + " actual LAC of wlh: " + wlh + .getLastAddConfirmed()); + + long explicitlac = TestUtils.waitUntilExplicitLacUpdated(rlh, 2 * numOfEntries - 1); + assertTrue((explicitlac == (2 * numOfEntries - 1)), + "Expected Explicit LAC of rlh: " + (2 * numOfEntries - 1) + " actual ExplicitLAC of rlh: " + + explicitlac); + // readExplicitLastConfirmed updates the lac of rlh. 
+ assertTrue((rlh.getLastAddConfirmed() == (2 * numOfEntries - 1)), + "Expected LAC of rlh: " + (2 * numOfEntries - 1) + " actual LAC of rlh: " + rlh + .getLastAddConfirmed()); + + Enumeration entries = rlh.readEntries(numOfEntries, 2 * numOfEntries - 1); + int entryId = numOfEntries; + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + String entryString = new String(entry.getEntry()); + assertEquals(entryString, "foobar" + entryId, + "Expected entry String: " + ("foobar" + entryId) + " actual entry String: " + + entryString); + entryId++; + } + + rlh.close(); + wlh.close(); + bkcWithExplicitLAC.close(); + } + + @MethodSource("configs") + @ParameterizedTest + public void fallbackV3(Class storageClass) throws Exception { + initExplicitLacTest(storageClass); + ClientConfiguration v2Conf = new ClientConfiguration(); + v2Conf.setUseV2WireProtocol(true); + v2Conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + v2Conf.setExplictLacInterval(10); + + BookKeeper bookKeeper = new BookKeeper(v2Conf); + LedgerHandle write = bookKeeper.createLedger(1, 1, 1, DigestType.MAC, "pass".getBytes()); + write.addEntry("test".getBytes()); + TestUtils.waitUntilExplicitLacUpdated(write, 0); + long lac = write.readExplicitLastConfirmed(); + assertEquals(0, lac); + write.close(); + bookKeeper.close(); + } + } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/GenericEnsemblePlacementPolicyTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/GenericEnsemblePlacementPolicyTest.java index 9a30b5930d0..6891c14eb90 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/GenericEnsemblePlacementPolicyTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/GenericEnsemblePlacementPolicyTest.java @@ -17,10 +17,10 @@ */ package org.apache.bookkeeper.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static 
org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -32,134 +32,150 @@ import java.util.Set; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; /** * Testing a generic ensemble placement policy. */ -@RunWith(Parameterized.class) public class GenericEnsemblePlacementPolicyTest extends BookKeeperClusterTestCase { - private BookKeeper.DigestType digestType = BookKeeper.DigestType.CRC32; - private static final String PASSWORD = "testPasswd"; - private static final String property = "foo"; - private static final byte[] value = "bar".getBytes(StandardCharsets.UTF_8); - private static List> customMetadataOnNewEnsembleStack = new ArrayList<>(); - private static List> customMetadataOnReplaceBookieStack = new ArrayList<>(); - - @Parameters - public static Collection getDiskWeightBasedPlacementEnabled() { - return Arrays.asList(new Object[][] { { false }, { true } }); + private static final String PASSWORD = "testPasswd"; + + private static final String property = "foo"; + + private static final byte[] value = "bar".getBytes(StandardCharsets.UTF_8); + + private static final List> customMetadataOnNewEnsembleStack = new ArrayList<>(); + + private static final List> customMetadataOnReplaceBookieStack = new ArrayList<>(); + + private final BookKeeper.DigestType digestType = 
BookKeeper.DigestType.CRC32; + + public GenericEnsemblePlacementPolicyTest() { + super(0); + } + + public static Collection getDiskWeightBasedPlacementEnabled() { + return Arrays.asList(new Object[][]{{false}, {true}}); + } + + public void initGenericEnsemblePlacementPolicyTest(boolean diskWeightBasedPlacementEnabled) { + baseClientConf.setEnsemblePlacementPolicy(CustomEnsemblePlacementPolicy.class); + baseClientConf.setDiskWeightBasedPlacementEnabled(diskWeightBasedPlacementEnabled); + } + + @BeforeEach + void reset() { + customMetadataOnNewEnsembleStack.clear(); + customMetadataOnReplaceBookieStack.clear(); + } + + @MethodSource("getDiskWeightBasedPlacementEnabled") + @ParameterizedTest + public void newEnsemble(boolean diskWeightBasedPlacementEnabled) throws Exception { + initGenericEnsemblePlacementPolicyTest(diskWeightBasedPlacementEnabled); + numBookies = 1; + startBKCluster(zkUtil.getMetadataServiceUri()); + try { + Map customMetadata = new HashMap<>(); + customMetadata.put(property, value); + try (BookKeeper bk = new BookKeeper(baseClientConf, zkc)) { + bk.createLedger(1, 1, 1, digestType, PASSWORD.getBytes(), customMetadata); + } + assertEquals(1, customMetadataOnNewEnsembleStack.size()); + assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); + } finally { + stopBKCluster(); } - - public GenericEnsemblePlacementPolicyTest(boolean diskWeightBasedPlacementEnabled) { - super(0); - baseClientConf.setEnsemblePlacementPolicy(CustomEnsemblePlacementPolicy.class); - baseClientConf.setDiskWeightBasedPlacementEnabled(diskWeightBasedPlacementEnabled); + } + + @MethodSource("getDiskWeightBasedPlacementEnabled") + @ParameterizedTest + public void newEnsembleWithNotEnoughBookies(boolean diskWeightBasedPlacementEnabled) + throws Exception { + initGenericEnsemblePlacementPolicyTest(diskWeightBasedPlacementEnabled); + numBookies = 0; + try { + startBKCluster(zkUtil.getMetadataServiceUri()); + Map customMetadata = new HashMap<>(); + 
customMetadata.put(property, value); + try (BookKeeper bk = new BookKeeper(baseClientConf, zkc)) { + bk.createLedger(1, 1, 1, digestType, PASSWORD.getBytes(), customMetadata); + fail("creation should fail"); + } catch (BKException.BKNotEnoughBookiesException bneb) { + } + assertEquals(2, customMetadataOnNewEnsembleStack.size()); + assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); + assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(1).get(property)); + } finally { + stopBKCluster(); } - - /** - * A custom ensemble placement policy. - */ - public static final class CustomEnsemblePlacementPolicy extends DefaultEnsemblePlacementPolicy { - - @Override - public PlacementResult replaceBookie(int ensembleSize, int writeQuorumSize, - int ackQuorumSize, Map customMetadata, List currentEnsemble, - BookieId bookieToReplace, Set excludeBookies) - throws BKException.BKNotEnoughBookiesException { - new Exception("replaceBookie " + ensembleSize + "," + customMetadata).printStackTrace(); - assertNotNull(customMetadata); - customMetadataOnReplaceBookieStack.add(customMetadata); - return super.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, - currentEnsemble, bookieToReplace, excludeBookies); + } + + @MethodSource("getDiskWeightBasedPlacementEnabled") + @ParameterizedTest + public void replaceBookie(boolean diskWeightBasedPlacementEnabled) throws Exception { + initGenericEnsemblePlacementPolicyTest(diskWeightBasedPlacementEnabled); + numBookies = 3; + startBKCluster(zkUtil.getMetadataServiceUri()); + try { + Map customMetadata = new HashMap<>(); + customMetadata.put(property, value); + try (BookKeeper bk = new BookKeeper(baseClientConf, zkc)) { + try (LedgerHandle lh = bk + .createLedger(2, 2, 2, digestType, PASSWORD.getBytes(), customMetadata)) { + lh.addEntry(value); + long lId = lh.getId(); + List ensembleAtFirstEntry = lh.getLedgerMetadata().getEnsembleAt(lId); + assertEquals(2, ensembleAtFirstEntry.size()); + 
killBookie(ensembleAtFirstEntry.get(0)); + lh.addEntry(value); } + } + assertEquals(2, customMetadataOnNewEnsembleStack.size()); + assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); + // replaceBookie by default calls newEnsemble, so newEnsemble gets called twice + assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); - @Override - public PlacementResult> newEnsemble(int ensembleSize, int quorumSize, - int ackQuorumSize, Map customMetadata, Set excludeBookies) - throws BKException.BKNotEnoughBookiesException { - assertNotNull(customMetadata); - customMetadataOnNewEnsembleStack.add(customMetadata); - return super.newEnsemble(ensembleSize, quorumSize, ackQuorumSize, customMetadata, excludeBookies); - } - } + assertEquals(1, customMetadataOnReplaceBookieStack.size()); + assertArrayEquals(value, customMetadataOnReplaceBookieStack.get(0).get(property)); - @Before - public void reset() { - customMetadataOnNewEnsembleStack.clear(); - customMetadataOnReplaceBookieStack.clear(); + } finally { + stopBKCluster(); } - - @Test - public void testNewEnsemble() throws Exception { - numBookies = 1; - startBKCluster(zkUtil.getMetadataServiceUri()); - try { - Map customMetadata = new HashMap<>(); - customMetadata.put(property, value); - try (BookKeeper bk = new BookKeeper(baseClientConf, zkc)) { - bk.createLedger(1, 1, 1, digestType, PASSWORD.getBytes(), customMetadata); - } - assertEquals(1, customMetadataOnNewEnsembleStack.size()); - assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); - } finally { - stopBKCluster(); - } + } + + /** + * A custom ensemble placement policy. 
+ */ + public static final class CustomEnsemblePlacementPolicy extends DefaultEnsemblePlacementPolicy { + + @Override + public PlacementResult replaceBookie(int ensembleSize, int writeQuorumSize, + int ackQuorumSize, + Map customMetadata, List currentEnsemble, + BookieId bookieToReplace, + Set excludeBookies) throws BKException.BKNotEnoughBookiesException { + new Exception("replaceBookie " + ensembleSize + "," + customMetadata).printStackTrace(); + assertNotNull(customMetadata); + customMetadataOnReplaceBookieStack.add(customMetadata); + return super.replaceBookie(ensembleSize, writeQuorumSize, ackQuorumSize, customMetadata, + currentEnsemble, + bookieToReplace, excludeBookies); } - @Test - public void testNewEnsembleWithNotEnoughBookies() throws Exception { - numBookies = 0; - try { - startBKCluster(zkUtil.getMetadataServiceUri()); - Map customMetadata = new HashMap<>(); - customMetadata.put(property, value); - try (BookKeeper bk = new BookKeeper(baseClientConf, zkc)) { - bk.createLedger(1, 1, 1, digestType, PASSWORD.getBytes(), customMetadata); - fail("creation should fail"); - } catch (BKException.BKNotEnoughBookiesException bneb) { - } - assertEquals(2, customMetadataOnNewEnsembleStack.size()); - assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); - assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(1).get(property)); - } finally { - stopBKCluster(); - } - } - - @Test - public void testReplaceBookie() throws Exception { - numBookies = 3; - startBKCluster(zkUtil.getMetadataServiceUri()); - try { - Map customMetadata = new HashMap<>(); - customMetadata.put(property, value); - try (BookKeeper bk = new BookKeeper(baseClientConf, zkc)) { - try (LedgerHandle lh = bk.createLedger(2, 2, 2, digestType, PASSWORD.getBytes(), customMetadata)) { - lh.addEntry(value); - long lId = lh.getId(); - List ensembleAtFirstEntry = lh.getLedgerMetadata().getEnsembleAt(lId); - assertEquals(2, ensembleAtFirstEntry.size()); - 
killBookie(ensembleAtFirstEntry.get(0)); - lh.addEntry(value); - } - } - assertEquals(2, customMetadataOnNewEnsembleStack.size()); - assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); - // replaceBookie by default calls newEnsemble, so newEnsemble gets called twice - assertArrayEquals(value, customMetadataOnNewEnsembleStack.get(0).get(property)); - - assertEquals(1, customMetadataOnReplaceBookieStack.size()); - assertArrayEquals(value, customMetadataOnReplaceBookieStack.get(0).get(property)); - - } finally { - stopBKCluster(); - } + @Override + public PlacementResult> newEnsemble(int ensembleSize, int quorumSize, + int ackQuorumSize, + Map customMetadata, Set excludeBookies) + throws BKException.BKNotEnoughBookiesException { + assertNotNull(customMetadata); + customMetadataOnNewEnsembleStack.add(customMetadata); + return super + .newEnsemble(ensembleSize, quorumSize, ackQuorumSize, customMetadata, excludeBookies); } + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java index d1182668e9c..dca7de3af7e 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java @@ -21,6 +21,10 @@ package org.apache.bookkeeper.client; import static org.apache.bookkeeper.util.TestUtils.assertEventuallyTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.times; @@ -39,463 +43,484 @@ import org.apache.bookkeeper.net.BookieSocketAddress; import 
org.apache.bookkeeper.proto.MockBookieClient; import org.apache.bookkeeper.versioning.Versioned; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Ledger recovery tests using mocks rather than a real cluster. */ -public class HandleFailuresTest { - private static final Logger log = LoggerFactory.getLogger(LedgerRecovery2Test.class); - - private static final BookieId b1 = new BookieSocketAddress("b1", 3181).toBookieId(); - private static final BookieId b2 = new BookieSocketAddress("b2", 3181).toBookieId(); - private static final BookieId b3 = new BookieSocketAddress("b3", 3181).toBookieId(); - private static final BookieId b4 = new BookieSocketAddress("b4", 3181).toBookieId(); - private static final BookieId b5 = new BookieSocketAddress("b5", 3181).toBookieId(); - - @Test(timeout = 30000) - public void testChangeTriggeredOneTimeForOneFailure() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create().newEnsembleEntry( - 0L, Lists.newArrayList(b1, b2, b3))); - - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b1); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.appendAsync("entry1".getBytes()); - lh.appendAsync("entry2".getBytes()); - lh.appendAsync("entry3".getBytes()); - lh.appendAsync("entry4".getBytes()); - lh.appendAsync("entry5".getBytes()).get(); - - verify(clientCtx.getLedgerManager(), times(1)).writeLedgerMetadata(anyLong(), any(), any()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b3)); +class HandleFailuresTest { + + private static final Logger 
log = LoggerFactory.getLogger(LedgerRecovery2Test.class); + + private static final BookieId b1 = new BookieSocketAddress("b1", 3181).toBookieId(); + private static final BookieId b2 = new BookieSocketAddress("b2", 3181).toBookieId(); + private static final BookieId b3 = new BookieSocketAddress("b3", 3181).toBookieId(); + private static final BookieId b4 = new BookieSocketAddress("b4", 3181).toBookieId(); + private static final BookieId b5 = new BookieSocketAddress("b5", 3181).toBookieId(); + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void changeTriggeredOneTimeForOneFailure() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = ClientUtil.setupLedger(clientCtx, 10L, + LedgerMetadataBuilder.create().newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b1); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.appendAsync("entry1".getBytes()); + lh.appendAsync("entry2".getBytes()); + lh.appendAsync("entry3".getBytes()); + lh.appendAsync("entry4".getBytes()); + lh.appendAsync("entry5".getBytes()).get(); + + verify(clientCtx.getLedgerManager(), times(1)).writeLedgerMetadata(anyLong(), any(), any()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b3)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void secondFailureOccursWhileFirstBeingHandled() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + 
clientCtx.getMockRegistrationClient().addBookies(b4, b5).get(); + CompletableFuture b2blocker = new CompletableFuture<>(); + clientCtx.getMockBookieClient().setPreWriteHook((bookie, ledgerId, entryId) -> { + if (bookie.equals(b1)) { + return FutureUtils.exception(new BKException.BKWriteException()); + } else if (bookie.equals(b2)) { + return b2blocker; + } else { + return FutureUtils.value(null); + } + }); + CompletableFuture metadataNotifier = new CompletableFuture<>(); + CompletableFuture metadataBlocker = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + metadataNotifier.complete(null); + return metadataBlocker; + }); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.appendAsync("entry1".getBytes()); + lh.appendAsync("entry2".getBytes()); + lh.appendAsync("entry3".getBytes()); + lh.appendAsync("entry4".getBytes()); + CompletableFuture future = lh.appendAsync("entry5".getBytes()); + + metadataNotifier.get(); // wait for first metadata write to occur + b2blocker.completeExceptionally(new BKException.BKWriteException()); // make b2 requests fail + metadataBlocker.complete(null); + + future.get(); + verify(clientCtx.getLedgerManager(), times(2)).writeLedgerMetadata(anyLong(), any(), any()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertTrue(lh.getLedgerMetadata().getAllEnsembles().get(0L).contains(b3)); + assertTrue(lh.getLedgerMetadata().getAllEnsembles().get(0L).contains(b4)); + assertTrue(lh.getLedgerMetadata().getAllEnsembles().get(0L).contains(b5)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void handlingFailuresOneBookieFailsImmediately() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + 
.withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b1); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.append("entry1".getBytes()); + lh.close(); + + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b3)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void handlingFailuresOneBookieFailsAfterOneEntry() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.append("entry1".getBytes()); + clientCtx.getMockBookieClient().errorBookies(b1); + lh.append("entry2".getBytes()); + lh.close(); + + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(2, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b4, b2, b3)); + assertEquals(1L, lh.getLedgerMetadata().getLastEntryId()); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void handlingFailuresMultipleBookieFailImmediatelyNotEnoughToReplace() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + 
ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockBookieClient().errorBookies(b1, b2); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + try { + lh.append("entry1".getBytes()); + fail("Shouldn't have been able to add"); + } catch (BKException.BKNotEnoughBookiesException bke) { + // correct behaviour + assertEventuallyTrue("Failure to add should trigger ledger closure", + () -> lh.getLedgerMetadata().isClosed()); + assertEquals(LedgerHandle.INVALID_ENTRY_ID, lh.getLedgerMetadata().getLastEntryId(), + "Ledger should be empty"); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size(), + "Should be only one ensemble"); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3), + "Ensemble shouldn't have changed"); } - - @Test(timeout = 30000) - public void testSecondFailureOccursWhileFirstBeingHandled() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - clientCtx.getMockRegistrationClient().addBookies(b4, b5).get(); - CompletableFuture b2blocker = new CompletableFuture<>(); - clientCtx.getMockBookieClient().setPreWriteHook( - (bookie, ledgerId, entryId) -> { - if (bookie.equals(b1)) { - return FutureUtils.exception(new BKException.BKWriteException()); - } else if (bookie.equals(b2)) { - return b2blocker; - } else { - return FutureUtils.value(null); - } - }); - CompletableFuture metadataNotifier = new CompletableFuture<>(); - CompletableFuture metadataBlocker = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook( - 
(ledgerId, metadata) -> { - metadataNotifier.complete(null); - return metadataBlocker; - }); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.appendAsync("entry1".getBytes()); - lh.appendAsync("entry2".getBytes()); - lh.appendAsync("entry3".getBytes()); - lh.appendAsync("entry4".getBytes()); - CompletableFuture future = lh.appendAsync("entry5".getBytes()); - - metadataNotifier.get(); // wait for first metadata write to occur - b2blocker.completeExceptionally(new BKException.BKWriteException()); // make b2 requests fail - metadataBlocker.complete(null); - - future.get(); - verify(clientCtx.getLedgerManager(), times(2)).writeLedgerMetadata(anyLong(), any(), any()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertTrue(lh.getLedgerMetadata().getAllEnsembles().get(0L).contains(b3)); - Assert.assertTrue(lh.getLedgerMetadata().getAllEnsembles().get(0L).contains(b4)); - Assert.assertTrue(lh.getLedgerMetadata().getAllEnsembles().get(0L).contains(b5)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void handlingFailuresMultipleBookieFailAfterOneEntryNotEnoughToReplace() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.append("entry1".getBytes()); + + clientCtx.getMockBookieClient().errorBookies(b1, b2); + + try { + lh.append("entry2".getBytes()); + fail("Shouldn't have been able to add"); + } catch (BKException.BKNotEnoughBookiesException bke) { + // correct behaviour + assertEventuallyTrue("Failure to add should trigger ledger closure", + () -> 
lh.getLedgerMetadata().isClosed()); + assertEquals(0L, lh.getLedgerMetadata().getLastEntryId(), "Ledger should be empty"); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size(), + "Should be only one ensemble"); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3), + "Ensemble shouldn't have changed"); } - - @Test(timeout = 30000) - public void testHandlingFailuresOneBookieFailsImmediately() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b1); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.append("entry1".getBytes()); - lh.close(); - - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b3)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void clientClosesWhileFailureHandlerInProgress() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b2); + + CompletableFuture changeInProgress = new CompletableFuture<>(); + CompletableFuture blockEnsembleChange = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, 
metadata) -> { + // block the write trying to replace b2 with b4 + if (metadata.getAllEnsembles().get(0L).get(1).equals(b4)) { + changeInProgress.complete(null); + return blockEnsembleChange; + } else { + return FutureUtils.value(null); + } + }); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + CompletableFuture future = lh.appendAsync("entry1".getBytes()); + changeInProgress.get(); + + lh.close(); + + blockEnsembleChange.complete(null); // allow ensemble change to continue + try { + future.get(); + fail("Add shouldn't have succeeded"); + } catch (ExecutionException ee) { + assertEquals(BKException.BKLedgerClosedException.class, ee.getCause().getClass()); } - - @Test(timeout = 30000) - public void testHandlingFailuresOneBookieFailsAfterOneEntry() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.append("entry1".getBytes()); - clientCtx.getMockBookieClient().errorBookies(b1); - lh.append("entry2".getBytes()); - lh.close(); - - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 2); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b4, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), 1L); + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + 
assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(LedgerHandle.INVALID_ENTRY_ID, lh.getLedgerMetadata().getLastEntryId()); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void metadataSetToClosedDuringFailureHandler() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b2); + + CompletableFuture changeInProgress = new CompletableFuture<>(); + CompletableFuture blockEnsembleChange = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + if (metadata.getAllEnsembles().get(0L).get(1).equals(b4)) { + // block the write trying to replace b2 with b4 + changeInProgress.complete(null); + return blockEnsembleChange; + } else { + return FutureUtils.value(null); + } + }); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + CompletableFuture future = lh.appendAsync("entry1".getBytes()); + changeInProgress.get(); + + ClientUtil.transformMetadata(clientCtx, 10L, (metadata) -> LedgerMetadataBuilder.from(metadata) + .withClosedState().withLastEntryId(1234L).withLength(10L).build()); + + blockEnsembleChange.complete(null); // allow ensemble change to continue + try { + future.get(); + fail("Add shouldn't have succeeded"); + } catch (ExecutionException ee) { + assertEquals(BKException.BKLedgerClosedException.class, ee.getCause().getClass()); } - - @Test(timeout = 30000) - public void testHandlingFailuresMultipleBookieFailImmediatelyNotEnoughoReplace() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); 
- Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockBookieClient().errorBookies(b1, b2); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - try { - lh.append("entry1".getBytes()); - Assert.fail("Shouldn't have been able to add"); - } catch (BKException.BKNotEnoughBookiesException bke) { - // correct behaviour - assertEventuallyTrue("Failure to add should trigger ledger closure", - () -> lh.getLedgerMetadata().isClosed()); - Assert.assertEquals("Ledger should be empty", - lh.getLedgerMetadata().getLastEntryId(), LedgerHandle.INVALID_ENTRY_ID); - Assert.assertEquals("Should be only one ensemble", lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals("Ensemble shouldn't have changed", lh.getLedgerMetadata().getAllEnsembles().get(0L), - Lists.newArrayList(b1, b2, b3)); - } + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(1234L, lh.getLedgerMetadata().getLastEntryId()); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void metadataSetToInRecoveryDuringFailureHandler() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b2); + + CompletableFuture changeInProgress = new CompletableFuture<>(); + CompletableFuture blockEnsembleChange = new 
CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + if (metadata.getAllEnsembles().get(0L).get(1).equals(b4)) { + // block the write trying to replace b2 with b4 + changeInProgress.complete(null); + return blockEnsembleChange; + } else { + return FutureUtils.value(null); + } + }); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + CompletableFuture future = lh.appendAsync("entry1".getBytes()); + changeInProgress.get(); + + ClientUtil.transformMetadata(clientCtx, 10L, + (metadata) -> LedgerMetadataBuilder.from(metadata).withInRecoveryState().build()); + + blockEnsembleChange.complete(null); // allow ensemble change to continue + try { + future.get(); + fail("Add shouldn't have succeeded"); + } catch (ExecutionException ee) { + assertEquals(BKException.BKLedgerFencedException.class, ee.getCause().getClass()); } - - @Test(timeout = 30000) - public void testHandlingFailuresMultipleBookieFailAfterOneEntryNotEnoughoReplace() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.append("entry1".getBytes()); - - clientCtx.getMockBookieClient().errorBookies(b1, b2); - - try { - lh.append("entry2".getBytes()); - Assert.fail("Shouldn't have been able to add"); - } catch (BKException.BKNotEnoughBookiesException bke) { - // correct behaviour - assertEventuallyTrue("Failure to add should trigger ledger closure", - () -> lh.getLedgerMetadata().isClosed()); - Assert.assertEquals("Ledger should be empty", lh.getLedgerMetadata().getLastEntryId(), 0L); - Assert.assertEquals("Should be only 
one ensemble", lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals("Ensemble shouldn't have changed", lh.getLedgerMetadata().getAllEnsembles().get(0L), - Lists.newArrayList(b1, b2, b3)); - } - } - - @Test(timeout = 30000) - public void testClientClosesWhileFailureHandlerInProgress() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b2); - - CompletableFuture changeInProgress = new CompletableFuture<>(); - CompletableFuture blockEnsembleChange = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to replace b2 with b4 - if (metadata.getAllEnsembles().get(0L).get(1).equals(b4)) { - changeInProgress.complete(null); - return blockEnsembleChange; - } else { - return FutureUtils.value(null); - } - }); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - CompletableFuture future = lh.appendAsync("entry1".getBytes()); - changeInProgress.get(); - - lh.close(); - - blockEnsembleChange.complete(null); // allow ensemble change to continue - try { - future.get(); - Assert.fail("Add shouldn't have succeeded"); - } catch (ExecutionException ee) { - Assert.assertEquals(ee.getCause().getClass(), BKException.BKLedgerClosedException.class); - } - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), 
LedgerHandle.INVALID_ENTRY_ID); - } - - @Test(timeout = 30000) - public void testMetadataSetToClosedDuringFailureHandler() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b2); - - CompletableFuture changeInProgress = new CompletableFuture<>(); - CompletableFuture blockEnsembleChange = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - if (metadata.getAllEnsembles().get(0L).get(1).equals(b4)) { - // block the write trying to replace b2 with b4 - changeInProgress.complete(null); - return blockEnsembleChange; - } else { - return FutureUtils.value(null); - } - }); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - CompletableFuture future = lh.appendAsync("entry1".getBytes()); - changeInProgress.get(); - - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata) - .withClosedState().withLastEntryId(1234L).withLength(10L).build()); - - blockEnsembleChange.complete(null); // allow ensemble change to continue - try { - future.get(); - Assert.fail("Add shouldn't have succeeded"); - } catch (ExecutionException ee) { - Assert.assertEquals(ee.getCause().getClass(), BKException.BKLedgerClosedException.class); - } - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), 1234L); - } - - @Test(timeout = 30000) - public void 
testMetadataSetToInRecoveryDuringFailureHandler() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b2); - - CompletableFuture changeInProgress = new CompletableFuture<>(); - CompletableFuture blockEnsembleChange = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - if (metadata.getAllEnsembles().get(0L).get(1).equals(b4)) { - // block the write trying to replace b2 with b4 - changeInProgress.complete(null); - return blockEnsembleChange; - } else { - return FutureUtils.value(null); - } - }); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - CompletableFuture future = lh.appendAsync("entry1".getBytes()); - changeInProgress.get(); - - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata).withInRecoveryState().build()); - - blockEnsembleChange.complete(null); // allow ensemble change to continue - try { - future.get(); - Assert.fail("Add shouldn't have succeeded"); - } catch (ExecutionException ee) { - Assert.assertEquals(ee.getCause().getClass(), BKException.BKLedgerFencedException.class); - } - Assert.assertFalse(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - } - - @Test(timeout = 30000) - public void testOldEnsembleChangedDuringFailureHandler() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - 
LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.append("entry1".getBytes()); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b3); - lh.append("entry2".getBytes()); - - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 2); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); - - - CompletableFuture changeInProgress = new CompletableFuture<>(); - CompletableFuture blockEnsembleChange = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to replace b1 with b5 - if (metadata.getAllEnsembles().size() > 2 - && metadata.getAllEnsembles().get(2L).get(0).equals(b5)) { - changeInProgress.complete(null); - return blockEnsembleChange; - } else { - return FutureUtils.value(null); - } - }); - - clientCtx.getMockRegistrationClient().addBookies(b5).get(); - clientCtx.getMockBookieClient().errorBookies(b1); - - CompletableFuture future = lh.appendAsync("entry3".getBytes()); - changeInProgress.get(); - - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata).replaceEnsembleEntry( - 0L, Lists.newArrayList(b4, b2, b5)).build()); - - blockEnsembleChange.complete(null); // allow ensemble change to continue - future.get(); - - Assert.assertFalse(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 3); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b5)); - 
Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(2L), Lists.newArrayList(b5, b2, b4)); - } - - @Test(timeout = 30000) - public void testNoAddsAreCompletedWhileFailureHandlingInProgress() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b3); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.append("entry1".getBytes()); - - CompletableFuture changeInProgress = new CompletableFuture<>(); - CompletableFuture blockEnsembleChange = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to replace b3 with b4 - if (metadata.getAllEnsembles().get(1L).get(2).equals(b4)) { - changeInProgress.complete(null); - return blockEnsembleChange; - } else { - return FutureUtils.value(null); - } - }); - - CompletableFuture future = lh.appendAsync("entry2".getBytes()); - changeInProgress.get(); - try { - future.get(1, TimeUnit.SECONDS); - Assert.fail("Shouldn't complete"); - } catch (TimeoutException te) { - } - blockEnsembleChange.complete(null); - future.get(); - - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 2); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); - } - - @Test(timeout = 30000) - public void testHandleFailureBookieNotInWriteSet() throws Exception { - MockClientContext 
clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(2).withAckQuorumSize(1) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - - CompletableFuture b1Delay = new CompletableFuture<>(); - // Delay the first write to b1, then error it - clientCtx.getMockBookieClient().setPreWriteHook((bookie, ledgerId, entryId) -> { - if (bookie.equals(b1)) { - return b1Delay; - } else { - return FutureUtils.value(null); - } - }); - - CompletableFuture changeInProgress = new CompletableFuture<>(); - CompletableFuture blockEnsembleChange = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - changeInProgress.complete(null); - return blockEnsembleChange; - }); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - log.info("b2 should be enough to complete first add"); - lh.append("entry1".getBytes()); - - log.info("when b1 completes with failure, handleFailures should kick off"); - b1Delay.completeExceptionally(new BKException.BKWriteException()); - - log.info("write second entry, should have enough bookies, but blocks completion on failure handling"); - AtomicReference> e2 = new AtomicReference<>(); - - // Execute appendAsync at the same thread of preWriteHook exception thread. So that the - // `delayedWriteFailedBookies` could update before appendAsync invoke. 
- ((MockBookieClient) clientCtx.getBookieClient()).getExecutor() - .chooseThread(lh.ledgerId) - .execute(() -> e2.set(lh.appendAsync("entry2".getBytes()))); - changeInProgress.get(); - assertEventuallyTrue("e2 should eventually complete", () -> lh.pendingAddOps.peek().completed); - Assert.assertFalse("e2 shouldn't be completed to client", e2.get().isDone()); - blockEnsembleChange.complete(null); // allow ensemble change to continue - - log.info("e2 should complete"); - e2.get().get(10, TimeUnit.SECONDS); + assertFalse(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void oldEnsembleChangedDuringFailureHandler() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.append("entry1".getBytes()); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b3); + lh.append("entry2".getBytes()); + + assertEquals(2, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); + + CompletableFuture changeInProgress = new CompletableFuture<>(); + CompletableFuture blockEnsembleChange = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + // block the write trying to replace b1 with b5 + if 
(metadata.getAllEnsembles().size() > 2 && metadata.getAllEnsembles().get(2L).get(0) + .equals(b5)) { + changeInProgress.complete(null); + return blockEnsembleChange; + } else { + return FutureUtils.value(null); + } + }); + + clientCtx.getMockRegistrationClient().addBookies(b5).get(); + clientCtx.getMockBookieClient().errorBookies(b1); + + CompletableFuture future = lh.appendAsync("entry3".getBytes()); + changeInProgress.get(); + + ClientUtil.transformMetadata(clientCtx, 10L, (metadata) -> LedgerMetadataBuilder.from(metadata) + .replaceEnsembleEntry(0L, Lists.newArrayList(b4, b2, b5)).build()); + + blockEnsembleChange.complete(null); // allow ensemble change to continue + future.get(); + + assertFalse(lh.getLedgerMetadata().isClosed()); + assertEquals(3, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b5)); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(2L), Lists.newArrayList(b5, b2, b4)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void noAddsAreCompletedWhileFailureHandlingInProgress() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(2) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b3); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.append("entry1".getBytes()); + + CompletableFuture changeInProgress = new CompletableFuture<>(); + CompletableFuture blockEnsembleChange = new CompletableFuture<>(); + 
clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + // block the write trying to replace b3 with b4 + if (metadata.getAllEnsembles().get(1L).get(2).equals(b4)) { + changeInProgress.complete(null); + return blockEnsembleChange; + } else { + return FutureUtils.value(null); + } + }); + + CompletableFuture future = lh.appendAsync("entry2".getBytes()); + changeInProgress.get(); + try { + future.get(1, TimeUnit.SECONDS); + fail("Shouldn't complete"); + } catch (TimeoutException te) { } + blockEnsembleChange.complete(null); + future.get(); + + assertEquals(2, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); + } + + @Test + @Timeout(value = 30000, unit = TimeUnit.MILLISECONDS) + void handleFailureBookieNotInWriteSet() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(2).withAckQuorumSize(1) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + + CompletableFuture b1Delay = new CompletableFuture<>(); + // Delay the first write to b1, then error it + clientCtx.getMockBookieClient().setPreWriteHook((bookie, ledgerId, entryId) -> { + if (bookie.equals(b1)) { + return b1Delay; + } else { + return FutureUtils.value(null); + } + }); + + CompletableFuture changeInProgress = new CompletableFuture<>(); + CompletableFuture blockEnsembleChange = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + changeInProgress.complete(null); + return blockEnsembleChange; + }); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + 
WriteFlag.NONE); + log.info("b2 should be enough to complete first add"); + lh.append("entry1".getBytes()); + + log.info("when b1 completes with failure, handleFailures should kick off"); + b1Delay.completeExceptionally(new BKException.BKWriteException()); + + log.info( + "write second entry, should have enough bookies, but blocks completion on failure handling"); + AtomicReference> e2 = new AtomicReference<>(); + + // Execute appendAsync at the same thread of preWriteHook exception thread. So that the + // `delayedWriteFailedBookies` could update before appendAsync invoke. + ((MockBookieClient) clientCtx.getBookieClient()).getExecutor().chooseThread(lh.ledgerId) + .execute(() -> e2.set(lh.appendAsync("entry2".getBytes()))); + changeInProgress.get(); + assertEventuallyTrue("e2 should eventually complete", () -> lh.pendingAddOps.peek().completed); + assertFalse(e2.get().isDone(), "e2 shouldn't be completed to client"); + blockEnsembleChange.complete(null); // allow ensemble change to continue + + log.info("e2 should complete"); + e2.get().get(10, TimeUnit.SECONDS); + } } diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerClose2Test.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerClose2Test.java index 40f69304828..09aca9847f4 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerClose2Test.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerClose2Test.java @@ -17,6 +17,11 @@ */ package org.apache.bookkeeper.client; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + import com.google.common.collect.Lists; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -26,282 +31,282 @@ import org.apache.bookkeeper.net.BookieId; import 
org.apache.bookkeeper.net.BookieSocketAddress; import org.apache.bookkeeper.versioning.Versioned; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Ledger recovery tests using mocks rather than a real cluster. */ -public class LedgerClose2Test { - private static final Logger log = LoggerFactory.getLogger(LedgerRecovery2Test.class); - - private static final BookieId b1 = new BookieSocketAddress("b1", 3181).toBookieId(); - private static final BookieId b2 = new BookieSocketAddress("b2", 3181).toBookieId(); - private static final BookieId b3 = new BookieSocketAddress("b3", 3181).toBookieId(); - private static final BookieId b4 = new BookieSocketAddress("b4", 3181).toBookieId(); - private static final BookieId b5 = new BookieSocketAddress("b5", 3181).toBookieId(); - - @Test - public void testTryAddAfterCloseHasBeenCalled() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - - for (int i = 0; i < 1000; i++) { - Versioned md = ClientUtil.setupLedger(clientCtx, i, - LedgerMetadataBuilder.create().newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - LedgerHandle lh = new LedgerHandle(clientCtx, i, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - CompletableFuture closeFuture = lh.closeAsync(); - try { - long eid = lh.append("entry".getBytes()); - - // if it succeeds, it should be in final ledge - closeFuture.get(); - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), eid); - } catch (BKException.BKLedgerClosedException bke) { - closeFuture.get(); - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), LedgerHandle.INVALID_ENTRY_ID); - } - } - } - - @Test - public void testMetadataChangedDuringClose() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md 
= ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - lh.append("entry1".getBytes()); - clientCtx.getMockRegistrationClient().addBookies(b4).get(); - clientCtx.getMockBookieClient().errorBookies(b3); - lh.append("entry2".getBytes()); - - CompletableFuture closeInProgress = new CompletableFuture<>(); - CompletableFuture blockClose = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to replace b3 with b4 - if (metadata.isClosed()) { - closeInProgress.complete(null); - return blockClose; - } else { - return FutureUtils.value(null); - } - }); - CompletableFuture closeFuture = lh.closeAsync(); - closeInProgress.get(); - - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata).replaceEnsembleEntry( - 0L, Lists.newArrayList(b4, b2, b5)).build()); - - blockClose.complete(null); +class LedgerClose2Test { + + private static final Logger log = LoggerFactory.getLogger(LedgerRecovery2Test.class); + + private static final BookieId b1 = new BookieSocketAddress("b1", 3181).toBookieId(); + private static final BookieId b2 = new BookieSocketAddress("b2", 3181).toBookieId(); + private static final BookieId b3 = new BookieSocketAddress("b3", 3181).toBookieId(); + private static final BookieId b4 = new BookieSocketAddress("b4", 3181).toBookieId(); + private static final BookieId b5 = new BookieSocketAddress("b5", 3181).toBookieId(); + + @Test + void tryAddAfterCloseHasBeenCalled() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + + for (int i = 0; i < 1000; i++) { + Versioned md = ClientUtil.setupLedger(clientCtx, i, + 
LedgerMetadataBuilder.create().newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + LedgerHandle lh = + new LedgerHandle(clientCtx, i, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + CompletableFuture closeFuture = lh.closeAsync(); + try { + long eid = lh.append("entry".getBytes()); + + // if it succeeds, it should be in final ledge closeFuture.get(); - - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 2); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b5)); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), 1L); - } - - @Test - public void testMetadataCloseWithCorrectLengthDuringClose() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - long lac = lh.append("entry1".getBytes()); - long length = lh.getLength(); - - CompletableFuture closeInProgress = new CompletableFuture<>(); - CompletableFuture blockClose = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to do the first close - if (!closeInProgress.isDone() && metadata.isClosed()) { - closeInProgress.complete(null); - return blockClose; - } else { - return FutureUtils.value(null); - } - }); - CompletableFuture closeFuture = lh.closeAsync(); - closeInProgress.get(); - - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata) - 
.withClosedState().withLastEntryId(lac).withLength(length).build()); - - blockClose.complete(null); + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(lh.getLedgerMetadata().getLastEntryId(), eid); + } catch (BKException.BKLedgerClosedException bke) { closeFuture.get(); - - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), lac); - Assert.assertEquals(lh.getLedgerMetadata().getLength(), length); + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(LedgerHandle.INVALID_ENTRY_ID, lh.getLedgerMetadata().getLastEntryId()); + } } - - @Test - public void testMetadataCloseWithDifferentLengthDuringClose() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - long lac = lh.append("entry1".getBytes()); - long length = lh.getLength(); - - CompletableFuture closeInProgress = new CompletableFuture<>(); - CompletableFuture blockClose = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to do the first close - if (!closeInProgress.isDone() && metadata.isClosed()) { - closeInProgress.complete(null); - return blockClose; - } else { - return FutureUtils.value(null); - } - }); - CompletableFuture closeFuture = lh.closeAsync(); - closeInProgress.get(); - - /* close with different length. 
can happen in cases where there's a write outstanding */ - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata) - .withClosedState().withLastEntryId(lac + 1).withLength(length + 100).build()); - - blockClose.complete(null); - try { - closeFuture.get(); - Assert.fail("Close should fail. Ledger has been closed in a state we don't know how to untangle"); - } catch (ExecutionException ee) { - Assert.assertEquals(ee.getCause().getClass(), BKException.BKMetadataVersionException.class); - } + } + + @Test + void metadataChangedDuringClose() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(2) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + lh.append("entry1".getBytes()); + clientCtx.getMockRegistrationClient().addBookies(b4).get(); + clientCtx.getMockBookieClient().errorBookies(b3); + lh.append("entry2".getBytes()); + + CompletableFuture closeInProgress = new CompletableFuture<>(); + CompletableFuture blockClose = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + // block the write trying to replace b3 with b4 + if (metadata.isClosed()) { + closeInProgress.complete(null); + return blockClose; + } else { + return FutureUtils.value(null); + } + }); + CompletableFuture closeFuture = lh.closeAsync(); + closeInProgress.get(); + + ClientUtil.transformMetadata(clientCtx, 10L, (metadata) -> LedgerMetadataBuilder.from(metadata) + .replaceEnsembleEntry(0L, Lists.newArrayList(b4, b2, b5)).build()); + + blockClose.complete(null); + closeFuture.get(); + + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(2, lh.getLedgerMetadata().getAllEnsembles().size()); + 
assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b4, b2, b5)); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(1L), Lists.newArrayList(b1, b2, b4)); + assertEquals(1L, lh.getLedgerMetadata().getLastEntryId()); + } + + @Test + void metadataCloseWithCorrectLengthDuringClose() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(2) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + long lac = lh.append("entry1".getBytes()); + long length = lh.getLength(); + + CompletableFuture closeInProgress = new CompletableFuture<>(); + CompletableFuture blockClose = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + // block the write trying to do the first close + if (!closeInProgress.isDone() && metadata.isClosed()) { + closeInProgress.complete(null); + return blockClose; + } else { + return FutureUtils.value(null); + } + }); + CompletableFuture closeFuture = lh.closeAsync(); + closeInProgress.get(); + + ClientUtil.transformMetadata(clientCtx, 10L, (metadata) -> LedgerMetadataBuilder.from(metadata) + .withClosedState().withLastEntryId(lac).withLength(length).build()); + + blockClose.complete(null); + closeFuture.get(); + + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(lh.getLedgerMetadata().getLastEntryId(), lac); + assertEquals(lh.getLedgerMetadata().getLength(), length); + } + + @Test + void metadataCloseWithDifferentLengthDuringClose() throws Exception { + MockClientContext clientCtx = 
MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(2) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + long lac = lh.append("entry1".getBytes()); + long length = lh.getLength(); + + CompletableFuture closeInProgress = new CompletableFuture<>(); + CompletableFuture blockClose = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + // block the write trying to do the first close + if (!closeInProgress.isDone() && metadata.isClosed()) { + closeInProgress.complete(null); + return blockClose; + } else { + return FutureUtils.value(null); + } + }); + CompletableFuture closeFuture = lh.closeAsync(); + closeInProgress.get(); + + /* close with different length. can happen in cases where there's a write outstanding */ + ClientUtil.transformMetadata(clientCtx, 10L, (metadata) -> LedgerMetadataBuilder.from(metadata) + .withClosedState().withLastEntryId(lac + 1).withLength(length + 100).build()); + + blockClose.complete(null); + try { + closeFuture.get(); + fail("Close should fail. 
Ledger has been closed in a state we don't know how to untangle"); + } catch (ExecutionException ee) { + assertEquals(BKException.BKMetadataVersionException.class, ee.getCause().getClass()); } - - @Test - public void testMetadataCloseMarkedInRecoveryWhileClosing() throws Exception { - MockClientContext clientCtx = MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - long lac = lh.append("entry1".getBytes()); - long length = lh.getLength(); - - CompletableFuture closeInProgress = new CompletableFuture<>(); - CompletableFuture blockClose = new CompletableFuture<>(); - clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { - // block the write trying to do the first close - if (metadata.isClosed()) { - closeInProgress.complete(null); - return blockClose; - } else { - return FutureUtils.value(null); - } - }); - CompletableFuture closeFuture = lh.closeAsync(); - closeInProgress.get(); - - ClientUtil.transformMetadata(clientCtx, 10L, - (metadata) -> LedgerMetadataBuilder.from(metadata).withInRecoveryState().build()); - - blockClose.complete(null); - - closeFuture.get(); // should override in recovery, since this handle knows what it has written - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), lac); - Assert.assertEquals(lh.getLedgerMetadata().getLength(), length); - } - - @Test - public void testCloseWhileAddInProgress() throws Exception { - MockClientContext clientCtx = 
MockClientContext.create(); - Versioned md = ClientUtil.setupLedger(clientCtx, 10L, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(2) - .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - // block all entry writes from completing - CompletableFuture writesHittingBookies = new CompletableFuture<>(); - clientCtx.getMockBookieClient().setPreWriteHook((bookie, ledgerId, entryId) -> { - writesHittingBookies.complete(null); - return new CompletableFuture(); - }); - LedgerHandle lh = new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - CompletableFuture future = lh.appendAsync("entry1".getBytes()); - writesHittingBookies.get(); - - lh.close(); - try { - future.get(); - Assert.fail("That write shouldn't have succeeded"); - } catch (ExecutionException ee) { - Assert.assertEquals(ee.getCause().getClass(), BKException.BKLedgerClosedException.class); - } - Assert.assertTrue(lh.getLedgerMetadata().isClosed()); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().size(), 1); - Assert.assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); - Assert.assertEquals(lh.getLedgerMetadata().getLastEntryId(), LedgerHandle.INVALID_ENTRY_ID); - Assert.assertEquals(lh.getLedgerMetadata().getLength(), 0); + } + + @Test + void metadataCloseMarkedInRecoveryWhileClosing() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(2) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + long lac = lh.append("entry1".getBytes()); + long length = lh.getLength(); + + CompletableFuture closeInProgress = new CompletableFuture<>(); + 
CompletableFuture blockClose = new CompletableFuture<>(); + clientCtx.getMockLedgerManager().setPreWriteHook((ledgerId, metadata) -> { + // block the write trying to do the first close + if (metadata.isClosed()) { + closeInProgress.complete(null); + return blockClose; + } else { + return FutureUtils.value(null); + } + }); + CompletableFuture closeFuture = lh.closeAsync(); + closeInProgress.get(); + + ClientUtil.transformMetadata(clientCtx, 10L, + (metadata) -> LedgerMetadataBuilder.from(metadata).withInRecoveryState().build()); + + blockClose.complete(null); + + closeFuture.get(); // should override in recovery, since this handle knows what it has written + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(lh.getLedgerMetadata().getLastEntryId(), lac); + assertEquals(lh.getLedgerMetadata().getLength(), length); + } + + @Test + void closeWhileAddInProgress() throws Exception { + MockClientContext clientCtx = MockClientContext.create(); + Versioned md = + ClientUtil.setupLedger(clientCtx, 10L, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(2) + .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); + // block all entry writes from completing + CompletableFuture writesHittingBookies = new CompletableFuture<>(); + clientCtx.getMockBookieClient().setPreWriteHook((bookie, ledgerId, entryId) -> { + writesHittingBookies.complete(null); + return new CompletableFuture(); + }); + LedgerHandle lh = + new LedgerHandle(clientCtx, 10L, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + CompletableFuture future = lh.appendAsync("entry1".getBytes()); + writesHittingBookies.get(); + + lh.close(); + try { + future.get(); + fail("That write shouldn't have succeeded"); + } catch (ExecutionException ee) { + 
assertEquals(BKException.BKLedgerClosedException.class, ee.getCause().getClass()); } - - @Test - public void testDoubleCloseOnHandle() throws Exception { - long ledgerId = 123L; - MockClientContext clientCtx = MockClientContext.create(); - - Versioned md = ClientUtil.setupLedger(clientCtx, ledgerId, - LedgerMetadataBuilder.create() - .withEnsembleSize(3).withWriteQuorumSize(3).withAckQuorumSize(3) + assertTrue(lh.getLedgerMetadata().isClosed()); + assertEquals(1, lh.getLedgerMetadata().getAllEnsembles().size()); + assertEquals(lh.getLedgerMetadata().getAllEnsembles().get(0L), Lists.newArrayList(b1, b2, b3)); + assertEquals(LedgerHandle.INVALID_ENTRY_ID, lh.getLedgerMetadata().getLastEntryId()); + assertEquals(0, lh.getLedgerMetadata().getLength()); + } + + @Test + void doubleCloseOnHandle() throws Exception { + long ledgerId = 123L; + MockClientContext clientCtx = MockClientContext.create(); + + Versioned md = + ClientUtil + .setupLedger(clientCtx, ledgerId, LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) .newEnsembleEntry(0L, Lists.newArrayList(b1, b2, b3))); - CompletableFuture metadataPromise = new CompletableFuture<>(); - CompletableFuture clientPromise = new CompletableFuture<>(); + CompletableFuture metadataPromise = new CompletableFuture<>(); + CompletableFuture clientPromise = new CompletableFuture<>(); - LedgerHandle writer = new LedgerHandle(clientCtx, ledgerId, md, - BookKeeper.DigestType.CRC32C, - ClientUtil.PASSWD, WriteFlag.NONE); - long eid1 = writer.append("entry1".getBytes()); + LedgerHandle writer = + new LedgerHandle(clientCtx, ledgerId, md, BookKeeper.DigestType.CRC32C, ClientUtil.PASSWD, + WriteFlag.NONE); + long eid1 = writer.append("entry1".getBytes()); - log.info("block writes from completing on bookies and metadata"); - clientCtx.getMockBookieClient().setPostWriteHook((bookie, lid, eid) -> clientPromise); - clientCtx.getMockLedgerManager().setPreWriteHook((lid, metadata) -> metadataPromise); 
+ log.info("block writes from completing on bookies and metadata"); + clientCtx.getMockBookieClient().setPostWriteHook((bookie, lid, eid) -> clientPromise); + clientCtx.getMockLedgerManager().setPreWriteHook((lid, metadata) -> metadataPromise); - log.info("try to add another entry, it will block"); - writer.appendAsync("entry2".getBytes()); + log.info("try to add another entry, it will block"); + writer.appendAsync("entry2".getBytes()); - log.info("attempt one close, should block forever"); - CompletableFuture firstClose = writer.closeAsync(); + log.info("attempt one close, should block forever"); + CompletableFuture firstClose = writer.closeAsync(); - log.info("attempt second close, should not finish before first one"); - CompletableFuture secondClose = writer.closeAsync(); + log.info("attempt second close, should not finish before first one"); + CompletableFuture secondClose = writer.closeAsync(); - Thread.sleep(500); // give it a chance to complete, the request jumps around threads - Assert.assertFalse(firstClose.isDone()); - Assert.assertFalse(secondClose.isDone()); - } + Thread.sleep(500); // give it a chance to complete, the request jumps around threads + assertFalse(firstClose.isDone()); + assertFalse(secondClose.isDone()); + } } - diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCloseTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCloseTest.java index 496e61f8b44..acfba3ff860 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCloseTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCloseTest.java @@ -18,8 +18,8 @@ package org.apache.bookkeeper.client; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import io.netty.buffer.ByteBuf; 
import java.io.IOException; @@ -43,7 +43,7 @@ import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback; import org.apache.bookkeeper.test.BookKeeperClusterTestCase; import org.apache.bookkeeper.test.TestCallbacks.AddCallbackFuture; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,216 +53,219 @@ @SuppressWarnings("deprecation") public class LedgerCloseTest extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(LedgerCloseTest.class); + static final int READ_TIMEOUT = 1; - static final int READ_TIMEOUT = 1; + private static final Logger LOG = LoggerFactory.getLogger(LedgerCloseTest.class); - final DigestType digestType; + final DigestType digestType; - public LedgerCloseTest() { - super(6); - this.digestType = DigestType.CRC32; - // set timeout to a large value which disable it. - baseClientConf.setReadTimeout(99999); - baseConf.setGcWaitTime(999999); - } - - @Test - public void testLedgerCloseWithConsistentLength() throws Exception { - ClientConfiguration conf = new ClientConfiguration(); - conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); - conf.setReadTimeout(1); + public LedgerCloseTest() { + super(6); + this.digestType = DigestType.CRC32; + // set timeout to a large value which disable it. 
+ baseClientConf.setReadTimeout(99999); + baseConf.setGcWaitTime(999999); + } - BookKeeper bkc = new BookKeeper(conf); - LedgerHandle lh = bkc.createLedger(6, 3, DigestType.CRC32, new byte[] {}); - final CountDownLatch latch = new CountDownLatch(1); - stopBKCluster(); - final AtomicInteger i = new AtomicInteger(0xdeadbeef); - AsyncCallback.AddCallback cb = new AsyncCallback.AddCallback() { - @Override - public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { - i.set(rc); - latch.countDown(); - } - }; - lh.asyncAddEntry("Test Entry".getBytes(), cb, null); - latch.await(); - assertEquals(i.get(), BKException.Code.NotEnoughBookiesException); - assertEquals(0, lh.getLength()); - assertEquals(LedgerHandle.INVALID_ENTRY_ID, lh.getLastAddConfirmed()); - startBKCluster(zkUtil.getMetadataServiceUri()); - LedgerHandle newLh = bkc.openLedger(lh.getId(), DigestType.CRC32, new byte[] {}); - assertEquals(0, newLh.getLength()); - assertEquals(LedgerHandle.INVALID_ENTRY_ID, newLh.getLastAddConfirmed()); - } + @Test + void ledgerCloseWithConsistentLength() throws Exception { + ClientConfiguration conf = new ClientConfiguration(); + conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + conf.setReadTimeout(1); - @Test - public void testLedgerCloseDuringUnrecoverableErrors() throws Exception { - int numEntries = 3; - LedgerHandle lh = bkc.createLedger(3, 3, 3, digestType, "".getBytes()); - verifyMetadataConsistency(numEntries, lh); - } + BookKeeper bkc = new BookKeeper(conf); + LedgerHandle lh = bkc.createLedger(6, 3, DigestType.CRC32, new byte[]{}); + final CountDownLatch latch = new CountDownLatch(1); + stopBKCluster(); + final AtomicInteger i = new AtomicInteger(0xdeadbeef); + AsyncCallback.AddCallback cb = new AsyncCallback.AddCallback() { + @Override + public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { + i.set(rc); + latch.countDown(); + } + }; + lh.asyncAddEntry("Test Entry".getBytes(), cb, null); + latch.await(); + 
assertEquals(BKException.Code.NotEnoughBookiesException, i.get()); + assertEquals(0, lh.getLength()); + assertEquals(LedgerHandle.INVALID_ENTRY_ID, lh.getLastAddConfirmed()); + startBKCluster(zkUtil.getMetadataServiceUri()); + LedgerHandle newLh = bkc.openLedger(lh.getId(), DigestType.CRC32, new byte[]{}); + assertEquals(0, newLh.getLength()); + assertEquals(LedgerHandle.INVALID_ENTRY_ID, newLh.getLastAddConfirmed()); + } - @Test - public void testLedgerCheckerShouldnotSelectInvalidLastFragments() throws Exception { - int numEntries = 10; - LedgerHandle lh = bkc.createLedger(3, 3, 3, digestType, "".getBytes()); - // Add some entries before bookie failures - for (int i = 0; i < numEntries; i++) { - lh.addEntry("data".getBytes()); - } - numEntries = 4; // add n*ensemleSize+1 entries async after bookies - // failed. - verifyMetadataConsistency(numEntries, lh); + @Test + void ledgerCloseDuringUnrecoverableErrors() throws Exception { + int numEntries = 3; + LedgerHandle lh = bkc.createLedger(3, 3, 3, digestType, "".getBytes()); + verifyMetadataConsistency(numEntries, lh); + } - LedgerChecker checker = new LedgerChecker(bkc); - CheckerCallback cb = new CheckerCallback(); - checker.checkLedger(lh, cb); - Set result = cb.waitAndGetResult(); - assertEquals("No fragments should be selected", 0, result.size()); + @Test + void ledgerCheckerShouldnotSelectInvalidLastFragments() throws Exception { + int numEntries = 10; + LedgerHandle lh = bkc.createLedger(3, 3, 3, digestType, "".getBytes()); + // Add some entries before bookie failures + for (int i = 0; i < numEntries; i++) { + lh.addEntry("data".getBytes()); } + numEntries = 4; // add n*ensemleSize+1 entries async after bookies + // failed. 
+ verifyMetadataConsistency(numEntries, lh); - class CheckerCallback implements GenericCallback> { - private Set result = null; - private CountDownLatch latch = new CountDownLatch(1); + LedgerChecker checker = new LedgerChecker(bkc); + CheckerCallback cb = new CheckerCallback(); + checker.checkLedger(lh, cb); + Set result = cb.waitAndGetResult(); + assertEquals(0, result.size(), "No fragments should be selected"); + } - public void operationComplete(int rc, Set result) { - this.result = result; - latch.countDown(); - } + private void verifyMetadataConsistency(int numEntries, LedgerHandle lh) throws Exception { + final CountDownLatch addDoneLatch = new CountDownLatch(1); + final CountDownLatch deadIOLatch = new CountDownLatch(1); + final CountDownLatch recoverDoneLatch = new CountDownLatch(1); + final CountDownLatch failedLatch = new CountDownLatch(1); + // kill first bookie to replace with a unauthorize bookie + BookieId bookie = lh.getCurrentEnsemble().get(0); + ServerConfiguration conf = killBookie(bookie); + // replace a unauthorize bookie + startUnauthorizedBookie(conf, addDoneLatch); + // kill second bookie to replace with a dead bookie + bookie = lh.getCurrentEnsemble().get(1); + conf = killBookie(bookie); + // replace a slow dead bookie + startDeadBookie(conf, deadIOLatch); - Set waitAndGetResult() throws InterruptedException { - latch.await(); - return result; + // tried to add entries + for (int i = 0; i < numEntries; i++) { + lh.asyncAddEntry("data".getBytes(), new AddCallback() { + @Override + public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { + if (BKException.Code.OK != rc) { + failedLatch.countDown(); + deadIOLatch.countDown(); + } + if (0 == entryId) { + try { + recoverDoneLatch.await(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } } + }, null); } + // add finished + addDoneLatch.countDown(); + // wait until entries failed due to UnauthorizedAccessException + failedLatch.await(); + // 
simulate the ownership of this ledger is transfer to another host + LOG.info("Recover ledger {}.", lh.getId()); + ClientConfiguration newConf = new ClientConfiguration(); + newConf.addConfiguration(baseClientConf); + BookKeeper newBkc = new BookKeeperTestClient(newConf.setReadTimeout(1)); + LedgerHandle recoveredLh = newBkc.openLedger(lh.getId(), digestType, "".getBytes()); + LOG.info("Recover ledger {} done.", lh.getId()); + recoverDoneLatch.countDown(); + // wait a bit until add operations failed from second bookie due to IOException + TimeUnit.SECONDS.sleep(5); + // open the ledger again to make sure we ge the right last confirmed. + LedgerHandle newLh = newBkc.openLedger(lh.getId(), digestType, "".getBytes()); + assertEquals(recoveredLh.getLastAddConfirmed(), newLh.getLastAddConfirmed(), + "Metadata should be consistent across different opened ledgers"); + } - private void verifyMetadataConsistency(int numEntries, LedgerHandle lh) - throws Exception { - final CountDownLatch addDoneLatch = new CountDownLatch(1); - final CountDownLatch deadIOLatch = new CountDownLatch(1); - final CountDownLatch recoverDoneLatch = new CountDownLatch(1); - final CountDownLatch failedLatch = new CountDownLatch(1); - // kill first bookie to replace with a unauthorize bookie - BookieId bookie = lh.getCurrentEnsemble().get(0); - ServerConfiguration conf = killBookie(bookie); - // replace a unauthorize bookie - startUnauthorizedBookie(conf, addDoneLatch); - // kill second bookie to replace with a dead bookie - bookie = lh.getCurrentEnsemble().get(1); - conf = killBookie(bookie); - // replace a slow dead bookie - startDeadBookie(conf, deadIOLatch); + private void startUnauthorizedBookie(ServerConfiguration conf, final CountDownLatch latch) + throws Exception { + Bookie sBookie = new TestBookieImpl(conf) { + @Override + public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, + byte[] masterKey) + throws IOException, BookieException { + try { + 
latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + throw BookieException.create(BookieException.Code.UnauthorizedAccessException); + } - // tried to add entries - for (int i = 0; i < numEntries; i++) { - lh.asyncAddEntry("data".getBytes(), new AddCallback() { - @Override - public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) { - if (BKException.Code.OK != rc) { - failedLatch.countDown(); - deadIOLatch.countDown(); - } - if (0 == entryId) { - try { - recoverDoneLatch.await(); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } - } - } - }, null); + @Override + public void recoveryAddEntry(ByteBuf entry, WriteCallback cb, Object ctx, byte[] masterKey) + throws IOException, BookieException { + throw new IOException("Dead bookie for recovery adds."); + } + }; + startAndAddBookie(conf, sBookie); + } + + // simulate slow adds, then become normal when recover, + // so no ensemble change when recovering ledger on this bookie. 
+ private void startDeadBookie(ServerConfiguration conf, final CountDownLatch latch) + throws Exception { + Bookie dBookie = new TestBookieImpl(conf) { + @Override + public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, + byte[] masterKey) + throws IOException, BookieException { + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } - // add finished - addDoneLatch.countDown(); - // wait until entries failed due to UnauthorizedAccessException - failedLatch.await(); - // simulate the ownership of this ledger is transfer to another host - LOG.info("Recover ledger {}.", lh.getId()); - ClientConfiguration newConf = new ClientConfiguration(); - newConf.addConfiguration(baseClientConf); - BookKeeper newBkc = new BookKeeperTestClient(newConf.setReadTimeout(1)); - LedgerHandle recoveredLh = newBkc.openLedger(lh.getId(), digestType, "".getBytes()); - LOG.info("Recover ledger {} done.", lh.getId()); - recoverDoneLatch.countDown(); - // wait a bit until add operations failed from second bookie due to IOException - TimeUnit.SECONDS.sleep(5); - // open the ledger again to make sure we ge the right last confirmed. - LedgerHandle newLh = newBkc.openLedger(lh.getId(), digestType, "".getBytes()); - assertEquals("Metadata should be consistent across different opened ledgers", - recoveredLh.getLastAddConfirmed(), newLh.getLastAddConfirmed()); - } + // simulate slow adds. 
+ throw new IOException("Dead bookie"); + } + }; + startAndAddBookie(conf, dBookie); + } - private void startUnauthorizedBookie(ServerConfiguration conf, final CountDownLatch latch) - throws Exception { - Bookie sBookie = new TestBookieImpl(conf) { - @Override - public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, byte[] masterKey) - throws IOException, BookieException { - try { - latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - throw BookieException.create(BookieException.Code.UnauthorizedAccessException); - } + @Test + void allWritesAreCompletedOnClosedLedger() throws Exception { + for (int i = 0; i < 100; i++) { + LOG.info("Iteration {}", i); - @Override - public void recoveryAddEntry(ByteBuf entry, WriteCallback cb, Object ctx, byte[] masterKey) - throws IOException, BookieException { - throw new IOException("Dead bookie for recovery adds."); - } - }; - startAndAddBookie(conf, sBookie); - } + List futures = new ArrayList(); + LedgerHandle w = bkc.createLedger(DigestType.CRC32, new byte[0]); + AddCallbackFuture f = new AddCallbackFuture(0L); + w.asyncAddEntry("foobar".getBytes(UTF_8), f, null); + f.get(); - // simulate slow adds, then become normal when recover, - // so no ensemble change when recovering ledger on this bookie. - private void startDeadBookie(ServerConfiguration conf, final CountDownLatch latch) throws Exception { - Bookie dBookie = new TestBookieImpl(conf) { - @Override - public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, byte[] masterKey) - throws IOException, BookieException { - try { - latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - // simulate slow adds. 
- throw new IOException("Dead bookie"); - } - }; - startAndAddBookie(conf, dBookie); + LedgerHandle r = bkc.openLedger(w.getId(), DigestType.CRC32, new byte[0]); + for (int j = 0; j < 100; j++) { + AddCallbackFuture f1 = new AddCallbackFuture(1L + j); + w.asyncAddEntry("foobar".getBytes(), f1, null); + futures.add(f1); + } + + for (AddCallbackFuture f2 : futures) { + try { + f2.get(10, TimeUnit.SECONDS); + } catch (ExecutionException ee) { + // we don't care about errors + } catch (TimeoutException te) { + LOG.error("Error on waiting completing entry {} : ", f2.getExpectedEntryId(), te); + fail("Should succeed on waiting completing entry " + f2.getExpectedEntryId()); + } + } } + } - @Test - public void testAllWritesAreCompletedOnClosedLedger() throws Exception { - for (int i = 0; i < 100; i++) { - LOG.info("Iteration {}", i); + class CheckerCallback implements GenericCallback> { - List futures = new ArrayList(); - LedgerHandle w = bkc.createLedger(DigestType.CRC32, new byte[0]); - AddCallbackFuture f = new AddCallbackFuture(0L); - w.asyncAddEntry("foobar".getBytes(UTF_8), f, null); - f.get(); + private final CountDownLatch latch = new CountDownLatch(1); + private Set result = null; - LedgerHandle r = bkc.openLedger(w.getId(), DigestType.CRC32, new byte[0]); - for (int j = 0; j < 100; j++) { - AddCallbackFuture f1 = new AddCallbackFuture(1L + j); - w.asyncAddEntry("foobar".getBytes(), f1, null); - futures.add(f1); - } + public void operationComplete(int rc, Set result) { + this.result = result; + latch.countDown(); + } - for (AddCallbackFuture f2: futures) { - try { - f2.get(10, TimeUnit.SECONDS); - } catch (ExecutionException ee) { - // we don't care about errors - } catch (TimeoutException te) { - LOG.error("Error on waiting completing entry {} : ", f2.getExpectedEntryId(), te); - fail("Should succeed on waiting completing entry " + f2.getExpectedEntryId()); - } - } - } + Set waitAndGetResult() throws InterruptedException { + latch.await(); + return result; } + } 
} diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCmdTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCmdTest.java index 6900dfbc13d..d8c6208a473 100644 --- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCmdTest.java +++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/LedgerCmdTest.java @@ -20,7 +20,7 @@ */ package org.apache.bookkeeper.client; -import static junit.framework.TestCase.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -35,7 +35,7 @@ import org.apache.bookkeeper.test.BookKeeperClusterTestCase; import org.apache.bookkeeper.util.EntryFormatter; import org.apache.bookkeeper.util.LedgerIdFormatter; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,64 +44,65 @@ */ public class LedgerCmdTest extends BookKeeperClusterTestCase { - private static final Logger LOG = LoggerFactory.getLogger(LedgerCmdTest.class); - private DigestType digestType = DigestType.CRC32; - private static final String PASSWORD = "testPasswd"; + private static final Logger LOG = LoggerFactory.getLogger(LedgerCmdTest.class); - public LedgerCmdTest() { - super(1); - baseConf.setLedgerStorageClass(DbLedgerStorage.class.getName()); - baseConf.setGcWaitTime(60000); - baseConf.setFlushInterval(1); - } + private static final String PASSWORD = "testPasswd"; + + private final DigestType digestType = DigestType.CRC32; + public LedgerCmdTest() { + super(1); + baseConf.setLedgerStorageClass(DbLedgerStorage.class.getName()); + baseConf.setGcWaitTime(60000); + baseConf.setFlushInterval(1); + } - /** - * list of entry logger files that contains given ledgerId. - */ - @Test - public void testLedgerDbStorageCmd() throws Exception { + /** + * list of entry logger files that contains given ledgerId. 
+ */ + @Test + void ledgerDbStorageCmd() throws Exception { - BookKeeper bk = new BookKeeper(baseClientConf, zkc); - LOG.info("Create ledger and add entries to it"); - LedgerHandle lh1 = createLedgerWithEntries(bk, 10); + BookKeeper bk = new BookKeeper(baseClientConf, zkc); + LOG.info("Create ledger and add entries to it"); + LedgerHandle lh1 = createLedgerWithEntries(bk, 10); - for (int i = 0; i < bookieCount(); i++) { - BookieAccessor.forceFlush((BookieImpl) serverByIndex(i).getBookie()); - } + for (int i = 0; i < bookieCount(); i++) { + BookieAccessor.forceFlush((BookieImpl) serverByIndex(i).getBookie()); + } - String[] argv = { "ledger", Long.toString(lh1.getId()) }; - final ServerConfiguration conf = confByIndex(0); - conf.setUseHostNameAsBookieID(true); + String[] argv = {"ledger", Long.toString(lh1.getId())}; + final ServerConfiguration conf = confByIndex(0); + conf.setUseHostNameAsBookieID(true); - BookieShell bkShell = - new BookieShell(LedgerIdFormatter.LONG_LEDGERID_FORMATTER, EntryFormatter.STRING_FORMATTER); - bkShell.setConf(conf); + BookieShell bkShell = + new BookieShell(LedgerIdFormatter.LONG_LEDGERID_FORMATTER, EntryFormatter.STRING_FORMATTER); + bkShell.setConf(conf); - assertEquals("Failed to return exit code!", 0, bkShell.run(argv)); + assertEquals(0, bkShell.run(argv), "Failed to return exit code!"); - } + } - private LedgerHandle createLedgerWithEntries(BookKeeper bk, int numOfEntries) throws Exception { - LedgerHandle lh = bk.createLedger(1, 1, digestType, PASSWORD.getBytes()); - final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); - final CountDownLatch latch = new CountDownLatch(numOfEntries); - - final AddCallback cb = new AddCallback() { - public void addComplete(int rccb, LedgerHandle lh, long entryId, Object ctx) { - rc.compareAndSet(BKException.Code.OK, rccb); - latch.countDown(); - } - }; - for (int i = 0; i < numOfEntries; i++) { - lh.asyncAddEntry(("foobar" + i).getBytes(), cb, null); - } - if (!latch.await(30, 
TimeUnit.SECONDS)) { - throw new Exception("Entries took too long to add"); - } - if (rc.get() != BKException.Code.OK) { - throw BKException.create(rc.get()); - } - return lh; + private LedgerHandle createLedgerWithEntries(BookKeeper bk, int numOfEntries) throws Exception { + LedgerHandle lh = bk.createLedger(1, 1, digestType, PASSWORD.getBytes()); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + final CountDownLatch latch = new CountDownLatch(numOfEntries); + + final AddCallback cb = new AddCallback() { + public void addComplete(int rccb, LedgerHandle lh, long entryId, Object ctx) { + rc.compareAndSet(BKException.Code.OK, rccb); + latch.countDown(); + } + }; + for (int i = 0; i < numOfEntries; i++) { + lh.asyncAddEntry(("foobar" + i).getBytes(), cb, null); + } + if (!latch.await(30, TimeUnit.SECONDS)) { + throw new Exception("Entries took too long to add"); + } + if (rc.get() != BKException.Code.OK) { + throw BKException.create(rc.get()); } + return lh; + } }