From 51257d2ebe099a6c7029e7fd47ce25f4393cfb49 Mon Sep 17 00:00:00 2001
From: markrmiller
Date: Thu, 11 Feb 2016 08:39:00 -0500
Subject: [PATCH] SOLR-8575: Revert while investigated. (reverted from commit
 482b40f841660820f633267a21e6df44aff55346)

---
 solr/CHANGES.txt                                   |   3 -
 .../solr/update/HdfsTransactionLog.java            |  22 +--
 .../org/apache/solr/update/UpdateLog.java          |   4 +-
 .../TlogReplayBufferedWhileIndexingTest.java       | 136 ------------------
 ...fsTlogReplayBufferedWhileIndexingTest.java      |  63 --------
 5 files changed, 13 insertions(+), 215 deletions(-)
 delete mode 100644 solr/core/src/test/org/apache/solr/cloud/TlogReplayBufferedWhileIndexingTest.java
 delete mode 100644 solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7291ad05814..7a272d52efb 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -280,9 +280,6 @@ Bug Fixes
 * SOLR-8640: CloudSolrClient does not send credentials for update request (noble, hoss)
 
-* SOLR-8575: Fix HDFSLogReader replay status numbers and a performance bug where we can reopen
-  FSDataInputStream too often. (Mark Miller, Patrick Dvorack)
-
 * SOLR-8651: The commitWithin parameter is not passed on for deleteById in UpdateRequest in
   distributed queries (Jessica Cheng Mallet via Erick Erickson)
 
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
index bff3486b213..3db65c61887 100644
--- a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
@@ -390,18 +390,16 @@ public class HdfsTransactionLog extends TransactionLog {
 
       // we actually need a new reader to
       // see if any data was added by the writer
-      if (pos >= sz) {
-        log.info("Read available inputstream data, opening new inputstream pos={} sz={}", pos, sz);
-
-        synchronized (HdfsTransactionLog.this) {
-          sz = fos.size();
-        }
-
+      if (fis.position() >= sz) {
         fis.close();
         tlogOutStream.hflush();
-
-        FSDataInputStream fdis = fs.open(tlogFile);
-        fis = new FSDataFastInputStream(fdis, pos);
+        try {
+          FSDataInputStream fdis = fs.open(tlogFile);
+          fis = new FSDataFastInputStream(fdis, pos);
+          sz = fs.getFileStatus(tlogFile).getLen();
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
       }
 
       if (pos == 0) {
@@ -448,7 +446,7 @@ public class HdfsTransactionLog extends TransactionLog {
 
     @Override
     public long currentSize() {
-      return fos.size();
+      return sz;
     }
 
   }
@@ -606,3 +604,5 @@ class FSDataFastInputStream extends FastInputStream {
     return "readFromStream="+readFromStream +" pos="+pos +" end="+end + " bufferPos="+getBufferPos() + " position="+position() ;
   }
 }
+
+
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 7e3fc9f03e4..2456c3e6f9e 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -1333,8 +1333,8 @@ public class UpdateLog implements PluginInfoInitialized {
         loglog.info(
             "log replay status {} active={} starting pos={} current pos={} current size={} % read={}",
             translog, activeLog, recoveryInfo.positionOfStart, cpos, csize,
-            Math.floor(cpos / (double) csize * 100.));
-
+            Math.round(cpos / (double) csize * 100.));
+
       }
 
     }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TlogReplayBufferedWhileIndexingTest.java b/solr/core/src/test/org/apache/solr/cloud/TlogReplayBufferedWhileIndexingTest.java
deleted file mode 100644
index 5c03a6093d9..00000000000
--- a/solr/core/src/test/org/apache/solr/cloud/TlogReplayBufferedWhileIndexingTest.java
+++ /dev/null
@@ -1,136 +0,0 @@
-package org.apache.solr.cloud;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.util.LuceneTestCase.Nightly;
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.common.SolrInputDocument;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-@Nightly
-@SuppressSSL
-public class TlogReplayBufferedWhileIndexingTest extends AbstractFullDistribZkTestBase {
-
-  private List<StoppableIndexingThread> threads;
-
-  public TlogReplayBufferedWhileIndexingTest() throws Exception {
-    super();
-    sliceCount = 1;
-    fixShardCount(2);
-    schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @BeforeClass
-  public static void beforeRestartWhileUpdatingTest() throws Exception {
-    System.setProperty("leaderVoteWait", "300000");
-    System.setProperty("solr.autoCommit.maxTime", "10000");
-    System.setProperty("solr.autoSoftCommit.maxTime", "3000");
-    if (System.getProperty("solr.hdfs.home") != null) useFactory("solr.StandardDirectoryFactory");
-  }
-
-  @AfterClass
-  public static void afterRestartWhileUpdatingTest() {
-    System.clearProperty("leaderVoteWait");
-    System.clearProperty("solr.autoCommit.maxTime");
-    System.clearProperty("solr.autoSoftCommit.maxTime");
-  }
-
-  @Test
-  public void test() throws Exception {
-    handle.clear();
-    handle.put("timestamp", SKIPVAL);
-
-    waitForRecoveriesToFinish(false);
-
-    int numThreads = 1;
-
-    threads = new ArrayList<>(numThreads);
-
-    ArrayList<JettySolrRunner> allJetty = new ArrayList<>();
-    allJetty.addAll(jettys);
-    allJetty.remove(shardToLeaderJetty.get("shard1").jetty);
-    assert allJetty.size() == 1 : allJetty.size();
-    ChaosMonkey.stop(allJetty.get(0));
-
-    StoppableIndexingThread indexThread;
-    for (int i = 0; i < numThreads; i++) {
-      indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), false, 50000, 1, false);
-      threads.add(indexThread);
-      indexThread.start();
-    }
-
-    Thread.sleep(2000);
-
-    ChaosMonkey.start(allJetty.get(0));
-
-    Thread.sleep(45000);
-
-    waitForThingsToLevelOut(320);
-
-    Thread.sleep(2000);
-
-    waitForRecoveriesToFinish(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), false, true);
-
-    for (StoppableIndexingThread thread : threads) {
-      thread.safeStop();
-      thread.safeStop();
-    }
-
-    waitForThingsToLevelOut(30);
-
-    checkShardConsistency(false, false);
-  }
-
-  @Override
-  protected void indexDoc(SolrInputDocument doc) throws IOException,
-      SolrServerException {
-    cloudClient.add(doc);
-  }
-
-
-  @Override
-  public void distribTearDown() throws Exception {
-    // make sure threads have been stopped...
-    if (threads != null) {
-      for (StoppableIndexingThread thread : threads) {
-        thread.safeStop();
-      }
-    }
-
-    super.distribTearDown();
-  }
-
-  // skip the randoms - they can deadlock...
-  @Override
-  protected void indexr(Object... fields) throws Exception {
-    SolrInputDocument doc = new SolrInputDocument();
-    addFields(doc, fields);
-    addFields(doc, "rnd_b", true);
-    indexDoc(doc);
-  }
-}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
deleted file mode 100644
index 534bb903559..00000000000
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-package org.apache.solr.cloud.hdfs;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.cloud.TlogReplayBufferedWhileIndexingTest;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-
-@Slow
-@Nightly
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class HdfsTlogReplayBufferedWhileIndexingTest extends TlogReplayBufferedWhileIndexingTest {
-
-  public HdfsTlogReplayBufferedWhileIndexingTest() throws Exception {
-    super();
-  }
-
-  private static MiniDFSCluster dfsCluster;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-    System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-  }
-
-
-  @Override
-  protected String getDataDir(String dataDir) throws IOException {
-    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
-  }
-
-}