/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.BindException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.log4j.LogManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.impl.Log4jLoggerAdapter;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

/**
 * Facility for testing HBase. Replacement for
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while testing HBase.  This class is
 * meant to be your one-stop shop for anything you might need while testing.  Manages
 * one cluster at a time only. Managed cluster can be an in-process
 * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
 * Not all methods work with the real cluster.
 * Depends on log4j being on the classpath and
 * hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels.
 * In the configuration properties, default values for master-info-port and
 * region-server-port are overridden such that a random port will be assigned (thus
 * avoiding port contention if another local HBase instance is already running).
 * <p>To preserve test data directories, set the system property
 * "hbase.testing.preserve.testdir" to true.
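 * <p>A minimal usage sketch; the table name and column family are illustrative, and
 * {@code createTable} is assumed to be one of this class's table helpers:
 * <pre>
 * HBaseTestingUtility util = new HBaseTestingUtility();
 * util.startMiniCluster();
 * Table table = util.createTable(TableName.valueOf("test"), Bytes.toBytes("fam"));
 * // ... exercise the table ...
 * util.shutdownMiniCluster();
 * </pre>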
 */
@InterfaceAudience.Public
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseZKTestingUtility {

  /**
   * System property key to get test directory value. Name is as it is because mini dfs has
   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
   * used in mini dfs.
   * @deprecated can be used only with mini dfs
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 3;

  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
  public static final boolean PRESPLIT_TEST_TABLE = true;

  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory on test filesystem where we put the data for this instance of
   * HBaseTestingUtility */
  private Path dataTestDirOnTestFS = null;

  /**
   * Shared cluster connection.
   */
  private volatile Connection connection;

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** This is for unit tests parameterized on the combinations of memstoreTS and tags (pairs of
   * booleans). */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  /**
   * Checks to see if a specific port is available.
   *
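   * <p>A hedged sketch of probing for a free port; the starting port and the loop are
   * illustrative only, not part of this API:
   * <pre>
   * int port = 50000;
   * while (!HBaseTestingUtility.available(port)) {
   *   port++;
   * }
   * </pre>
   *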
   * @param port the port number to check for availability
   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
   */
  public static boolean available(int port) {
    ServerSocket ss = null;
    DatagramSocket ds = null;
    try {
      ss = new ServerSocket(port);
      ss.setReuseAddress(true);
      ds = new DatagramSocket(port);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      // Do nothing
    } finally {
      if (ds != null) {
        ds.close();
      }

      if (ss != null) {
        try {
          ss.close();
        } catch (IOException e) {
          /* should not be thrown */
        }
      }
    }

    return false;
  }

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
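   * <p>A hedged sketch of consuming these combinations from a JUnit
   * {@code Parameterized} test; the test-class context is hypothetical:
   * <pre>
   * &#64;Parameterized.Parameters
   * public static Collection&lt;Object[]&gt; data() {
   *   return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
   * }
   * </pre>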
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<>();
    for (Compression.Algorithm comprAlgo :
         HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create all combinations of memstoreTS and tags.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false, true });
    configurations.add(new Object[] { false, false, false });
    configurations.add(new Object[] { false, true, true });
    configurations.add(new Object[] { false, true, false });
    configurations.add(new Object[] { true, false, true });
    configurations.add(new Object[] { true, false, false });
    configurations.add(new Object[] { true, true, true });
    configurations.add(new Object[] { true, true, false });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  /**
   * <p>Create an HBaseTestingUtility using a default configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * <p>Previously, there was a distinction between the type of utility returned by
   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
   * at which point they will switch to using mini DFS for storage.
   */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * <p>Create an HBaseTestingUtility using a given configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * <p>Previously, there was a distinction between the type of utility returned by
   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
   * HBaseTestingUtility objects will behave as local until a DFS cluster is started,
   * at which point they will switch to using mini DFS for storage.
   *
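   * <p>A hedged sketch of passing a tuned configuration; the handler-count override is
   * illustrative:
   * <pre>
   * Configuration conf = HBaseConfiguration.create();
   * conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 10);
   * HBaseTestingUtility htu = new HBaseTestingUtility(conf);
   * </pre>
   *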
   * @param conf The configuration to use for further operations
   */
  public HBaseTestingUtility(@Nullable Configuration conf) {
    super(conf);

    // a hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);

    // Save this for when setting default file:// breaks things
    if (this.conf.get("fs.defaultFS") != null) {
      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
    }
    if (this.conf.get(HConstants.HBASE_DIR) != null) {
      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
    }
    // Every cluster is a local cluster until we start DFS
    // Note that conf could be null, but this.conf will not be
    String dataTestDir = getDataTestDir().toString();
    this.conf.set("fs.defaultFS", "file:///");
    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
    // If the value for random ports isn't set, default it to true, making
    // random port assignment opt-out rather than opt-in for tests
    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
  }

  /**
   * @deprecated use {@link HBaseTestingUtility#HBaseTestingUtility()} instead
   * @return a normal HBaseTestingUtility
   */
  @Deprecated
  public static HBaseTestingUtility createLocalHTU() {
    return new HBaseTestingUtility();
  }

  /**
   * @deprecated use {@link HBaseTestingUtility#HBaseTestingUtility(Configuration)} instead
   * @return a normal HBaseTestingUtility
   */
  @Deprecated
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    return new HBaseTestingUtility(c);
  }

  /**
   * Close both the region {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final Region r) throws IOException {
    closeRegionAndWAL((HRegion)r);
  }

  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final HRegion r) throws IOException {
    if (r == null) return;
    r.close();
    if (r.getWAL() == null) return;
    r.getWAL().close();
  }

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link Connection} instances
   * can be shared.  The Map of Connections is keyed by the Configuration.  If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than use the returned Configuration directly, it's
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as that is what minidfscluster bases
   * its data dir on.  Modifying a System property is not the way to do concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * single instance only is how the minidfscluster works.
   *
   * We also create the underlying directories for
   *  hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
   *  in the conf, and as a system property for hadoop.tmp.dir.
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    //  we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      //  that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value.");
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; Returns working directory
   * for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return META table descriptor
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #getMetaTableDescriptorBuilder()}
   */
  @Deprecated
  public HTableDescriptor getMetaTableDescriptor() {
    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
  }

  /**
   * @return META table descriptor builder
   */
  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
    try {
      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   * Creates a new directory if not already setup.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test fs already setup in " + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  /**
   * Sets up a new path in the test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir;
    String randomStr = getRandomUUID().toString();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      newDataTestDir = new Path(getDataTestDir(), randomStr);
      File dataTestDir = new File(newDataTestDir.toString());
      if (deleteOnExit()) {
        dataTestDir.deleteOnExit();
      }
    } else {
      Path base = getBaseTestDirOnTestFS();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) {
        fs.deleteOnExit(newDataTestDir);
      }
    }
    return newDataTestDir;
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed the child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
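   * <p>A hedged sketch; the host names are illustrative:
   * <pre>
   * util.startMiniDFSCluster(new String[] { "host1.example.com", "host2.example.com" });
   * </pre>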
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String[] hosts)
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts)
  throws Exception {
    return startMiniDFSCluster(servers, null, hosts);
  }

  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    TraceUtil.initTracer(conf);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Reset the test directory for the test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** This is used before starting HDFS and map-reduce mini-clusters */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
    conf.set("yarn.app.mapreduce.am.staging-dir",
      new Path(root, "mapreduce-am-staging-root-dir").toString());
  }

  /**
   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
   * new column families. Defaults to false.
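   * <p>For example, the behavior can be enabled for a test run by passing
   * {@code -Dhbase.tests.new.version.behavior=true} on the JVM command line.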
   */
  public boolean isNewVersionBehaviorEnabled() {
    final String propName = "hbase.tests.new.version.behavior";
    String v = System.getProperty(propName);
    if (v != null) {
      return Boolean.parseBoolean(v);
    }
    return false;
  }

  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   * This allows specifying the parameter on the command line.
   * If not set, default is false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /**
   * Enable short circuit reads, unless configured differently.
   * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper where the WAL dir is created
   * separately. All other options will use default values, defined in
   * {@link StartMiniClusterOption.Builder}.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
  throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
        .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
    throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
        .numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its length will
   *                      override the number of HDFS data nodes.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      boolean createRootDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its length will
   *                      override the number of HDFS data nodes.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its length will
   *                      override the number of HDFS data nodes.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numSlaves).rsClass(rsClass)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its length will
   *                      override the number of HDFS data nodes.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
    throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass)
        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its length will
   *                      override the number of HDFS data nodes.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated Use {@link #startMiniCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass)
        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
        .createRootDir(createRootDir).createWALDir(createWALDir)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper clusters with the given number of slave
   * nodes. All other options will use default values, defined in
   * {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Number of slave nodes, for both HBase region servers and HDFS data nodes.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper, all using default options.
   * Option default values can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(StartMiniClusterOption.builder().build());
  }

  /**
   * Start up a mini cluster of hbase, optionally also dfs and zookeeper if needed.
   * It modifies the Configuration.  It homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data, to be cleaned up
   * on exit.
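   * <p>A hedged sketch of building a custom option; the counts are illustrative:
   * <pre>
   * StartMiniClusterOption option = StartMiniClusterOption.builder()
   *     .numMasters(2).numRegionServers(3).numDataNodes(3).build();
   * MiniHBaseCluster cluster = util.startMiniCluster(option);
   * </pre>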
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
    LOG.info("Starting up minicluster with option: {}", option);

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    if (dfsCluster == null) {
      LOG.info("STARTING DFS");
      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
    } else {
      LOG.info("NOT STARTING DFS");
    }

    // Start up a zk cluster.
    if (getZkCluster() == null) {
      startMiniZKCluster(option.getNumZkServers());
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * This is useful when doing stepped startup of clusters.
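   * <p>A hedged sketch of stepped startup; the counts are illustrative, and
   * {@code startMiniZKCluster()} is assumed from the parent utility:
   * <pre>
   * util.startMiniDFSCluster(3);
   * util.startMiniZKCluster();
   * util.startMiniHBaseCluster(StartMiniClusterOption.builder().numRegionServers(3).build());
   * </pre>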
   * @return Reference to the hbase mini hbase cluster.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir(option.isCreateRootDir());
    if (option.isCreateWALDir()) {
      createWALRootDir();
    }
    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
    // for tests that do not read hbase-defaults.xml
    setHBaseFsTmpDir();

    // These settings will make the master wait until exactly this number of
    // region servers have connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
    }

    Configuration c = new Configuration(this.conf);
    TraceUtil.initTracer(c);
    this.hbaseCluster =
        new MiniHBaseCluster(c, option.getNumMasters(), option.getNumRegionServers(),
            option.getRsPorts(), option.getMasterClass(), option.getRsClass());
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());

    return (MiniHBaseCluster) hbaseCluster;
  }

  /**
   * Starts up mini hbase cluster using default options.
   * Default options can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
  }

  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numRegionServers Number of region servers.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated Use {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
      throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numRegionServers Number of region servers.
   * @param rsPorts Ports that RegionServers should use.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated Use {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
      List<Integer> rsPorts) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Number of master nodes.
   * @param numRegionServers Number of region servers.
   * @param rsPorts Ports that RegionServers should use.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated Use {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
      List<Integer> rsPorts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
        .createRootDir(createRootDir).createWALDir(createWALDir).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
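   * <p>For example, a test might bounce just HBase; the server count is illustrative:
   * <pre>
   * util.shutdownMiniHBaseCluster();
   * util.restartHBaseCluster(2);
   * </pre>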
1197   * @param servers number of region servers
1198   */
1199  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1200    this.restartHBaseCluster(servers, null);
1201  }
1202
1203  public void restartHBaseCluster(int servers, List<Integer> ports)
1204      throws IOException, InterruptedException {
1205    if (hbaseAdmin != null) {
1206      hbaseAdmin.close();
1207      hbaseAdmin = null;
1208    }
1209    if (this.connection != null) {
1210      this.connection.close();
1211      this.connection = null;
1212    }
1213    this.hbaseCluster = new MiniHBaseCluster(this.conf, 1, servers, ports, null, null);
1214    // Don't leave here till we've done a successful scan of the hbase:meta
1215    Connection conn = ConnectionFactory.createConnection(this.conf);
1216    Table t = conn.getTable(TableName.META_TABLE_NAME);
1217    ResultScanner s = t.getScanner(new Scan());
1218    while (s.next() != null) {
1219      // do nothing
1220    }
1221    LOG.info("HBase has been restarted");
1222    s.close();
1223    t.close();
1224    conn.close();
1225  }
1226
1227  /**
1228   * @return Current mini hbase cluster. Only has something in it after a call
1229   * to {@link #startMiniCluster()}.
1230   * @see #startMiniCluster()
1231   */
1232  public MiniHBaseCluster getMiniHBaseCluster() {
1233    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1234      return (MiniHBaseCluster)this.hbaseCluster;
1235    }
1236    throw new RuntimeException(hbaseCluster + " not an instance of " +
1237                               MiniHBaseCluster.class.getName());
1238  }
1239
1240  /**
1241   * Stops mini hbase, zk, and hdfs clusters.
1242   * @throws IOException
1243   * @see #startMiniCluster(int)
1244   */
1245  public void shutdownMiniCluster() throws Exception {
1246    LOG.info("Shutting down minicluster");
1247    shutdownMiniHBaseCluster();
1248    shutdownMiniDFSCluster();
1249    shutdownMiniZKCluster();
1250
1251    cleanupTestDir();
1252    miniClusterRunning = false;
1253    LOG.info("Minicluster is down");
1254  }
1255
1256  /**
1257   * Shutdown HBase mini cluster.Does not shutdown zk or dfs if running.
1258   * @throws java.io.IOException in case command is unsuccessful
1259   */
1260  public void shutdownMiniHBaseCluster() throws IOException {
1261    cleanup();
1262    if (this.hbaseCluster != null) {
1263      this.hbaseCluster.shutdown();
1264      // Wait till hbase is down before going on to shutdown zk.
1265      this.hbaseCluster.waitUntilShutDown();
1266      this.hbaseCluster = null;
1267    }
1268    if (zooKeeperWatcher != null) {
1269      zooKeeperWatcher.close();
1270      zooKeeperWatcher = null;
1271    }
1272  }
1273
1274  /**
   * Abruptly shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException in case command is unsuccessful
1277   */
1278  public void killMiniHBaseCluster() throws IOException {
1279    cleanup();
1280    if (this.hbaseCluster != null) {
1281      getMiniHBaseCluster().killAll();
1282      this.hbaseCluster = null;
1283    }
1284    if (zooKeeperWatcher != null) {
1285      zooKeeperWatcher.close();
1286      zooKeeperWatcher = null;
1287    }
1288  }
1289
1290  // close hbase admin, close current connection and reset MIN MAX configs for RS.
1291  private void cleanup() throws IOException {
1292    if (hbaseAdmin != null) {
1293      hbaseAdmin.close();
1294      hbaseAdmin = null;
1295    }
1296    if (this.connection != null) {
1297      this.connection.close();
1298      this.connection = null;
1299    }
1300    // unset the configuration for MIN and MAX RS to start
1301    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1302    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1303  }
1304
1305  /**
   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
   * is true, a new root directory path is fetched even if one was fetched before;
   * if false, the previously fetched path is reused.
1309   * Note: this does not cause the root dir to be created.
1310   * @return Fully qualified path for the default hbase root dir
1311   * @throws IOException
1312   */
1313  public Path getDefaultRootDirPath(boolean create) throws IOException {
1314    if (!create) {
1315      return getDataTestDirOnTestFS();
1316    } else {
1317      return getNewDataTestDirOnTestFS();
1318    }
1319  }
1320
1321  /**
   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean)}
   * except that the <code>create</code> flag is false.
1324   * Note: this does not cause the root dir to be created.
1325   * @return Fully qualified path for the default hbase root dir
1326   * @throws IOException
1327   */
1328  public Path getDefaultRootDirPath() throws IOException {
1329    return getDefaultRootDirPath(false);
1330  }
1331
1332  /**
   * Creates an hbase rootdir in user home directory.  Also creates hbase
   * version file.  Normally you won't make use of this method.  Root hbasedir
   * is created for you as part of mini cluster startup.  You'd only use this
   * method if you were doing manual operations.
   * @param create Whether to fetch a new root or data directory path, even if one
   * has been fetched before.
   * Note: the directory is created irrespective of whether the path has been fetched
   * before; if the directory already exists, it will be overwritten.
1341   * @return Fully qualified path to hbase root dir
1342   * @throws IOException
1343   */
1344  public Path createRootDir(boolean create) throws IOException {
1345    FileSystem fs = FileSystem.get(this.conf);
1346    Path hbaseRootdir = getDefaultRootDirPath(create);
1347    FSUtils.setRootDir(this.conf, hbaseRootdir);
1348    fs.mkdirs(hbaseRootdir);
1349    FSUtils.setVersion(fs, hbaseRootdir);
1350    return hbaseRootdir;
1351  }
1352
1353  /**
   * Same as {@link HBaseTestingUtility#createRootDir(boolean)}
   * except that the <code>create</code> flag is false.
1356   * @return Fully qualified path to hbase root dir
1357   * @throws IOException
1358   */
1359  public Path createRootDir() throws IOException {
1360    return createRootDir(false);
1361  }
1362
1363  /**
   * Creates an hbase walDir in the user's home directory.
1365   * Normally you won't make use of this method. Root hbaseWALDir
1366   * is created for you as part of mini cluster startup. You'd only use this
1367   * method if you were doing manual operation.
1368   *
   * @return Fully qualified path to the new WAL root dir
   * @throws IOException
   */
1372  public Path createWALRootDir() throws IOException {
1373    FileSystem fs = FileSystem.get(this.conf);
1374    Path walDir = getNewDataTestDirOnTestFS();
1375    FSUtils.setWALRootDir(this.conf, walDir);
1376    fs.mkdirs(walDir);
1377    return walDir;
1378  }
1379
1380  private void setHBaseFsTmpDir() throws IOException {
1381    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1382    if (hbaseFsTmpDirInString == null) {
1383      this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
1384      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1385    } else {
1386      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1387    }
1388  }
1389
1390  /**
1391   * Flushes all caches in the mini hbase cluster
1392   * @throws IOException
1393   */
1394  public void flush() throws IOException {
1395    getMiniHBaseCluster().flushcache();
1396  }
1397
1398  /**
   * Flushes all regions of the specified table in the mini hbase cluster
1400   * @throws IOException
1401   */
1402  public void flush(TableName tableName) throws IOException {
1403    getMiniHBaseCluster().flushcache(tableName);
1404  }
1405
1406  /**
1407   * Compact all regions in the mini hbase cluster
1408   * @throws IOException
1409   */
1410  public void compact(boolean major) throws IOException {
1411    getMiniHBaseCluster().compact(major);
1412  }
1413
1414  /**
   * Compact all of a table's regions in the mini hbase cluster
1416   * @throws IOException
1417   */
1418  public void compact(TableName tableName, boolean major) throws IOException {
1419    getMiniHBaseCluster().compact(tableName, major);
1420  }
1421
1422  /**
1423   * Create a table.
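   * <p>
   * An illustrative sketch:
   * <pre>{@code
   * // util is an initialized HBaseTestingUtility; names are illustrative
   * Table t = util.createTable(TableName.valueOf("testtable"), "family");
   * }</pre>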
1424   * @param tableName
1425   * @param family
1426   * @return A Table instance for the created table.
1427   * @throws IOException
1428   */
1429  public Table createTable(TableName tableName, String family)
      throws IOException {
1431    return createTable(tableName, new String[]{family});
1432  }
1433
1434  /**
1435   * Create a table.
1436   * @param tableName
1437   * @param families
1438   * @return A Table instance for the created table.
1439   * @throws IOException
1440   */
1441  public Table createTable(TableName tableName, String[] families)
1442  throws IOException {
1443    List<byte[]> fams = new ArrayList<>(families.length);
1444    for (String family : families) {
1445      fams.add(Bytes.toBytes(family));
1446    }
1447    return createTable(tableName, fams.toArray(new byte[0][]));
1448  }
1449
1450  /**
1451   * Create a table.
1452   * @param tableName
1453   * @param family
1454   * @return A Table instance for the created table.
1455   * @throws IOException
1456   */
1457  public Table createTable(TableName tableName, byte[] family)
      throws IOException {
1459    return createTable(tableName, new byte[][]{family});
1460  }
1461
1462  /**
1463   * Create a table with multiple regions.
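   * <p>
   * Split keys are generated between "aaaaa" and "zzzzz", so at least three regions
   * are required. An illustrative sketch:
   * <pre>{@code
   * // util is an initialized HBaseTestingUtility; names are illustrative
   * Table t = util.createMultiRegionTable(TableName.valueOf("testtable"),
   *     Bytes.toBytes("family"), 5);
   * }</pre>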
1464   * @param tableName
1465   * @param family
1466   * @param numRegions
1467   * @return A Table instance for the created table.
1468   * @throws IOException
1469   */
1470  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1471      throws IOException {
    if (numRegions < 3) {
      throw new IOException("Must create at least 3 regions");
    }
1473    byte[] startKey = Bytes.toBytes("aaaaa");
1474    byte[] endKey = Bytes.toBytes("zzzzz");
1475    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1476
1477    return createTable(tableName, new byte[][] { family }, splitKeys);
1478  }
1479
1480  /**
1481   * Create a table.
1482   * @param tableName
1483   * @param families
1484   * @return A Table instance for the created table.
1485   * @throws IOException
1486   */
1487  public Table createTable(TableName tableName, byte[][] families)
1488  throws IOException {
1489    return createTable(tableName, families, (byte[][]) null);
1490  }
1491
1492  /**
1493   * Create a table with multiple regions.
1494   * @param tableName
1495   * @param families
1496   * @return A Table instance for the created table.
1497   * @throws IOException
1498   */
1499  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
1500    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
1501  }
1502
1503  /**
1504   * Create a table.
1505   * @param tableName
1506   * @param families
1507   * @param splitKeys
1508   * @return A Table instance for the created table.
1509   * @throws IOException
1510   */
1511  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1512      throws IOException {
1513    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
1514  }
1515
1516  /**
1517   * Create a table.
1518   * @param tableName the table name
1519   * @param families the families
1520   * @param splitKeys the splitkeys
1521   * @param replicaCount the region replica count
1522   * @return A Table instance for the created table.
1523   * @throws IOException throws IOException
1524   */
1525  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1526      int replicaCount) throws IOException {
1527    return createTable(tableName, families, splitKeys, replicaCount,
1528      new Configuration(getConfiguration()));
1529  }
1530
1531  public Table createTable(TableName tableName, byte[][] families,
1532      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
1534    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1535
1536    getAdmin().createTable(desc, startKey, endKey, numRegions);
1537    // HBaseAdmin only waits for regions to appear in hbase:meta we
1538    // should wait until they are assigned
1539    waitUntilAllRegionsAssigned(tableName);
1540    return getConnection().getTable(tableName);
1541  }
1542
1543  /**
1544   * Create a table.
1545   * @param htd
1546   * @param families
1547   * @param c Configuration to use
1548   * @return A Table instance for the created table.
1549   * @throws IOException
1550   */
1551  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
1552  throws IOException {
1553    return createTable(htd, families, null, c);
1554  }
1555
1556  /**
1557   * Create a table.
1558   * @param htd table descriptor
1559   * @param families array of column families
1560   * @param splitKeys array of split keys
1561   * @param c Configuration to use
1562   * @return A Table instance for the created table.
1563   * @throws IOException if getAdmin or createTable fails
1564   */
1565  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1566      Configuration c) throws IOException {
1567    // Disable blooms (they are on by default as of 0.95) but we disable them here because
1568    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1569    // on is interfering.
1570    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
1571  }
1572
1573  /**
1574   * Create a table.
1575   * @param htd table descriptor
1576   * @param families array of column families
1577   * @param splitKeys array of split keys
1578   * @param type Bloom type
1579   * @param blockSize block size
1580   * @param c Configuration to use
1581   * @return A Table instance for the created table.
1582   * @throws IOException if getAdmin or createTable fails
1583   */
1584
1585  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1586      BloomType type, int blockSize, Configuration c) throws IOException {
1587    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1588    for (byte[] family : families) {
1589      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
1590        .setBloomFilterType(type)
1591        .setBlocksize(blockSize);
1592      if (isNewVersionBehaviorEnabled()) {
        cfdb.setNewVersionBehavior(true);
1594      }
1595      builder.setColumnFamily(cfdb.build());
1596    }
1597    TableDescriptor td = builder.build();
1598    getAdmin().createTable(td, splitKeys);
1599    // HBaseAdmin only waits for regions to appear in hbase:meta
1600    // we should wait until they are assigned
1601    waitUntilAllRegionsAssigned(td.getTableName());
1602    return getConnection().getTable(td.getTableName());
1603  }
1604
1605  /**
1606   * Create a table.
1607   * @param htd table descriptor
1608   * @param splitRows array of split keys
1609   * @return A Table instance for the created table.
1610   * @throws IOException
1611   */
1612  public Table createTable(TableDescriptor htd, byte[][] splitRows)
1613      throws IOException {
1614    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1615    if (isNewVersionBehaviorEnabled()) {
1616      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
          .setNewVersionBehavior(true).build());
1619      }
1620    }
1621    getAdmin().createTable(builder.build(), splitRows);
1622    // HBaseAdmin only waits for regions to appear in hbase:meta
1623    // we should wait until they are assigned
1624    waitUntilAllRegionsAssigned(htd.getTableName());
1625    return getConnection().getTable(htd.getTableName());
1626  }
1627
1628  /**
1629   * Create a table.
1630   * @param tableName the table name
1631   * @param families the families
1632   * @param splitKeys the split keys
1633   * @param replicaCount the replica count
1634   * @param c Configuration to use
1635   * @return A Table instance for the created table.
1636   * @throws IOException
1637   */
1638  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1639      int replicaCount, final Configuration c) throws IOException {
1640    HTableDescriptor htd = new HTableDescriptor(tableName);
1641    htd.setRegionReplication(replicaCount);
1642    return createTable(htd, families, splitKeys, c);
1643  }
1644
1645  /**
1646   * Create a table.
1647   * @param tableName
1648   * @param family
1649   * @param numVersions
1650   * @return A Table instance for the created table.
1651   * @throws IOException
1652   */
1653  public Table createTable(TableName tableName, byte[] family, int numVersions)
1654  throws IOException {
1655    return createTable(tableName, new byte[][]{family}, numVersions);
1656  }
1657
1658  /**
1659   * Create a table.
1660   * @param tableName
1661   * @param families
1662   * @param numVersions
1663   * @return A Table instance for the created table.
1664   * @throws IOException
1665   */
1666  public Table createTable(TableName tableName, byte[][] families, int numVersions)
1667      throws IOException {
1668    return createTable(tableName, families, numVersions, (byte[][]) null);
1669  }
1670
1671  /**
1672   * Create a table.
1673   * @param tableName
1674   * @param families
1675   * @param numVersions
1676   * @param splitKeys
1677   * @return A Table instance for the created table.
1678   * @throws IOException
1679   */
1680  public Table createTable(TableName tableName, byte[][] families, int numVersions,
1681      byte[][] splitKeys) throws IOException {
1682    HTableDescriptor desc = new HTableDescriptor(tableName);
1683    for (byte[] family : families) {
1684      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1685      if (isNewVersionBehaviorEnabled()) {
1686        hcd.setNewVersionBehavior(true);
1687      }
1688      desc.addFamily(hcd);
1689    }
1690    getAdmin().createTable(desc, splitKeys);
1691    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1692    // assigned
1693    waitUntilAllRegionsAssigned(tableName);
1694    return getConnection().getTable(tableName);
1695  }
1696
1697  /**
1698   * Create a table with multiple regions.
1699   * @param tableName
1700   * @param families
1701   * @param numVersions
1702   * @return A Table instance for the created table.
1703   * @throws IOException
1704   */
1705  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1706      throws IOException {
1707    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1708  }
1709
1710  /**
1711   * Create a table.
1712   * @param tableName
1713   * @param families
1714   * @param numVersions
1715   * @param blockSize
1716   * @return A Table instance for the created table.
1717   * @throws IOException
1718   */
1719  public Table createTable(TableName tableName, byte[][] families,
1720    int numVersions, int blockSize) throws IOException {
1721    HTableDescriptor desc = new HTableDescriptor(tableName);
1722    for (byte[] family : families) {
1723      HColumnDescriptor hcd = new HColumnDescriptor(family)
1724          .setMaxVersions(numVersions)
1725          .setBlocksize(blockSize);
1726      if (isNewVersionBehaviorEnabled()) {
1727        hcd.setNewVersionBehavior(true);
1728      }
1729      desc.addFamily(hcd);
1730    }
1731    getAdmin().createTable(desc);
1732    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1733    // assigned
1734    waitUntilAllRegionsAssigned(tableName);
1735    return getConnection().getTable(tableName);
1736  }
1737
  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize, String cpName) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
      }
      desc.addFamily(hcd);
    }
    if (cpName != null) {
      desc.addCoprocessor(cpName);
    }
    getAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
1759
1760  /**
1761   * Create a table.
1762   * @param tableName
1763   * @param families
1764   * @param numVersions
1765   * @return A Table instance for the created table.
1766   * @throws IOException
1767   */
1768  public Table createTable(TableName tableName, byte[][] families,
1769      int[] numVersions)
1770  throws IOException {
1771    HTableDescriptor desc = new HTableDescriptor(tableName);
1772    int i = 0;
1773    for (byte[] family : families) {
1774      HColumnDescriptor hcd = new HColumnDescriptor(family)
1775          .setMaxVersions(numVersions[i]);
1776      if (isNewVersionBehaviorEnabled()) {
1777        hcd.setNewVersionBehavior(true);
1778      }
1779      desc.addFamily(hcd);
1780      i++;
1781    }
1782    getAdmin().createTable(desc);
1783    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1784    // assigned
1785    waitUntilAllRegionsAssigned(tableName);
1786    return getConnection().getTable(tableName);
1787  }
1788
1789  /**
1790   * Create a table.
1791   * @param tableName
1792   * @param family
1793   * @param splitRows
1794   * @return A Table instance for the created table.
1795   * @throws IOException
1796   */
1797  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
1798      throws IOException {
1799    HTableDescriptor desc = new HTableDescriptor(tableName);
1800    HColumnDescriptor hcd = new HColumnDescriptor(family);
1801    if (isNewVersionBehaviorEnabled()) {
1802      hcd.setNewVersionBehavior(true);
1803    }
1804    desc.addFamily(hcd);
1805    getAdmin().createTable(desc, splitRows);
1806    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1807    // assigned
1808    waitUntilAllRegionsAssigned(tableName);
1809    return getConnection().getTable(tableName);
1810  }
1811
1812  /**
1813   * Create a table with multiple regions.
1814   * @param tableName
1815   * @param family
1816   * @return A Table instance for the created table.
1817   * @throws IOException
1818   */
1819  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1820    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1821  }
1822
1823  /**
1824   * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
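   * <p>
   * An illustrative sketch:
   * <pre>{@code
   * // admin and tableName are assumed to exist already (illustrative)
   * TableDescriptor td = TableDescriptorBuilder
   *     .newBuilder(admin.getTableDescriptor(tableName))
   *     .setMaxFileSize(128 * 1024 * 1024L)
   *     .build();
   * HBaseTestingUtility.modifyTableSync(admin, td);
   * }</pre>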
1825   */
1826  @SuppressWarnings("serial")
1827  public static void modifyTableSync(Admin admin, TableDescriptor desc)
1828      throws IOException, InterruptedException {
1829    admin.modifyTable(desc);
1830    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
1831      setFirst(0);
1832      setSecond(0);
1833    }};
1834    int i = 0;
1835    do {
1836      status = admin.getAlterStatus(desc.getTableName());
1837      if (status.getSecond() != 0) {
1838        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
1839          + " regions updated.");
1840        Thread.sleep(1 * 1000L);
1841      } else {
1842        LOG.debug("All regions updated.");
1843        break;
1844      }
1845    } while (status.getFirst() != 0 && i++ < 500);
1846    if (status.getFirst() != 0) {
1847      throw new IOException("Failed to update all regions even after 500 seconds.");
1848    }
1849  }
1850
1851  /**
1852   * Set the number of Region replicas.
1853   */
1854  public static void setReplicas(Admin admin, TableName table, int replicaCount)
1855      throws IOException, InterruptedException {
1856    admin.disableTable(table);
1857    HTableDescriptor desc = new HTableDescriptor(admin.getTableDescriptor(table));
1858    desc.setRegionReplication(replicaCount);
1859    admin.modifyTable(desc.getTableName(), desc);
1860    admin.enableTable(table);
1861  }
1862
1863  /**
1864   * Drop an existing table
1865   * @param tableName existing table
1866   */
1867  public void deleteTable(TableName tableName) throws IOException {
1868    try {
1869      getAdmin().disableTable(tableName);
1870    } catch (TableNotEnabledException e) {
1871      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1872    }
1873    getAdmin().deleteTable(tableName);
1874  }
1875
1876  /**
1877   * Drop an existing table
1878   * @param tableName existing table
1879   */
1880  public void deleteTableIfAny(TableName tableName) throws IOException {
1881    try {
1882      deleteTable(tableName);
1883    } catch (TableNotFoundException e) {
1884      // ignore
1885    }
1886  }
1887
1888  // ==========================================================================
1889  // Canned table and table descriptor creation
1890  // TODO replace HBaseTestCase
1891
1892  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1893  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1894  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1895  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1896  private static final int MAXVERSIONS = 3;
1897
1898  public static final char FIRST_CHAR = 'a';
1899  public static final char LAST_CHAR = 'z';
1900  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1901  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1902
1903  @Deprecated
1904  public HTableDescriptor createTableDescriptor(final String name,
1905      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1906    return this.createTableDescriptor(TableName.valueOf(name), minVersions, versions, ttl,
1907        keepDeleted);
1908  }
1909
1910  /**
   * Create a table descriptor for a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
1914   */
1915  @Deprecated
1916  public HTableDescriptor createTableDescriptor(final String name) {
1917    return createTableDescriptor(TableName.valueOf(name),  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1918        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1919  }
1920
1921  public HTableDescriptor createTableDescriptor(final TableName name,
1922      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1923    HTableDescriptor htd = new HTableDescriptor(name);
1924    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1925      HColumnDescriptor hcd = new HColumnDescriptor(cfName)
1926          .setMinVersions(minVersions)
1927          .setMaxVersions(versions)
1928          .setKeepDeletedCells(keepDeleted)
1929          .setBlockCacheEnabled(false)
1930          .setTimeToLive(ttl);
1931      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
1933      }
1934      htd.addFamily(hcd);
1935    }
1936    return htd;
1937  }
1938
1939  /**
   * Create a table descriptor for a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
1943   */
1944  public HTableDescriptor createTableDescriptor(final TableName name) {
1945    return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1946        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1947  }
1948
1949  public HTableDescriptor createTableDescriptor(final TableName tableName,
1950      byte[] family) {
1951    return createTableDescriptor(tableName, new byte[][] {family}, 1);
1952  }
1953
1954  public HTableDescriptor createTableDescriptor(final TableName tableName,
1955      byte[][] families, int maxVersions) {
1956    HTableDescriptor desc = new HTableDescriptor(tableName);
1957    for (byte[] family : families) {
1958      HColumnDescriptor hcd = new HColumnDescriptor(family)
1959          .setMaxVersions(maxVersions);
1960      if (isNewVersionBehaviorEnabled()) {
        hcd.setNewVersionBehavior(true);
1962      }
1963      desc.addFamily(hcd);
1964    }
1965    return desc;
1966  }
1967
1968  /**
1969   * Create an HRegion that writes to the local tmp dirs
1970   * @param desc a table descriptor indicating which table the region belongs to
1971   * @param startKey the start boundary of the region
1972   * @param endKey the end boundary of the region
1973   * @return a region that writes to local dir for testing
1974   * @throws IOException
1975   */
1976  public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,
1977      byte [] endKey)
1978  throws IOException {
1979    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1980    return createLocalHRegion(hri, desc);
1981  }
1982
1983  /**
1984   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
1985   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
1986   */
1987  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
1988    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
1989  }
1990
1991  /**
1992   * Create an HRegion that writes to the local tmp dirs with specified wal
1993   * @param info regioninfo
1994   * @param desc table descriptor
1995   * @param wal wal for this region.
1996   * @return created hregion
1997   * @throws IOException
1998   */
1999  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal)
2000      throws IOException {
2001    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
2002  }
2003
2004  /**
2005   * @param tableName
2006   * @param startKey
2007   * @param stopKey
2008   * @param callingMethod
2009   * @param conf
2010   * @param isReadOnly
2011   * @param families
2012   * @throws IOException
2013   * @return A region on which you must call
2014             {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
2015   * @deprecated use
2016   * {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)}
2017   */
2018  @Deprecated
2019  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
2020      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
2021      WAL wal, byte[]... families) throws IOException {
2022    return this
2023        .createLocalHRegion(TableName.valueOf(tableName), startKey, stopKey, isReadOnly, durability,
2024            wal, families);
2025  }
2026
2027  /**
2028   * @param tableName
2029   * @param startKey
2030   * @param stopKey
2031   * @param isReadOnly
2032   * @param families
2033   * @return A region on which you must call
2034   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
2035   * @throws IOException
2036   */
2037  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
2038      boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException {
    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, isReadOnly,
        durability, wal, null, families);
2041  }
2042
  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
      byte[] stopKey, boolean isReadOnly, Durability durability, WAL wal,
      boolean[] compactedMemStore, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      if (compactedMemStore != null && i < compactedMemStore.length) {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      }
      i++;
      // Keep every version around so tests can read back all writes.
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
2063    }
2064    htd.setDurability(durability);
2065    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
2066    return createLocalHRegion(info, htd, wal);
2067  }
2068
2069  //
2070  // ==========================================================================
2071
2072  /**
2073   * Provide an existing table name to truncate.
2074   * Scans the table and issues a delete for each row read.
2075   * @param tableName existing table
   * @return Table to that table, now emptied of data
2077   * @throws IOException
2078   */
2079  public Table deleteTableData(TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    Scan scan = new Scan();
    try (ResultScanner resScan = table.getScanner(scan)) {
      for (Result res : resScan) {
        Delete del = new Delete(res.getRow());
        table.delete(del);
      }
    }
    return table;
2090  }
2091
2092  /**
2093   * Truncate a table using the admin command.
2094   * Effectively disables, deletes, and recreates the table.
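   * <p>
   * For example, to wipe a table while keeping its split points (illustrative):
   * <pre>{@code
   * // util is an initialized HBaseTestingUtility
   * Table t = util.truncateTable(tableName, true);
   * }</pre>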
2095   * @param tableName table which must exist.
2096   * @param preserveRegions keep the existing split points
   * @return Table for the new table
2098   */
  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
      throws IOException {
2101    Admin admin = getAdmin();
2102    if (!admin.isTableDisabled(tableName)) {
2103      admin.disableTable(tableName);
2104    }
2105    admin.truncateTable(tableName, preserveRegions);
2106    return getConnection().getTable(tableName);
2107  }
2108
2109  /**
2110   * Truncate a table using the admin command.
2111   * Effectively disables, deletes, and recreates the table.
2112   * For previous behavior of issuing row deletes, see
2113   * deleteTableData.
2114   * Expressly does not preserve regions of existing table.
2115   * @param tableName table which must exist.
   * @return Table for the new table
2117   */
2118  public Table truncateTable(final TableName tableName) throws IOException {
2119    return truncateTable(tableName, false);
2120  }
2121
2122  /**
2123   * Load table with rows from 'aaa' to 'zzz'.
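   * <p>
   * Writes one Put per three-letter row key. An illustrative sketch:
   * <pre>{@code
   * // util is an initialized HBaseTestingUtility; names are illustrative
   * Table t = util.createTable(TableName.valueOf("testtable"), "family");
   * int rows = util.loadTable(t, Bytes.toBytes("family")); // 26 * 26 * 26 rows
   * }</pre>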
2124   * @param t Table
2125   * @param f Family
2126   * @return Count of rows loaded.
2127   * @throws IOException
2128   */
2129  public int loadTable(final Table t, final byte[] f) throws IOException {
2130    return loadTable(t, new byte[][] {f});
2131  }
2132
2133  /**
2134   * Load table with rows from 'aaa' to 'zzz'.
2135   * @param t Table
   * @param f Family
   * @param writeToWAL whether puts should be written to the WAL
2137   * @return Count of rows loaded.
2138   * @throws IOException
2139   */
2140  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
2141    return loadTable(t, new byte[][] {f}, null, writeToWAL);
2142  }
2143
2144  /**
2145   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2146   * @param t Table
2147   * @param f Array of Families to load
2148   * @return Count of rows loaded.
2149   * @throws IOException
2150   */
2151  public int loadTable(final Table t, final byte[][] f) throws IOException {
2152    return loadTable(t, f, null);
2153  }
2154
2155  /**
2156   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2157   * @param t Table
2158   * @param f Array of Families to load
2159   * @param value the values of the cells. If null is passed, the row key is used as value
2160   * @return Count of rows loaded.
2161   * @throws IOException
2162   */
2163  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2164    return loadTable(t, f, value, true);
2165  }
2166
2167  /**
2168   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2169   * @param t Table
2170   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @param writeToWAL whether puts should be written to the WAL
2172   * @return Count of rows loaded.
2173   * @throws IOException
2174   */
2175  public int loadTable(final Table t, final byte[][] f, byte[] value,
2176      boolean writeToWAL) throws IOException {
2177    List<Put> puts = new ArrayList<>();
2178    for (byte[] row : HBaseTestingUtility.ROWS) {
2179      Put put = new Put(row);
2180      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2181      for (int i = 0; i < f.length; i++) {
2182        byte[] value1 = value != null ? value : row;
2183        put.addColumn(f[i], f[i], value1);
2184      }
2185      puts.add(put);
2186    }
2187    t.put(puts);
2188    return puts.size();
2189  }
2190
2191  /** A tracker for tracking and validating table rows
2192   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
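   * <p>
   * An illustrative sketch:
   * <pre>{@code
   * // scanner is assumed to be a ResultScanner over the loaded table
   * SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
   * for (Result r : scanner) {
   *   tracker.addRow(r.getRow());
   * }
   * tracker.validate(); // throws RuntimeException if any count is off
   * }</pre>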
2193   */
2194  public static class SeenRowTracker {
2195    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
2197    byte[] startRow;
2198    byte[] stopRow;
2199
2200    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
2201      this.startRow = startRow;
2202      this.stopRow = stopRow;
2203    }
2204
2205    void reset() {
2206      for (byte[] row : ROWS) {
2207        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
2208      }
2209    }
2210
2211    int i(byte b) {
2212      return b - 'a';
2213    }
2214
2215    public void addRow(byte[] row) {
2216      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
2217    }
2218
    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
     * all other rows are not seen at all
     */
2222    public void validate() {
2223      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2224        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2225          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2226            int count = seenRows[i(b1)][i(b2)][i(b3)];
2227            int expectedCount = 0;
2228            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
2229                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
2230              expectedCount = 1;
2231            }
2232            if (count != expectedCount) {
2233              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
2234              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
2235                  "instead of " + expectedCount);
2236            }
2237          }
2238        }
2239      }
2240    }
2241  }
2242
2243  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2244    return loadRegion(r, f, false);
2245  }
2246
2247  public int loadRegion(final Region r, final byte[] f) throws IOException {
2248    return loadRegion((HRegion)r, f);
2249  }
2250
2251  /**
2252   * Load region with rows from 'aaa' to 'zzz'.
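   * <p>
   * An illustrative sketch:
   * <pre>{@code
   * // htd is assumed to describe a table with the family below (illustrative)
   * HRegion region = util.createLocalHRegion(htd, null, null);
   * int loaded = util.loadRegion(region, Bytes.toBytes("family"), true);
   * }</pre>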
2253   * @param r Region
2254   * @param f Family
2255   * @param flush flush the cache if true
2256   * @return Count of rows loaded.
2257   * @throws IOException
2258   */
2259  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
2260  throws IOException {
2261    byte[] k = new byte[3];
2262    int rowCount = 0;
2263    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2264      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2265        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2266          k[0] = b1;
2267          k[1] = b2;
2268          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.addColumn(f, null, k);
2275          int preRowCount = rowCount;
2276          int pause = 10;
2277          int maxPause = 1000;
2278          while (rowCount == preRowCount) {
2279            try {
2280              r.put(put);
2281              rowCount++;
2282            } catch (RegionTooBusyException e) {
2283              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
2284              Threads.sleep(pause);
2285            }
2286          }
2287        }
2288      }
2289      if (flush) {
2290        r.flush(true);
2291      }
2292    }
2293    return rowCount;
2294  }
2295
2296  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2297      throws IOException {
2298    for (int i = startRow; i < endRow; i++) {
2299      byte[] data = Bytes.toBytes(String.valueOf(i));
2300      Put put = new Put(data);
2301      put.addColumn(f, null, data);
2302      t.put(put);
2303    }
2304  }
2305
2306  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
2307      throws IOException {
2308    Random r = new Random();
2309    byte[] row = new byte[rowSize];
2310    for (int i = 0; i < totalRows; i++) {
2311      r.nextBytes(row);
2312      Put put = new Put(row);
2313      put.addColumn(f, new byte[]{0}, new byte[]{0});
2314      t.put(put);
2315    }
2316  }
2317
2318  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2319      int replicaId)
2320      throws IOException {
2321    for (int i = startRow; i < endRow; i++) {
2322      String failMsg = "Failed verification of row :" + i;
2323      byte[] data = Bytes.toBytes(String.valueOf(i));
2324      Get get = new Get(data);
2325      get.setReplicaId(replicaId);
2326      get.setConsistency(Consistency.TIMELINE);
2327      Result result = table.get(get);
2328      assertTrue(failMsg, result.containsColumn(f, null));
2329      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2330      Cell cell = result.getColumnLatestCell(f, null);
2331      assertTrue(failMsg,
2332        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2333          cell.getValueLength()));
2334    }
2335  }
2336
2337  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2338      throws IOException {
2339    verifyNumericRows((HRegion)region, f, startRow, endRow);
2340  }
2341
2342  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2343      throws IOException {
2344    verifyNumericRows(region, f, startRow, endRow, true);
2345  }
2346
2347  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2348      final boolean present) throws IOException {
2349    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
2350  }
2351
2352  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2353      final boolean present) throws IOException {
2354    for (int i = startRow; i < endRow; i++) {
2355      String failMsg = "Failed verification of row :" + i;
2356      byte[] data = Bytes.toBytes(String.valueOf(i));
2357      Result result = region.get(new Get(data));
2358
2359      boolean hasResult = result != null && !result.isEmpty();
2360      assertEquals(failMsg + result, present, hasResult);
2361      if (!present) continue;
2362
2363      assertTrue(failMsg, result.containsColumn(f, null));
2364      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2365      Cell cell = result.getColumnLatestCell(f, null);
2366      assertTrue(failMsg,
2367        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2368          cell.getValueLength()));
2369    }
2370  }
2371
2372  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2373      throws IOException {
2374    for (int i = startRow; i < endRow; i++) {
2375      byte[] data = Bytes.toBytes(String.valueOf(i));
2376      Delete delete = new Delete(data);
2377      delete.addFamily(f);
2378      t.delete(delete);
2379    }
2380  }
2381
2382  /**
2383   * Return the number of rows in the given table.
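   * <p>
   * For example, after {@link #loadTable(Table, byte[])} (illustrative):
   * <pre>{@code
   * // util is an initialized HBaseTestingUtility
   * assertEquals(26 * 26 * 26, util.countRows(table));
   * }</pre>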
2384   */
2385  public int countRows(final Table table) throws IOException {
2386    return countRows(table, new Scan());
2387  }
2388
2389  public int countRows(final Table table, final Scan scan) throws IOException {
2390    try (ResultScanner results = table.getScanner(scan)) {
2391      int count = 0;
2392      while (results.next() != null) {
2393        count++;
2394      }
2395      return count;
2396    }
2397  }
2398
2399  public int countRows(final Table table, final byte[]... families) throws IOException {
2400    Scan scan = new Scan();
2401    for (byte[] family: families) {
2402      scan.addFamily(family);
2403    }
2404    return countRows(table, scan);
2405  }
2406
2407  /**
2408   * Return the number of rows in the given table.
2409   */
2410  public int countRows(final TableName tableName) throws IOException {
2411    Table table = getConnection().getTable(tableName);
2412    try {
2413      return countRows(table);
2414    } finally {
2415      table.close();
2416    }
2417  }
2418
2419  public int countRows(final Region region) throws IOException {
2420    return countRows(region, new Scan());
2421  }
2422
2423  public int countRows(final Region region, final Scan scan) throws IOException {
2424    InternalScanner scanner = region.getScanner(scan);
2425    try {
2426      return countRows(scanner);
2427    } finally {
2428      scanner.close();
2429    }
2430  }
2431
2432  public int countRows(final InternalScanner scanner) throws IOException {
2433    int scannedCount = 0;
2434    List<Cell> results = new ArrayList<>();
2435    boolean hasMore = true;
2436    while (hasMore) {
2437      hasMore = scanner.next(results);
2438      scannedCount += results.size();
2439      results.clear();
2440    }
2441    return scannedCount;
2442  }
2443
2444  /**
2445   * Return an md5 digest of the entire contents of a table.
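   * <p>
   * Useful for checking that an operation left table contents unchanged. An
   * illustrative sketch:
   * <pre>{@code
   * String before = util.checksumRows(table); // util/table are illustrative
   * // ... run the operation under test ...
   * assertEquals(before, util.checksumRows(table));
   * }</pre>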
2446   */
  public String checksumRows(final Table table) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    try (ResultScanner results = table.getScanner(new Scan())) {
      for (Result res : results) {
        digest.update(res.getRow());
      }
    }
    return Bytes.toHex(digest.digest());
  }
2458
2459  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2460  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2461  static {
2462    int i = 0;
2463    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2464      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2465        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2466          ROWS[i][0] = b1;
2467          ROWS[i][1] = b2;
2468          ROWS[i][2] = b3;
2469          i++;
2470        }
2471      }
2472    }
2473  }
2474
2475  public static final byte[][] KEYS = {
2476    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2477    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2478    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2479    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2480    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2481    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2482    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2483    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2484    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
2485  };
2486
2487  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
2488      Bytes.toBytes("bbb"),
2489      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2490      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2491      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2492      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2493      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2494      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2495      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2496      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2497  };
2498
2499  /**
2500   * Create rows in hbase:meta for regions of the specified table with the specified
2501   * start keys.  The first startKey should be a 0 length byte array if you
2502   * want to form a proper range of regions.
2503   * @param conf
2504   * @param htd
2505   * @param startKeys
2506   * @return list of region info for regions added to meta
2507   * @throws IOException
2508   * @deprecated since 2.0 version and will be removed in 3.0 version.
2509   *             use {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
2510   */
2511  @Deprecated
2512  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2513      final HTableDescriptor htd, byte [][] startKeys) throws IOException {
2514    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys)
2515        .stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
2516  }
2517  /**
2518   * Create rows in hbase:meta for regions of the specified table with the specified
2519   * start keys.  The first startKey should be a 0 length byte array if you
2520   * want to form a proper range of regions.
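   * <p>
   * For example, two start keys yield two regions that together cover the whole
   * key space (illustrative sketch):
   * <pre>{@code
   * // td is a TableDescriptor for the table being registered (illustrative)
   * byte[][] startKeys = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("mmm") };
   * List<RegionInfo> regions = util.createMultiRegionsInMeta(conf, td, startKeys);
   * }</pre>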
2521   * @param conf
2522   * @param htd
2523   * @param startKeys
2524   * @return list of region info for regions added to meta
2525   * @throws IOException
2526   */
2527  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
2528      final TableDescriptor htd, byte [][] startKeys)
2529  throws IOException {
2530    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
2531    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2532    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
2533    MetaTableAccessor
2534        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
2535    // add custom ones
2536    for (int i = 0; i < startKeys.length; i++) {
2537      int j = (i + 1) % startKeys.length;
2538      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
2539          .setStartKey(startKeys[i])
2540          .setEndKey(startKeys[j])
2541          .build();
2542      MetaTableAccessor.addRegionToMeta(getConnection(), hri);
2543      newRegions.add(hri);
2544    }
2545
2546    meta.close();
2547    return newRegions;
2548  }
2549
2550  /**
2551   * Create an unmanaged WAL. Be sure to close it when you're through.
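   * <p>
   * An illustrative sketch:
   * <pre>{@code
   * // conf, rootDir and regionInfo are assumed to exist (illustrative)
   * WAL wal = HBaseTestingUtility.createWal(conf, rootDir, regionInfo);
   * try {
   *   // use the WAL
   * } finally {
   *   wal.close();
   * }
   * }</pre>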
2552   */
2553  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
2554      throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless we pass it along via the conf.
2557    Configuration confForWAL = new Configuration(conf);
2558    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
2559    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
2560  }
2561
2562
2563  /**
   * Create a region with its own WAL. Be sure to call
2565   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
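   * <p>
   * An illustrative sketch:
   * <pre>{@code
   * // info, rootDir, conf and htd are assumed to exist (illustrative)
   * HRegion region = HBaseTestingUtility.createRegionAndWAL(info, rootDir, conf, htd);
   * try {
   *   // exercise the region
   * } finally {
   *   HBaseTestingUtility.closeRegionAndWAL(region);
   * }
   * }</pre>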
2566   */
2567  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2568      final Configuration conf, final TableDescriptor htd) throws IOException {
2569    return createRegionAndWAL(info, rootDir, conf, htd, true);
2570  }
2571
2572  /**
   * Create a region with its own WAL. Be sure to call
2574   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2575   */
2576  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2577      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
2578      throws IOException {
2579    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2580    region.setBlockCache(blockCache);
2581    region.initialize();
2582    return region;
2583  }
2584  /**
   * Create a region with its own WAL. Be sure to call
2586   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2587   */
2588  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2589      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
2590      throws IOException {
2591    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2592    region.setMobFileCache(mobFileCache);
2593    region.initialize();
2594    return region;
2595  }
2596
2597  /**
   * Create a region with its own WAL. Be sure to call
2599   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2600   */
2601  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2602      final Configuration conf, final TableDescriptor htd, boolean initialize)
2603      throws IOException {
2604    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
2605    WAL wal = createWal(conf, rootDir, info);
2606    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
2607  }
2608
2609  /**
2610   * Returns all rows from the hbase:meta table.
2611   *
2612   * @throws IOException When reading the rows fails.
2613   */
2614  public List<byte[]> getMetaTableRows() throws IOException {
2615    // TODO: Redo using MetaTableAccessor class
2616    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2617    List<byte[]> rows = new ArrayList<>();
2618    ResultScanner s = t.getScanner(new Scan());
2619    for (Result result : s) {
2620      LOG.info("getMetaTableRows: row -> " +
2621        Bytes.toStringBinary(result.getRow()));
2622      rows.add(result.getRow());
2623    }
2624    s.close();
2625    t.close();
2626    return rows;
2627  }
2628
2629  /**
2630   * Returns all rows from the hbase:meta table for a given user table
2631   *
2632   * @throws IOException When reading the rows fails.
2633   */
2634  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2635    // TODO: Redo using MetaTableAccessor.
2636    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2637    List<byte[]> rows = new ArrayList<>();
2638    ResultScanner s = t.getScanner(new Scan());
2639    for (Result result : s) {
2640      RegionInfo info = MetaTableAccessor.getRegionInfo(result);
2641      if (info == null) {
2642        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2643        // TODO figure out what to do for this new hosed case.
2644        continue;
2645      }
2646
2647      if (info.getTable().equals(tableName)) {
2648        LOG.info("getMetaTableRows: row -> " +
2649            Bytes.toStringBinary(result.getRow()) + info);
2650        rows.add(result.getRow());
2651      }
2652    }
2653    s.close();
2654    t.close();
2655    return rows;
2656  }
2657
2658  /**
2659   * Returns all regions of the specified table
2660   *
2661   * @param tableName the table name
2662   * @return all regions of the specified table
2663   * @throws IOException when getting the regions fails.
2664   */
2665  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
2666    try (Admin admin = getConnection().getAdmin()) {
2667      return admin.getRegions(tableName);
2668    }
2669  }
2670
2671  /*
2672   * Find any other region server which is different from the one identified by parameter
2673   * @param rs
2674   * @return another region server
2675   */
2676  public HRegionServer getOtherRegionServer(HRegionServer rs) {
2677    for (JVMClusterUtil.RegionServerThread rst :
2678      getMiniHBaseCluster().getRegionServerThreads()) {
      if (rst.getRegionServer() != rs) {
2680        return rst.getRegionServer();
2681      }
2682    }
2683    return null;
2684  }
2685
2686  /**
2687   * Tool to get the reference to the region server object that holds the
2688   * region of the specified user table.
2689   * @param tableName user table to lookup in hbase:meta
2690   * @return region server that holds it, null if the row doesn't exist
2691   * @throws IOException
2692   * @throws InterruptedException
2693   */
2694  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2695      throws IOException, InterruptedException {
2696    List<RegionInfo> regions = getRegions(tableName);
2697    if (regions == null || regions.isEmpty()) {
2698      return null;
2699    }
2700    LOG.debug("Found " + regions.size() + " regions for table " +
2701        tableName);
2702
2703    byte[] firstRegionName = regions.stream()
2704        .filter(r -> !r.isOffline())
2705        .map(RegionInfo::getRegionName)
2706        .findFirst()
2707        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
2708
2709    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
2710    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2711      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2712    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2713      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
2716      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
2717      if (index != -1) {
2718        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2719      }
2720      // Came back -1.  Region may not be online yet.  Sleep a while.
2721      retrier.sleepUntilNextRetry();
2722    }
2723    return null;
2724  }
2725
2726  /**
2727   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
2729   *
2730   * @throws IOException When starting the cluster fails.
2731   */
2732  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2733    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
2734    conf.setIfUnset(
2735        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
2736        "99.0");
2737    startMiniMapReduceCluster(2);
2738    return mrCluster;
2739  }
2740
2741  /**
2742   * Tasktracker has a bug where changing the hadoop.log.dir system property
2743   * will not change its internal static LOG_DIR variable.
2744   */
2745  private void forceChangeTaskLogDir() {
2746    Field logDirField;
2747    try {
2748      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2749      logDirField.setAccessible(true);
2750
2751      Field modifiersField = Field.class.getDeclaredField("modifiers");
2752      modifiersField.setAccessible(true);
2753      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2754
2755      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException | IllegalArgumentException
        | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
2766  }
2767
2768  /**
2769   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2770   * filesystem.
   * @param servers  The number of <code>TaskTracker</code>s to start.
2772   * @throws IOException When starting the cluster fails.
2773   */
2774  private void startMiniMapReduceCluster(final int servers) throws IOException {
2775    if (mrCluster != null) {
2776      throw new IllegalStateException("MiniMRCluster is already running");
2777    }
2778    LOG.info("Starting mini mapreduce cluster...");
2779    setupClusterTestDir();
2780    createDirsAndSetProperties();
2781
2782    forceChangeTaskLogDir();
2783
    //// hadoop2 specific settings
    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // We raise the allowed virtual-memory ratio so that processes don't get killed.
2787    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2788
2789    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2790    // this avoids the problem by disabling speculative task execution in tests.
2791    conf.setBoolean("mapreduce.map.speculative", false);
2792    conf.setBoolean("mapreduce.reduce.speculative", false);
2793    ////
2794
2795    // Allow the user to override FS URI for this map-reduce cluster to use.
2796    mrCluster = new MiniMRCluster(servers,
2797      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2798      null, null, new JobConf(this.conf));
2799    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2800    if (jobConf == null) {
2801      jobConf = mrCluster.createJobConf();
2802    }
2803
2804    jobConf.set("mapreduce.cluster.local.dir",
2805      conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
2806    LOG.info("Mini mapreduce cluster started");
2807
2808    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2809    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2810    // necessary config properties here.  YARN-129 required adding a few properties.
2811    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    // this is for MRv2 support; MR1 ignores it
2813    conf.set("mapreduce.framework.name", "yarn");
2814    conf.setBoolean("yarn.is.minicluster", true);
2815    String rmAddress = jobConf.get("yarn.resourcemanager.address");
2816    if (rmAddress != null) {
2817      conf.set("yarn.resourcemanager.address", rmAddress);
2818    }
2819    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2820    if (historyAddress != null) {
2821      conf.set("mapreduce.jobhistory.address", historyAddress);
2822    }
2823    String schedulerAddress =
2824      jobConf.get("yarn.resourcemanager.scheduler.address");
2825    if (schedulerAddress != null) {
2826      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2827    }
2828    String mrJobHistoryWebappAddress =
2829      jobConf.get("mapreduce.jobhistory.webapp.address");
2830    if (mrJobHistoryWebappAddress != null) {
2831      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
2832    }
2833    String yarnRMWebappAddress =
2834      jobConf.get("yarn.resourcemanager.webapp.address");
2835    if (yarnRMWebappAddress != null) {
2836      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
2837    }
2838  }
2839
2840  /**
2841   * Stops the previously started <code>MiniMRCluster</code>.
2842   */
2843  public void shutdownMiniMapReduceCluster() {
2844    if (mrCluster != null) {
2845      LOG.info("Stopping mini mapreduce cluster...");
2846      mrCluster.shutdown();
2847      mrCluster = null;
2848      LOG.info("Mini mapreduce cluster stopped");
2849    }
2850    // Restore configuration to point to local jobtracker
2851    conf.set("mapreduce.jobtracker.address", "local");
2852  }
2853
2854  /**
   * Create a stubbed out RegionServerServices, mainly for getting the FS.
2856   */
2857  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
2859  }
2860
2861  /**
   * Create a stubbed out RegionServerServices, mainly for getting the FS.
   * This version is used by TestTokenAuthentication.
2864   */
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
2867    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2868    rss.setFileSystem(getTestFileSystem());
2869    rss.setRpcServer(rpc);
2870    return rss;
2871  }
2872
2873  /**
   * Create a stubbed out RegionServerServices, mainly for getting the FS.
   * This version is used by TestOpenRegionHandler.
2876   */
2877  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2878    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2879    rss.setFileSystem(getTestFileSystem());
2880    return rss;
2881  }
2882
2883  /**
2884   * Switches the logger for the given class to DEBUG level.
2885   *
2886   * @param clazz  The class for which to switch to debug logging.
2887   */
2888  public void enableDebug(Class<?> clazz) {
2889    Logger l = LoggerFactory.getLogger(clazz);
2890    if (l instanceof Log4JLogger) {
2891      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2892    } else if (l instanceof Log4jLoggerAdapter) {
2893      LogManager.getLogger(clazz).setLevel(org.apache.log4j.Level.DEBUG);
2894    } else if (l instanceof Jdk14Logger) {
2895      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2896    }
2897  }
2898
2899  /**
   * Expire the Master's ZooKeeper session.
   * @throws Exception if the session could not be expired
2902   */
2903  public void expireMasterSession() throws Exception {
2904    HMaster master = getMiniHBaseCluster().getMaster();
2905    expireSession(master.getZooKeeper(), false);
2906  }
2907
2908  /**
2909   * Expire a region server's session
2910   * @param index which RS
2911   */
2912  public void expireRegionServerSession(int index) throws Exception {
2913    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2914    expireSession(rs.getZooKeeper(), false);
2915    decrementMinRegionServerCount();
2916  }
2917
2918  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for the newly spawned master
2920    // this.hbaseCluster shares this configuration too
2921    decrementMinRegionServerCount(getConfiguration());
2922
2923    // each master thread keeps a copy of configuration
2924    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2925      decrementMinRegionServerCount(master.getMaster().getConfiguration());
2926    }
2927  }
2928
2929  private void decrementMinRegionServerCount(Configuration conf) {
2930    int currentCount = conf.getInt(
2931        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2932    if (currentCount != -1) {
2933      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2934          Math.max(currentCount - 1, 1));
2935    }
2936  }
2937
2938  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
2940  }
2941
2942  /**
2943   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2944   * http://hbase.apache.org/book.html#trouble.zookeeper
2945   * There are issues when doing this:
2946   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2947   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2948   *
2949   * @param nodeZK - the ZK watcher to expire
2950   * @param checkStatus - true to check if we can create a Table with the
2951   *                    current configuration.
2952   */
2953  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
2954    throws Exception {
2955    Configuration c = new Configuration(this.conf);
2956    String quorumServers = ZKConfig.getZKQuorumServersString(c);
2957    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2958    byte[] password = zk.getSessionPasswd();
2959    long sessionID = zk.getSessionId();
2960
    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    //  so we create a first watcher to be sure that the
    //  event was sent. We expect that if our watcher receives the event,
    //  other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    //  we receive all the events, so we don't have to capture the event;
    //  just closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);
2975
2976    // Making it expire
2977    ZooKeeper newZK = new ZooKeeper(quorumServers,
2978        1000, EmptyWatcher.instance, sessionID, password);
2979
    // Ensure that we have a connection to the server before closing down; otherwise
    // the close-session event will be consumed before we reach the CONNECTING state.
2982    long start = System.currentTimeMillis();
2983    while (newZK.getState() != States.CONNECTED
2984         && System.currentTimeMillis() - start < 1000) {
2985       Thread.sleep(1);
2986    }
2987    newZK.close();
2988    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2989
2990    // Now closing & waiting to be sure that the clients get it.
2991    monitor.close();
2992
2993    if (checkStatus) {
2994      getConnection().getTable(TableName.META_TABLE_NAME).close();
2995    }
2996  }
2997
2998  /**
2999   * Get the Mini HBase cluster.
3000   *
3001   * @return hbase cluster
3002   * @see #getHBaseClusterInterface()
3003   */
3004  public MiniHBaseCluster getHBaseCluster() {
3005    return getMiniHBaseCluster();
3006  }
3007
3008  /**
3009   * Returns the HBaseCluster instance.
   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
   * tests referring to this should not assume that the cluster is a mini cluster or a
   * distributed one. If the test only works on a mini cluster, then the specific
   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
   * need to type-cast.
3015   */
3016  public HBaseCluster getHBaseClusterInterface() {
    // Implementation note: we should rename this method as #getHBaseCluster(),
    // but this would require refactoring 90+ calls.
3019    return hbaseCluster;
3020  }
3021
3022  /**
3023   * Get a Connection to the cluster.
3024   * Not thread-safe (This class needs a lot of work to make it thread-safe).
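   * <p>A minimal usage sketch (assuming a hypothetical {@code TEST_UTIL} instance of this class
   * and a running cluster; the table name is a placeholder):
   * <pre>
   * Table table = TEST_UTIL.getConnection().getTable(TableName.valueOf("myTable"));
   * </pre>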
3025   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
3026   * @throws IOException
3027   */
3028  public Connection getConnection() throws IOException {
3029    if (this.connection == null) {
3030      this.connection = ConnectionFactory.createConnection(this.conf);
3031    }
3032    return this.connection;
3033  }
3034
3035  /**
   * Returns an Admin instance.
   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect;
   * it will be closed automatically when the cluster shuts down.
   *
   * @return An HBaseAdmin instance; only the {@link Admin} interface on it is guaranteed.
   *   Functions in HBaseAdmin not provided by the {@link Admin} interface can be changed or
   *   deleted anytime.
3043   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
3044   */
3045  @Deprecated
  public synchronized HBaseAdmin getHBaseAdmin() throws IOException {
    if (hbaseAdmin == null) {
3049      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3050    }
3051    return hbaseAdmin;
3052  }
3053
3054  /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
   * Closing it has no effect; it will be closed automatically when the cluster shuts down.
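   * <p>A minimal usage sketch (assuming a hypothetical {@code TEST_UTIL} instance of this class;
   * the table name is a placeholder):
   * <pre>
   * Admin admin = TEST_UTIL.getAdmin();
   * admin.disableTable(TableName.valueOf("myTable"));
   * </pre>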
3057   */
3058  public synchronized Admin getAdmin() throws IOException {
    if (hbaseAdmin == null) {
3060      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3061    }
3062    return hbaseAdmin;
3063  }
3064
3065  private HBaseAdmin hbaseAdmin = null;
3066
3067  /**
   * Returns an {@link Hbck} instance. Needs to be closed when done.
3069   */
3070  public Hbck getHbck() throws IOException {
3071    return getConnection().getHbck();
3072  }
3073
3074  /**
3075   * Unassign the named region.
3076   *
3077   * @param regionName  The region to unassign.
3078   */
3079  public void unassignRegion(String regionName) throws IOException {
3080    unassignRegion(Bytes.toBytes(regionName));
3081  }
3082
3083  /**
3084   * Unassign the named region.
3085   *
3086   * @param regionName  The region to unassign.
3087   */
3088  public void unassignRegion(byte[] regionName) throws IOException {
3089    getAdmin().unassign(regionName, true);
3090  }
3091
3092  /**
3093   * Closes the region containing the given row.
3094   *
3095   * @param row  The row to find the containing region.
3096   * @param table  The table to find the region.
3097   */
3098  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
3099    unassignRegionByRow(Bytes.toBytes(row), table);
3100  }
3101
3102  /**
3103   * Closes the region containing the given row.
3104   *
3105   * @param row  The row to find the containing region.
3106   * @param table  The table to find the region.
3107   * @throws IOException
3108   */
3109  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
3110    HRegionLocation hrl = table.getRegionLocation(row);
3111    unassignRegion(hrl.getRegionInfo().getRegionName());
3112  }
3113
3114  /*
3115   * Retrieves a splittable region randomly from tableName
3116   *
3117   * @param tableName name of table
3118   * @param maxAttempts maximum number of attempts, unlimited for value of -1
3119   * @return the HRegion chosen, null if none was found within limit of maxAttempts
3120   */
3121  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
3122    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
3123    int regCount = regions.size();
3124    Set<Integer> attempted = new HashSet<>();
3125    int idx;
3126    int attempts = 0;
3127    do {
3128      regions = getHBaseCluster().getRegions(tableName);
3129      if (regCount != regions.size()) {
3130        // if there was region movement, clear attempted Set
3131        attempted.clear();
3132      }
3133      regCount = regions.size();
      // There is a chance that, before we get the region for the table from an RS, the region
      // may be going for CLOSE.  This may be because online schema change is enabled.
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (!attempted.contains(idx)) {
          try {
            regions.get(idx).checkSplit();
            return regions.get(idx);
          } catch (Exception ex) {
            LOG.warn("Caught exception", ex);
            attempted.add(idx);
          }
        }
      }
      // Count every pass so that a bounded maxAttempts always terminates, even after all
      // region indices have been attempted.
      attempts++;
3150    } while (maxAttempts == -1 || attempts < maxAttempts);
3151    return null;
3152  }
3153
3154  public MiniDFSCluster getDFSCluster() {
3155    return dfsCluster;
3156  }
3157
3158  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
3159    setDFSCluster(cluster, true);
3160  }
3161
3162  /**
3163   * Set the MiniDFSCluster
3164   * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
   * it is set.
3167   * @throws IllegalStateException if the passed cluster is up when it is required to be down
3168   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
3169   */
3170  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
3171      throws IllegalStateException, IOException {
3172    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
3173      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
3174    }
3175    this.dfsCluster = cluster;
3176    this.setFs();
3177  }
3178
3179  public FileSystem getTestFileSystem() throws IOException {
3180    return HFileSystem.get(conf);
3181  }
3182
3183  /**
   * Wait until all regions in a table have been assigned.  Waits the default timeout
   * (30 seconds) before giving up.
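   * <p>A minimal usage sketch (assuming a hypothetical {@code TEST_UTIL} instance of this class;
   * the table name is a placeholder):
   * <pre>
   * TEST_UTIL.waitTableAvailable(TableName.valueOf("myTable"));
   * </pre>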
3186   * @param table Table to wait on.
3187   * @throws InterruptedException
3188   * @throws IOException
3189   */
3190  public void waitTableAvailable(TableName table)
3191      throws InterruptedException, IOException {
3192    waitTableAvailable(table.getName(), 30000);
3193  }
3194
3195  public void waitTableAvailable(TableName table, long timeoutMillis)
3196      throws InterruptedException, IOException {
3197    waitFor(timeoutMillis, predicateTableAvailable(table));
3198  }
3199
3200  /**
3201   * Wait until all regions in a table have been assigned
3202   * @param table Table to wait on.
3203   * @param timeoutMillis Timeout.
3204   * @throws InterruptedException
3205   * @throws IOException
3206   */
3207  public void waitTableAvailable(byte[] table, long timeoutMillis)
3208  throws InterruptedException, IOException {
3209    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
3210  }
3211
3212  public String explainTableAvailability(TableName tableName) throws IOException {
3213    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
3214    if (getHBaseCluster().getMaster().isAlive()) {
3215      Map<RegionInfo, ServerName> assignments =
3216          getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
3217              .getRegionAssignments();
3218      final List<Pair<RegionInfo, ServerName>> metaLocations =
3219          MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
3220      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
3221        RegionInfo hri = metaLocation.getFirst();
3222        ServerName sn = metaLocation.getSecond();
        if (!assignments.containsKey(hri)) {
          msg += ", region " + hri
              + " not assigned, but found in meta; it is expected to be on " + sn;
        } else if (sn == null) {
          msg += ", region " + hri
              + " assigned, but has no server in meta";
        } else if (!sn.equals(assignments.get(hri))) {
          msg += ", region " + hri
              + " assigned, but has different servers in meta and AM (" +
              sn + " <> " + assignments.get(hri) + ")";
3234        }
3235      }
3236    }
3237    return msg;
3238  }
3239
3240  public String explainTableState(final TableName table, TableState.State state)
3241      throws IOException {
3242    TableState tableState = MetaTableAccessor.getTableState(connection, table);
3243    if (tableState == null) {
3244      return "TableState in META: No table state in META for table " + table
3245          + " last state in meta (including deleted is " + findLastTableState(table) + ")";
3246    } else if (!tableState.inStates(state)) {
3247      return "TableState in META: Not " + state + " state, but " + tableState;
3248    } else {
3249      return "TableState in META: OK";
3250    }
3251  }
3252
3253  @Nullable
3254  public TableState findLastTableState(final TableName table) throws IOException {
3255    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
3256    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3257      @Override
3258      public boolean visit(Result r) throws IOException {
        if (!Arrays.equals(r.getRow(), table.getName())) {
          return false;
        }
        TableState state = MetaTableAccessor.getTableState(r);
        if (state != null) {
          lastTableState.set(state);
        }
3264        return true;
3265      }
3266    };
3267    MetaTableAccessor
3268        .scanMeta(connection, null, null,
3269            MetaTableAccessor.QueryType.TABLE,
3270            Integer.MAX_VALUE, visitor);
3271    return lastTableState.get();
3272  }
3273
3274  /**
   * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
   * regions have all been assigned.  Will timeout after default period (30 seconds).
   * Tolerates nonexistent table.
   * @param table Table to wait on.
3280   * @throws InterruptedException
3281   * @throws IOException
3282   */
3283  public void waitTableEnabled(TableName table)
3284      throws InterruptedException, IOException {
3285    waitTableEnabled(table, 30000);
3286  }
3287
3288  /**
   * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
   * regions have all been assigned.
3291   * @see #waitTableEnabled(TableName, long)
3292   * @param table Table to wait on.
3293   * @param timeoutMillis Time to wait on it being marked enabled.
3294   * @throws InterruptedException
3295   * @throws IOException
3296   */
3297  public void waitTableEnabled(byte[] table, long timeoutMillis)
3298  throws InterruptedException, IOException {
3299    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3300  }
3301
3302  public void waitTableEnabled(TableName table, long timeoutMillis)
3303  throws IOException {
3304    waitFor(timeoutMillis, predicateTableEnabled(table));
3305  }
3306
3307  /**
   * Waits for a table to be 'disabled'.  Disabled means that table is set as 'disabled'.
   * Will timeout after default period (30 seconds).
3310   * @param table Table to wait on.
3311   * @throws InterruptedException
3312   * @throws IOException
3313   */
3314  public void waitTableDisabled(byte[] table)
3315          throws InterruptedException, IOException {
3316    waitTableDisabled(table, 30000);
3317  }
3318
3319  public void waitTableDisabled(TableName table, long millisTimeout)
3320          throws InterruptedException, IOException {
3321    waitFor(millisTimeout, predicateTableDisabled(table));
3322  }
3323
3324  /**
   * Waits for a table to be 'disabled'.  Disabled means that table is set as 'disabled'.
3326   * @param table Table to wait on.
3327   * @param timeoutMillis Time to wait on it being marked disabled.
3328   * @throws InterruptedException
3329   * @throws IOException
3330   */
3331  public void waitTableDisabled(byte[] table, long timeoutMillis)
3332          throws InterruptedException, IOException {
3333    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
3334  }
3335
3336  /**
3337   * Make sure that at least the specified number of region servers
3338   * are running
3339   * @param num minimum number of region servers that should be running
3340   * @return true if we started some servers
3341   * @throws IOException
3342   */
3343  public boolean ensureSomeRegionServersAvailable(final int num)
3344      throws IOException {
3345    boolean startedServer = false;
3346    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
3347    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
3348      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3349      startedServer = true;
3350    }
3351
3352    return startedServer;
3353  }
3354
3355
3356  /**
3357   * Make sure that at least the specified number of region servers
3358   * are running. We don't count the ones that are currently stopping or are
3359   * stopped.
3360   * @param num minimum number of region servers that should be running
3361   * @return true if we started some servers
3362   * @throws IOException
3363   */
3364  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
3365    throws IOException {
3366    boolean startedServer = ensureSomeRegionServersAvailable(num);
3367
3368    int nonStoppedServers = 0;
3369    for (JVMClusterUtil.RegionServerThread rst :
3370      getMiniHBaseCluster().getRegionServerThreads()) {
3371
3372      HRegionServer hrs = rst.getRegionServer();
3373      if (hrs.isStopping() || hrs.isStopped()) {
3374        LOG.info("A region server is stopped or stopping:"+hrs);
3375      } else {
3376        nonStoppedServers++;
3377      }
3378    }
3379    for (int i=nonStoppedServers; i<num; ++i) {
3380      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
3381      startedServer = true;
3382    }
3383    return startedServer;
3384  }
3385
3386
3387  /**
   * This method clones the passed <code>c</code> configuration setting a new
   * user into the clone.  Use it to get new instances of FileSystem.  Only
   * works for DistributedFileSystem w/o Kerberos.
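   * <p>A minimal usage sketch (the suffix is a placeholder):
   * <pre>
   * User user = getDifferentUser(conf, "_daemon1");
   * </pre>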
3391   * @param c Initial configuration
3392   * @param differentiatingSuffix Suffix to differentiate this user from others.
3393   * @return A new configuration instance with a different user set into it.
3394   * @throws IOException
3395   */
3396  public static User getDifferentUser(final Configuration c,
3397    final String differentiatingSuffix)
3398  throws IOException {
3399    FileSystem currentfs = FileSystem.get(c);
3400    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
3401      return User.getCurrent();
3402    }
3403    // Else distributed filesystem.  Make a new instance per daemon.  Below
3404    // code is taken from the AppendTestUtil over in hdfs.
3405    String username = User.getCurrent().getName() +
3406      differentiatingSuffix;
3407    User user = User.createUserForTesting(c, username,
3408        new String[]{"supergroup"});
3409    return user;
3410  }
3411
3412  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
3413      throws IOException {
3414    NavigableSet<String> online = new TreeSet<>();
3415    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
3416      try {
3417        for (RegionInfo region :
3418            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
3419          online.add(region.getRegionNameAsString());
3420        }
3421      } catch (RegionServerStoppedException e) {
3422        // That's fine.
3423      }
3424    }
3425    for (MasterThread mt : cluster.getLiveMasterThreads()) {
3426      try {
3427        for (RegionInfo region :
3428            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
3429          online.add(region.getRegionNameAsString());
3430        }
3431      } catch (RegionServerStoppedException e) {
3432        // That's fine.
3433      } catch (ServerNotRunningYetException e) {
3434        // That's fine.
3435      }
3436    }
3437    return online;
3438  }
3439
3440  /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it's hard-coded to 5 and
   * makes tests linger.  Here is the exception you'll see:
3443   * <pre>
3444   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
3445   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
3446   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
3447   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3448   * </pre>
3449   * @param stream A DFSClient.DFSOutputStream.
   * @param max The new maximum number of recovery errors to allow.
3455   */
3456  public static void setMaxRecoveryErrorCount(final OutputStream stream,
3457      final int max) {
3458    try {
3459      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
3460      for (Class<?> clazz: clazzes) {
3461        String className = clazz.getSimpleName();
3462        if (className.equals("DFSOutputStream")) {
3463          if (clazz.isInstance(stream)) {
3464            Field maxRecoveryErrorCountField =
3465              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3466            maxRecoveryErrorCountField.setAccessible(true);
3467            maxRecoveryErrorCountField.setInt(stream, max);
3468            break;
3469          }
3470        }
3471      }
3472    } catch (Exception e) {
3473      LOG.info("Could not set max recovery field", e);
3474    }
3475  }
3476
3477  /**
   * Uses the assignment manager directly to assign the region, and waits until the
   * specified region has completed assignment.
   * @return true if the region is assigned, false otherwise.
3481   */
3482  public boolean assignRegion(final RegionInfo regionInfo)
3483      throws IOException, InterruptedException {
3484    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
3485    am.assign(regionInfo);
3486    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
3487  }
3488
3489  /**
3490   * Move region to destination server and wait till region is completely moved and online
3491   *
3492   * @param destRegion region to move
3493   * @param destServer destination server of the region
3494   * @throws InterruptedException
3495   * @throws IOException
3496   */
3497  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
3498      throws InterruptedException, IOException {
3499    HMaster master = getMiniHBaseCluster().getMaster();
3500    // TODO: Here we start the move. The move can take a while.
3501    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
3502    while (true) {
3503      ServerName serverName = master.getAssignmentManager().getRegionStates()
3504          .getRegionServerOfRegion(destRegion);
3505      if (serverName != null && serverName.equals(destServer)) {
3506        assertRegionOnServer(destRegion, serverName, 2000);
3507        break;
3508      }
3509      Thread.sleep(10);
3510    }
3511  }
3512
3513  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to a configurable timeout value (default is 60 seconds).
   * This means all regions have been deployed, and the master has been
   * informed and has updated hbase:meta with the regions' deployed
   * servers.
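   * <p>A minimal usage sketch (assuming a hypothetical {@code TEST_UTIL} instance of this class;
   * the table name is a placeholder):
   * <pre>
   * TEST_UTIL.waitUntilAllRegionsAssigned(TableName.valueOf("myTable"));
   * </pre>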
3519   * @param tableName the table name
3520   * @throws IOException
3521   */
3522  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3523    waitUntilAllRegionsAssigned(tableName,
3524      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3525  }
3526
3527  /**
   * Wait until all system tables' regions get assigned
3529   * @throws IOException
3530   */
3531  public void waitUntilAllSystemRegionsAssigned() throws IOException {
3532    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3533    waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
3534  }
3535
3536  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout.  This means all regions have been deployed,
   * and the master has been informed and has updated hbase:meta with the
   * regions' deployed servers.
3541   * @param tableName the table name
3542   * @param timeout timeout, in milliseconds
3543   * @throws IOException
3544   */
3545  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3546      throws IOException {
3547    if (!TableName.isMetaTableName(tableName)) {
3548      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
3549        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
3550            timeout + "ms");
3551        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
3552          @Override
3553          public String explainFailure() throws IOException {
3554            return explainTableAvailability(tableName);
3555          }
3556
3557          @Override
3558          public boolean evaluate() throws IOException {
3559            Scan scan = new Scan();
3560            scan.addFamily(HConstants.CATALOG_FAMILY);
3561            boolean tableFound = false;
3562            try (ResultScanner s = meta.getScanner(scan)) {
3563              for (Result r; (r = s.next()) != null;) {
3564                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3565                HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3566                if (info != null && info.getTable().equals(tableName)) {
3567                  // Get server hosting this region from catalog family. Return false if no server
3568                  // hosting this region, or if the server hosting this region was recently killed
3569                  // (for fault tolerance testing).
3570                  tableFound = true;
3571                  byte[] server =
3572                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3573                  if (server == null) {
3574                    return false;
3575                  } else {
3576                    byte[] startCode =
3577                        r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
3578                    ServerName serverName =
3579                        ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
3580                            Bytes.toLong(startCode));
3581                    if (!getHBaseClusterInterface().isDistributedCluster() &&
3582                        getHBaseCluster().isKilledRS(serverName)) {
3583                      return false;
3584                    }
3585                  }
3586                  if (RegionStateStore.getRegionState(r,
3587                    info.getReplicaId()) != RegionState.State.OPEN) {
3588                    return false;
3589                  }
3590                }
3591              }
3592            }
3593            if (!tableFound) {
3594              LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?");
3595            }
3596            return tableFound;
3597          }
3598        });
3599      }
3600    }
3601    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
3602    // check from the master state if we are using a mini cluster
3603    if (!getHBaseClusterInterface().isDistributedCluster()) {
3604      // So, all regions are in the meta table but make sure master knows of the assignments before
3605      // returning -- sometimes this can lag.
3606      HMaster master = getHBaseCluster().getMaster();
3607      final RegionStates states = master.getAssignmentManager().getRegionStates();
3608      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
3609        @Override
3610        public String explainFailure() throws IOException {
3611          return explainTableAvailability(tableName);
3612        }
3613
3614        @Override
3615        public boolean evaluate() throws IOException {
3616          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
3617          return hris != null && !hris.isEmpty();
3618        }
3619      });
3620    }
3621    LOG.info("All regions for table " + tableName + " assigned.");
3622  }
3623
3624  /**
3625   * Do a small get/scan against one store. This is required because store
3626   * has no actual methods of querying itself, and relies on StoreScanner.
3627   */
  public static List<Cell> getFromStoreFile(HStore store, Get get) throws IOException {
3630    Scan scan = new Scan(get);
3631    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3632        scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
3633        // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
3634        // readpoint 0.
3635        0);
3636
3637    List<Cell> result = new ArrayList<>();
3638    scanner.next(result);
3639    if (!result.isEmpty()) {
3640      // verify that we are on the row we want:
3641      Cell kv = result.get(0);
3642      if (!CellUtil.matchingRows(kv, get.getRow())) {
3643        result.clear();
3644      }
3645    }
3646    scanner.close();
3647    return result;
3648  }
3649
3650  /**
   * Create region split keys between startKey and endKey
   *
   * @param startKey the start of the key range
   * @param endKey the end of the key range
   * @param numRegions the number of regions to be created. it has to be greater than 3.
   * @return the split keys, with an empty first key prepended
3657   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
3662    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3663    result[0] = HConstants.EMPTY_BYTE_ARRAY;
3664    return result;
3665  }
3666
3667  /**
3668   * Do a small get/scan against one store. This is required because store
3669   * has no actual methods of querying itself, and relies on StoreScanner.
3670   */
  public static List<Cell> getFromStoreFile(HStore store, byte[] row,
      NavigableSet<byte[]> columns) throws IOException {
3675    Get get = new Get(row);
3676    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3677    s.put(store.getColumnFamilyDescriptor().getName(), columns);
3678
    return getFromStoreFile(store, get);
3680  }
3681
3682  public static void assertKVListsEqual(String additionalMsg,
3683      final List<? extends Cell> expected,
3684      final List<? extends Cell> actual) {
3685    final int eLen = expected.size();
3686    final int aLen = actual.size();
3687    final int minLen = Math.min(eLen, aLen);
3688
3689    int i;
3690    for (i = 0; i < minLen
3691        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
3692        ++i) {}
3693
3694    if (additionalMsg == null) {
3695      additionalMsg = "";
3696    }
3697    if (!additionalMsg.isEmpty()) {
3698      additionalMsg = ". " + additionalMsg;
3699    }
3700
3701    if (eLen != aLen || i != minLen) {
3702      throw new AssertionError(
3703          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
3705          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3706    }
3707  }
3708
3709  public static <T> String safeGetAsStr(List<T> lst, int i) {
3710    if (0 <= i && i < lst.size()) {
3711      return lst.get(i).toString();
3712    } else {
3713      return "<out_of_range>";
3714    }
3715  }
3716
3717  public String getClusterKey() {
3718    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3719        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3720        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3721            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3722  }
3723
3724  /** Creates a random table with the given parameters */
3725  public Table createRandomTable(TableName tableName,
3726      final Collection<String> families,
3727      final int maxVersions,
3728      final int numColsPerRow,
3729      final int numFlushes,
3730      final int numRegions,
3731      final int numRowsPerFlush)
3732      throws IOException, InterruptedException {
3733
3734    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3735        " regions, " + numFlushes + " storefiles per region, " +
3736        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3737        "\n");
3738
3739    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3740    final int numCF = families.size();
3741    final byte[][] cfBytes = new byte[numCF][];
3742    {
3743      int cfIndex = 0;
3744      for (String cf : families) {
3745        cfBytes[cfIndex++] = Bytes.toBytes(cf);
3746      }
3747    }
3748
3749    final int actualStartKey = 0;
3750    final int actualEndKey = Integer.MAX_VALUE;
3751    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3752    final int splitStartKey = actualStartKey + keysPerRegion;
3753    final int splitEndKey = actualEndKey - keysPerRegion;
3754    final String keyFormat = "%08x";
3755    final Table table = createTable(tableName, cfBytes,
3756        maxVersions,
3757        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3758        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3759        numRegions);
3760
3761    if (hbaseCluster != null) {
3762      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3763    }
3764
3765    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
3766
3767    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3768      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3769        final byte[] row = Bytes.toBytes(String.format(keyFormat,
3770            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3771
3772        Put put = new Put(row);
3773        Delete del = new Delete(row);
3774        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3775          final byte[] cf = cfBytes[rand.nextInt(numCF)];
3776          final long ts = rand.nextInt();
3777          final byte[] qual = Bytes.toBytes("col" + iCol);
3778          if (rand.nextBoolean()) {
3779            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3780                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3781                ts + "_random_" + rand.nextLong());
3782            put.addColumn(cf, qual, ts, value);
3783          } else if (rand.nextDouble() < 0.8) {
3784            del.addColumn(cf, qual, ts);
3785          } else {
3786            del.addColumns(cf, qual, ts);
3787          }
3788        }
3789
3790        if (!put.isEmpty()) {
3791          mutator.mutate(put);
3792        }
3793
3794        if (!del.isEmpty()) {
3795          mutator.mutate(del);
3796        }
3797      }
3798      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3799      mutator.flush();
3800      if (hbaseCluster != null) {
3801        getMiniHBaseCluster().flushcache(table.getName());
3802      }
3803    }
3804    mutator.close();
3805
3806    return table;
3807  }
3808
  private static final Random random = new Random();
3810
3811  private static final PortAllocator portAllocator = new PortAllocator(random);
3812
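  /**
   * Returns a random free port in the private/dynamic range and marks it as taken, so repeated
   * calls do not hand out the same port. A minimal usage sketch (the configuration key shown is
   * only an example):
   * <pre>
   * conf.setInt(HConstants.REGIONSERVER_PORT, randomFreePort());
   * </pre>
   */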
3813  public static int randomFreePort() {
3814    return portAllocator.randomFreePort();
3815  }
3816
3817  static class PortAllocator {
3818    private static final int MIN_RANDOM_PORT = 0xc000;
3819    private static final int MAX_RANDOM_PORT = 0xfffe;
3820
3821    /** A set of ports that have been claimed using {@link #randomFreePort()}. */
3822    private final Set<Integer> takenRandomPorts = new HashSet<>();
3823
3824    private final Random random;
3825    private final AvailablePortChecker portChecker;
3826
3827    public PortAllocator(Random random) {
3828      this.random = random;
3829      this.portChecker = new AvailablePortChecker() {
3830        @Override
3831        public boolean available(int port) {
          try (ServerSocket sock = new ServerSocket(port)) {
            return true;
          } catch (IOException ex) {
            return false;
          }
3839        }
3840      };
3841    }
3842
3843    public PortAllocator(Random random, AvailablePortChecker portChecker) {
3844      this.random = random;
3845      this.portChecker = portChecker;
3846    }
3847
3848    /**
     * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
     * called from single-threaded test setup code.
3851     */
3852    public int randomFreePort() {
3853      int port = 0;
3854      do {
3855        port = randomPort();
3856        if (takenRandomPorts.contains(port)) {
3857          port = 0;
3858          continue;
3859        }
3860        takenRandomPorts.add(port);
3861
3862        if (!portChecker.available(port)) {
3863          port = 0;
3864        }
3865      } while (port == 0);
3866      return port;
3867    }
3868
3869    /**
3870     * Returns a random port. These ports cannot be registered with IANA and are
3871     * intended for dynamic allocation (see http://bit.ly/dynports).
3872     */
3873    private int randomPort() {
3874      return MIN_RANDOM_PORT
3875          + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3876    }
3877
3878    interface AvailablePortChecker {
3879      boolean available(int port);
3880    }
3881  }
3882
3883  public static String randomMultiCastAddress() {
3884    return "226.1.1." + random.nextInt(254);
3885  }
3886
3887  public static void waitForHostPort(String host, int port)
3888      throws IOException {
3889    final int maxTimeMs = 10000;
3890    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3891    IOException savedException = null;
3892    LOG.info("Waiting for server at " + host + ":" + port);
3893    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3894      try {
3895        Socket sock = new Socket(InetAddress.getByName(host), port);
3896        sock.close();
3897        savedException = null;
3898        LOG.info("Server at " + host + ":" + port + " is available");
3899        break;
3900      } catch (UnknownHostException e) {
3901        throw new IOException("Failed to look up " + host, e);
3902      } catch (IOException e) {
3903        savedException = e;
3904      }
3905      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3906    }
3907
3908    if (savedException != null) {
3909      throw savedException;
3910    }
3911  }
3912
3913  /**
3914   * Creates a pre-split table for load testing. If the table already exists,
3915   * logs a warning and continues.
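   * <p>A minimal usage sketch (the table and family names are placeholders):
   * <pre>
   * int regions = createPreSplitLoadTestTable(conf, TableName.valueOf("loadtest"),
   *     Bytes.toBytes("cf"), Algorithm.NONE, DataBlockEncoding.NONE);
   * </pre>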
3916   * @return the number of regions the table was split into
3917   */
3918  public static int createPreSplitLoadTestTable(Configuration conf,
3919      TableName tableName, byte[] columnFamily, Algorithm compression,
3920      DataBlockEncoding dataBlockEncoding) throws IOException {
3921    return createPreSplitLoadTestTable(conf, tableName,
3922      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
3923      Durability.USE_DEFAULT);
3924  }
3925  /**
3926   * Creates a pre-split table for load testing. If the table already exists,
3927   * logs a warning and continues.
3928   * @return the number of regions the table was split into
3929   */
3930  public static int createPreSplitLoadTestTable(Configuration conf,
3931      TableName tableName, byte[] columnFamily, Algorithm compression,
3932      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3933      Durability durability)
3934          throws IOException {
3935    HTableDescriptor desc = new HTableDescriptor(tableName);
3936    desc.setDurability(durability);
3937    desc.setRegionReplication(regionReplication);
3938    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3939    hcd.setDataBlockEncoding(dataBlockEncoding);
3940    hcd.setCompressionType(compression);
3941    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3942  }
3943
3944  /**
3945   * Creates a pre-split table for load testing. If the table already exists,
3946   * logs a warning and continues.
3947   * @return the number of regions the table was split into
3948   */
3949  public static int createPreSplitLoadTestTable(Configuration conf,
3950      TableName tableName, byte[][] columnFamilies, Algorithm compression,
3951      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3952      Durability durability)
3953          throws IOException {
3954    HTableDescriptor desc = new HTableDescriptor(tableName);
3955    desc.setDurability(durability);
3956    desc.setRegionReplication(regionReplication);
3957    HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
3958    for (int i = 0; i < columnFamilies.length; i++) {
3959      HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
3960      hcd.setDataBlockEncoding(dataBlockEncoding);
3961      hcd.setCompressionType(compression);
3962      hcds[i] = hcd;
3963    }
3964    return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
3965  }
3966
3967  /**
3968   * Creates a pre-split table for load testing. If the table already exists,
3969   * logs a warning and continues.
3970   * @return the number of regions the table was split into
3971   */
3972  public static int createPreSplitLoadTestTable(Configuration conf,
3973      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
3974    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3975  }
3976
3977  /**
3978   * Creates a pre-split table for load testing. If the table already exists,
3979   * logs a warning and continues.
3980   * @return the number of regions the table was split into
3981   */
3982  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer)
      throws IOException {
3984    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
3985        numRegionsPerServer);
3986  }
3987
3988  /**
3989   * Creates a pre-split table for load testing. If the table already exists,
3990   * logs a warning and continues.
3991   * @return the number of regions the table was split into
3992   */
3993  public static int createPreSplitLoadTestTable(Configuration conf,
3994      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
3995      int numRegionsPerServer) throws IOException {
3996    return createPreSplitLoadTestTable(conf, desc, hcds,
3997      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
3998  }
3999
4000  /**
4001   * Creates a pre-split table for load testing. If the table already exists,
4002   * logs a warning and continues.
4003   * @return the number of regions the table was split into
4004   */
4005  public static int createPreSplitLoadTestTable(Configuration conf,
4006      TableDescriptor td, ColumnFamilyDescriptor[] cds,
4007      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
4008    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
4009    for (ColumnFamilyDescriptor cd : cds) {
4010      if (!td.hasColumnFamily(cd.getName())) {
4011        builder.setColumnFamily(cd);
4012      }
4013    }
4014    td = builder.build();
4015    int totalNumberOfRegions = 0;
4016    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
4017    Admin admin = unmanagedConnection.getAdmin();
4018
4019    try {
      // create a table with pre-split regions.
      // The number of splits is:
      //    (region servers * regions per region server).
4023      int numberOfServers =
4024          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
4025              .size();
4026      if (numberOfServers == 0) {
4027        throw new IllegalStateException("No live regionservers");
4028      }
4029
4030      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
4031      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
4032          "pre-splitting table into " + totalNumberOfRegions + " regions " +
4033          "(regions per server: " + numRegionsPerServer + ")");
4034
      byte[][] splits = splitter.split(totalNumberOfRegions);
4037
4038      admin.createTable(td, splits);
4039    } catch (MasterNotRunningException e) {
4040      LOG.error("Master not running", e);
4041      throw new IOException(e);
4042    } catch (TableExistsException e) {
4043      LOG.warn("Table " + td.getTableName() +
4044          " already exists, continuing");
4045    } finally {
4046      admin.close();
4047      unmanagedConnection.close();
4048    }
4049    return totalNumberOfRegions;
4050  }
4051
4052  public static int getMetaRSPort(Connection connection) throws IOException {
4053    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
4054      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
4055    }
4056  }
4057
4058  /**
   *  Due to an async racing issue, a region may not be in
4060   *  the online region list of a region server yet, after
4061   *  the assignment znode is deleted and the new assignment
4062   *  is recorded in master.
4063   */
4064  public void assertRegionOnServer(
4065      final RegionInfo hri, final ServerName server,
4066      final long timeout) throws IOException, InterruptedException {
4067    long timeoutTime = System.currentTimeMillis() + timeout;
4068    while (true) {
4069      List<RegionInfo> regions = getAdmin().getRegions(server);
4070      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
4071      long now = System.currentTimeMillis();
4072      if (now > timeoutTime) break;
4073      Thread.sleep(10);
4074    }
4075    fail("Could not find region " + hri.getRegionNameAsString()
4076      + " on server " + server);
4077  }
4078
4079  /**
4080   * Check to make sure the region is open on the specified
4081   * region server, but not on any other one.
4082   */
4083  public void assertRegionOnlyOnServer(
4084      final RegionInfo hri, final ServerName server,
4085      final long timeout) throws IOException, InterruptedException {
4086    long timeoutTime = System.currentTimeMillis() + timeout;
4087    while (true) {
4088      List<RegionInfo> regions = getAdmin().getRegions(server);
4089      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
4090        List<JVMClusterUtil.RegionServerThread> rsThreads =
4091          getHBaseCluster().getLiveRegionServerThreads();
4092        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
4093          HRegionServer rs = rsThread.getRegionServer();
4094          if (server.equals(rs.getServerName())) {
4095            continue;
4096          }
4097          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
4098          for (HRegion r: hrs) {
4099            assertTrue("Region should not be double assigned",
4100              r.getRegionInfo().getRegionId() != hri.getRegionId());
4101          }
4102        }
4103        return; // good, we are happy
4104      }
4105      long now = System.currentTimeMillis();
4106      if (now > timeoutTime) break;
4107      Thread.sleep(10);
4108    }
4109    fail("Could not find region " + hri.getRegionNameAsString()
4110      + " on server " + server);
4111  }
4112
4113  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
4114    TableDescriptor td =
4115        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
4116    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
4117    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
4118  }
4119
4120  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
4121      BlockCache blockCache) throws IOException {
4122    TableDescriptor td =
4123        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
4124    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
4125    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
4126  }
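
  // Usage sketch (illustrative; the table and family names are hypothetical): build a
  // standalone region backed by its own WAL for low-level region tests, and close the
  // region and its WAL when done (e.g. via closeRegionAndWAL(region)).
  //
  //   ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of("cf");
  //   HRegion region = util.createTestRegion("testRegion", cfd);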
4127
4128  public void setFileSystemURI(String fsURI) {
4129    FS_URI = fsURI;
4130  }
4131
  /**
   * Returns a {@link Predicate} for checking that there are no regions in transition in the
   * master.
   */
4135  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
4136    return new ExplainingPredicate<IOException>() {
4137      @Override
4138      public String explainFailure() throws IOException {
4139        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
4140            .getAssignmentManager().getRegionStates();
4141        return "found in transition: " + regionStates.getRegionsInTransition().toString();
4142      }
4143
4144      @Override
4145      public boolean evaluate() throws IOException {
4146        HMaster master = getMiniHBaseCluster().getMaster();
4147        if (master == null) return false;
4148        AssignmentManager am = master.getAssignmentManager();
4149        if (am == null) return false;
4150        return !am.hasRegionsInTransition();
4151      }
4152    };
4153  }
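
  // Usage sketch (illustrative): the predicate is meant to be paired with waitFor(); for
  // example, a test can block up to one minute for the assignment manager to settle:
  //
  //   util.waitFor(60000, util.predicateNoRegionsInTransition());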
4154
  /**
   * Returns a {@link Predicate} for checking that the given table is enabled.
   */
4158  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
4159    return new ExplainingPredicate<IOException>() {
4160      @Override
4161      public String explainFailure() throws IOException {
4162        return explainTableState(tableName, TableState.State.ENABLED);
4163      }
4164
4165      @Override
4166      public boolean evaluate() throws IOException {
4167        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
4168      }
4169    };
4170  }
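
  // Usage sketch (illustrative; "t1" is a hypothetical table name):
  //
  //   util.waitFor(30000, util.predicateTableEnabled(TableName.valueOf("t1")));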
4171
  /**
   * Returns a {@link Predicate} for checking that the given table is disabled.
   */
4175  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
4176    return new ExplainingPredicate<IOException>() {
4177      @Override
4178      public String explainFailure() throws IOException {
4179        return explainTableState(tableName, TableState.State.DISABLED);
4180      }
4181
4182      @Override
4183      public boolean evaluate() throws IOException {
4184        return getAdmin().isTableDisabled(tableName);
4185      }
4186    };
4187  }
4188
  /**
   * Returns a {@link Predicate} for checking that the given table is available, i.e. that
   * every region of the table can actually serve a read.
   */
4192  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
4193    return new ExplainingPredicate<IOException>() {
4194      @Override
4195      public String explainFailure() throws IOException {
4196        return explainTableAvailability(tableName);
4197      }
4198
4199      @Override
4200      public boolean evaluate() throws IOException {
4201        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
4202        if (tableAvailable) {
4203          try (Table table = getConnection().getTable(tableName)) {
4204            TableDescriptor htd = table.getDescriptor();
4205            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
4206                .getAllRegionLocations()) {
4207              Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey())
4208                  .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit()
4209                  .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
4210              for (byte[] family : htd.getColumnFamilyNames()) {
4211                scan.addFamily(family);
4212              }
4213              try (ResultScanner scanner = table.getScanner(scan)) {
4214                scanner.next();
4215              }
4216            }
4217          }
4218        }
4219        return tableAvailable;
4220      }
4221    };
4222  }
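
  // Note that this predicate goes beyond isTableAvailable(): it issues a one-row scan
  // against every region, so success means each region is actually serving reads, not
  // merely assigned. Usage sketch (illustrative; "t1" is a hypothetical table name):
  //
  //   util.waitFor(60000, util.predicateTableAvailable(TableName.valueOf("t1")));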
4223
  /**
   * Wait until there are no regions in transition.
   * @param timeout How long to wait, in milliseconds.
   * @throws IOException if reading the region states fails
   */
4229  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
4230    waitFor(timeout, predicateNoRegionsInTransition());
4231  }
4232
  /**
   * Wait until there are no regions in transition, with a time limit of 15 minutes.
   * @throws IOException if reading the region states fails
   */
4237  public void waitUntilNoRegionsInTransition() throws IOException {
4238    waitUntilNoRegionsInTransition(15 * 60000);
4239  }
4240
  /**
   * Wait until the given labels are available in the {@link VisibilityLabelsCache}.
   * @param timeoutMillis how long to wait, in milliseconds
   * @param labels the visibility labels to wait for
   */
4246  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
4247    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
4248    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
4249
4250      @Override
4251      public boolean evaluate() {
4252        for (String label : labels) {
4253          if (labelsCache.getLabelOrdinal(label) == 0) {
4254            return false;
4255          }
4256        }
4257        return true;
4258      }
4259
4260      @Override
4261      public String explainFailure() {
4262        for (String label : labels) {
4263          if (labelsCache.getLabelOrdinal(label) == 0) {
4264            return label + " is not available yet";
4265          }
4266        }
4267        return "";
4268      }
4269    });
4270  }
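
  // Usage sketch (illustrative; the label names are hypothetical): after adding visibility
  // labels, e.g. via VisibilityClient, wait for them to propagate into the cache:
  //
  //   util.waitLabelAvailable(10000, "secret", "confidential");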
4271
  /**
   * Create a set of column descriptors covering every combination of the available
   * compression, data block encoding, and bloom filter types.
   * @return the list of column descriptors
   */
4277  public static List<HColumnDescriptor> generateColumnDescriptors() {
4278    return generateColumnDescriptors("");
4279  }
4280
  /**
   * Create a set of column descriptors covering every combination of the available
   * compression, data block encoding, and bloom filter types.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
4287  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
4288    List<HColumnDescriptor> htds = new ArrayList<>();
4289    long familyId = 0;
4290    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
4291      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
4292        for (BloomType bloomType: BloomType.values()) {
4293          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
4294          HColumnDescriptor htd = new HColumnDescriptor(name);
4295          htd.setCompressionType(compressionType);
4296          htd.setDataBlockEncoding(encodingType);
4297          htd.setBloomFilterType(bloomType);
4298          htds.add(htd);
4299          familyId++;
4300        }
4301      }
4302    }
4303    return htds;
4304  }
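
  // Usage sketch (illustrative; "allCodecs" is a hypothetical table name): attach every
  // generated family to one table so a single test covers all the combinations.
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("allCodecs"));
  //   for (HColumnDescriptor hcd : generateColumnDescriptors()) {
  //     htd.addFamily(hcd);
  //   }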
4305
  /**
   * Get the compression algorithms whose codecs can actually be loaded in this JVM.
   * @return the supported compression algorithms
   */
4310  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
4311    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
4312    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
4313    for (String algoName : allAlgos) {
4314      try {
4315        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
4316        algo.getCompressor();
4317        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // the codec for this algorithm failed to load; skip it
      }
4321    }
4322    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
4323  }
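
  // Usage sketch (illustrative): iterate only over algorithms whose codecs actually load in
  // the current JVM, so a compression test does not fail on a machine lacking, say, LZO:
  //
  //   for (Compression.Algorithm algo : getSupportedCompressionAlgorithms()) {
  //     // ... build a store file with "algo" and verify a read round-trip ...
  //   }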
4324
  /**
   * Returns the closest row at or before the given row in the given family, found via a
   * reversed, single-row scan of the region, or null if no such row exists.
   */
  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan().withStartRow(row);
    scan.setSmall(true);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      // guard against an empty result before peeking at the first cell
      if (r.getRegionInfo().isMetaRegion()
          && (cells.isEmpty() || !isTargetTable(row, cells.get(0)))) {
        return null;
      }
      return Result.create(cells);
    }
  }
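
  // Usage sketch (illustrative; "util" and "region" are hypothetical): probe for the
  // closest row at or before a key inside a region:
  //
  //   Result result = util.getClosestRowBefore(region, Bytes.toBytes("row-0005"),
  //     Bytes.toBytes("cf"));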
4340
  /**
   * Returns true if the meta row key in the given cell names the same table as the input
   * row key, comparing the table-name prefixes that precede the first delimiter.
   */
  private boolean isTargetTable(final byte[] inRow, Cell c) {
    String inputRowString = Bytes.toString(inRow);
    int i = inputRowString.indexOf(HConstants.DELIMITER);
    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
    int o = outputRowString.indexOf(HConstants.DELIMITER);
    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
  }
4348
4349  /**
4350   * Sets up {@link MiniKdc} for testing security.
4351   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
4352   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
4353   */
4354  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
4355    Properties conf = MiniKdc.createConf();
4356    conf.put(MiniKdc.DEBUG, true);
4357    MiniKdc kdc = null;
4358    File dir = null;
    // There is a time lag between selecting a port and trying to bind to it. It is possible
    // that another service grabs the port in between, which results in a BindException.
4361    boolean bindException;
4362    int numTries = 0;
4363    do {
4364      try {
4365        bindException = false;
4366        dir = new File(getDataTestDir("kdc").toUri().getPath());
4367        kdc = new MiniKdc(conf, dir);
4368        kdc.start();
4369      } catch (BindException e) {
4370        FileUtils.deleteDirectory(dir);  // clean directory
4371        numTries++;
4372        if (numTries == 3) {
4373          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
4374          throw e;
4375        }
4376        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
4377        bindException = true;
4378      }
4379    } while (bindException);
4380    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
4381    return kdc;
4382  }
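
  // Usage sketch (illustrative; the principal name is hypothetical): start the KDC, create
  // a principal in the configured keytab, and stop the KDC in teardown:
  //
  //   File keytab = new File(util.getDataTestDir("keytab").toUri().getPath());
  //   MiniKdc kdc = util.setupMiniKdc(keytab);
  //   kdc.createPrincipal(keytab, "hbase/localhost");
  //   // ... run the secured test ...
  //   kdc.stop();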
4383
4384  public int getNumHFiles(final TableName tableName, final byte[] family) {
4385    int numHFiles = 0;
4386    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
4389    }
4390    return numHFiles;
4391  }
4392
4393  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
4394                               final byte[] family) {
4395    int numHFiles = 0;
4396    for (Region region : rs.getRegions(tableName)) {
4397      numHFiles += region.getStore(family).getStorefilesCount();
4398    }
4399    return numHFiles;
4400  }
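
  // Usage sketch (illustrative; "tableName" and the family name "f" are hypothetical): a
  // flush test can assert on the aggregate store file count across the cluster:
  //
  //   util.getAdmin().flush(tableName);
  //   assertEquals(1, util.getNumHFiles(tableName, Bytes.toBytes("f")));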
4401
  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
    // compare the value maps directly; matching hash codes alone would not prove equality
    assertEquals(ltd.getValues(), rtd.getValues());
    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
    assertEquals(ltdFamilies.size(), rtdFamilies.size());
    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(), it2 =
         rtdFamilies.iterator(); it.hasNext();) {
      assertEquals(0,
          ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
    }
  }
4413}