/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Used to communicate with a single HBase table.
 * Obtain an instance from an {@link HConnection}.
 *
 * @since 0.21.0
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HTableInterface extends Closeable {

  /**
   * Gets the name of this table.
   *
   * @return the table name.
   */
  byte[] getTableName();

  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will
   * affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link HTableDescriptor table descriptor} for this table.
   * @throws IOException if a remote or network exception occurs.
   */
  HTableDescriptor getTableDescriptor() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   *
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   *
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
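   * <p>A minimal usage sketch (the {@code table} handle and the row key, family and
   * qualifier names are illustrative, not part of this API):</p>
   * <blockquote><pre>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
   * boolean present = table.exists(get);   // no cell data is shipped back to the client
   * </pre></blockquote>
   *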
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  boolean exists(Get get) throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   *
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * <p>
   *
   * This is a server-side call so it prevents any data from being transferred to
   * the client.
   *
   * @param gets the Gets
   * @return Array of booleans; each entry is true if the corresponding Get matches one or more
   * keys, false if not
   * @throws IOException e
   */
  Boolean[] exists(List<Get> gets) throws IOException;

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations.
   * The ordering of execution of the actions is not defined; if you do a Put and a
   * Get in the same {@link #batch} call, you are not guaranteed that the Get returns
   * what the Put had put.
   *
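   * <p>A minimal usage sketch (assumes an {@code HTableInterface} handle named {@code table};
   * the row keys, family and qualifier are illustrative):</p>
   * <blockquote><pre>
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
   * List&lt;Row&gt; actions = new ArrayList&lt;Row&gt;();
   * actions.add(put);
   * actions.add(new Get(Bytes.toBytes("row2")));
   * Object[] results = new Object[actions.size()];
   * table.batch(actions, results);  // null entries in results mark actions that failed
   * </pre></blockquote>
   *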
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
   * @param results Empty Object[], same size as actions. Provides access to partial
   * results, in case an exception is thrown. A null in the result array means that
   * the call for that action failed, even after retries
   * @throws IOException
   * @since 0.90.0
   */
  void batch(final List<? extends Row> actions, final Object[] results)
      throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but returns an array of
   * results instead of using a results parameter reference.
   *
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
   * @return the results from the actions. A null in the return array means that
   * the call for that action failed, even after retries
   * @throws IOException
   * @since 0.90.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
   */
  Object[] batch(final List<? extends Row> actions) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  <R> void batchCallback(
      final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
      throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List)}, but with a callback.
   * @since 0.96.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   * retrieve the partially executed results. Use
   * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
   * instead.
   */
  <R> Object[] batchCallback(
      List<? extends Row> actions, Batch.Callback<R> callback)
      throws IOException, InterruptedException;

  /**
   * Extracts certain cells from a given row.
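   *
   * <p>A minimal usage sketch (the {@code table} handle and the column names are
   * illustrative):</p>
   * <blockquote><pre>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
   * Result result = table.get(get);
   * byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("qual"));  // null if absent
   * </pre></blockquote>
   *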
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists. If the row
   * specified doesn't exist, the {@link Result} instance returned won't
   * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  Result get(Get get) throws IOException;

  /**
   * Extracts certain cells from the given rows, in batch.
   *
   * @param gets The objects that specify what data to fetch and from which rows.
   *
   * @return The data coming from the specified rows, if it exists. If the row
   * specified doesn't exist, the {@link Result} instance returned won't
   * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
   * If there are any failures even after retries, there will be a null in
   * the results array for those Gets, AND an exception will be thrown.
   * @throws IOException if a remote or network exception occurs.
   *
   * @since 0.90.0
   */
  Result[] get(List<Get> gets) throws IOException;

  /**
   * Return the row that matches <i>row</i> exactly,
   * or the one that immediately precedes it.
   *
   * @param row A row key.
   * @param family Column family to include in the {@link Result}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   *
   * @deprecated As of version 0.92 this method is deprecated without
   * replacement.
   * getRowOrBefore is used internally to find entries in hbase:meta and makes
   * various assumptions about the table (which are true for hbase:meta but not
   * in general) to be efficient.
   */
  Result getRowOrBefore(byte[] row, byte[] family) throws IOException;

  /**
   * Returns a scanner on the current table as specified by the {@link Scan}
   * object.
   * Note that the passed {@link Scan}'s start row and caching properties
   * may be changed.
   *
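   * <p>A minimal usage sketch (the row keys and family name are illustrative):</p>
   * <blockquote><pre>
   * Scan scan = new Scan(Bytes.toBytes("row-a"), Bytes.toBytes("row-z"));
   * scan.addFamily(Bytes.toBytes("cf"));
   * ResultScanner scanner = table.getScanner(scan);
   * try {
   *   for (Result r : scanner) {
   *     // process each row's Result
   *   }
   * } finally {
   *   scanner.close();  // always release the scanner
   * }
   * </pre></blockquote>
   *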
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(Scan scan) throws IOException;

  /**
   * Gets a scanner on the current table for the given family.
   *
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family) throws IOException;

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   *
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;

  /**
   * Puts some data in the table.
   * <p>
   * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
   * until the internal buffer is full.
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(Put put) throws IOException;

  /**
   * Puts some data in the table, in batch.
   * <p>
   * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
   * until the internal buffer is full.
   * <p>
   * This can be used for group commit, or for submitting user defined
   * batches. The writeBuffer will be periodically inspected while the List
   * is processed, so depending on the List size the writeBuffer may not flush
   * at all, or may flush more than once.
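   *
   * <p>A minimal usage sketch (the row key, family and values are illustrative):</p>
   * <blockquote><pre>
   * List&lt;Put&gt; puts = new ArrayList&lt;Put&gt;();
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
   * puts.add(put);
   * table.put(puts);  // with autoFlush off, some or all of these may remain buffered
   * </pre></blockquote>
   *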
   * @param puts The list of mutations to apply. The batch put is done by
   * aggregating the Puts in the client-side write buffer before issuing
   * the RPC call.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(List<Put> puts) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put. If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
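   * <p>A minimal usage sketch (the column names and values are illustrative):</p>
   * <blockquote><pre>
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("done"));
   * // apply the put only if cf:state currently holds "pending"
   * boolean applied = table.checkAndPut(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
   *     Bytes.toBytes("state"), Bytes.toBytes("pending"), put);
   * </pre></blockquote>
   *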
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Put put) throws IOException;

  /**
   * Deletes the specified cells/row.
   *
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void delete(Delete delete) throws IOException;

  /**
   * Deletes the specified cells/rows in bulk.
   * @param deletes List of things to delete. The list gets modified by this
   * method (in particular it gets re-ordered, so the order in which the elements
   * are inserted in the list gives no guarantee as to the order in which the
   * {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case
   * the {@code deletes} argument will contain the {@link Delete} instances
   * that have not been successfully applied.
   * @since 0.20.1
   */
  void delete(List<Delete> deletes) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete. If the passed value is null, the
   * check is for the lack of column (ie: non-existence)
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Delete delete) throws IOException;

  /**
   * Performs multiple mutations atomically on a single row. Currently
   * {@link Put} and {@link Delete} are supported.
   *
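   * <p>A minimal usage sketch (the column names are illustrative):</p>
   * <blockquote><pre>
   * RowMutations mutations = new RowMutations(Bytes.toBytes("row1"));
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("new"), Bytes.toBytes("value"));
   * mutations.add(put);
   * Delete delete = new Delete(Bytes.toBytes("row1"));
   * delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("old"));
   * mutations.add(delete);
   * table.mutateRow(mutations);  // both changes apply atomically, or neither does
   * </pre></blockquote>
   *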
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException
   */
  void mutateRow(final RowMutations rm) throws IOException;

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers. Appends are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
   *
   * @param append object that specifies the columns and values to be appended
   * @throws IOException e
   * @return values of the columns after the append operation (may be null)
   */
  Result append(final Append append) throws IOException;

  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers. Increments are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
   *
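   * <p>A minimal usage sketch (the column names are illustrative):</p>
   * <blockquote><pre>
   * Increment increment = new Increment(Bytes.toBytes("row1"));
   * increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
   * Result result = table.increment(increment);
   * // the returned Result carries the post-increment value of cf:hits
   * </pre></blockquote>
   *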
   * @param increment object that specifies the columns and amounts to be used
   * for the increment operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  Result increment(final Increment increment) throws IOException;

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount) throws IOException;

  /**
   * Atomically increments a column value. If the column value already exists
   * and is not a big-endian long, this could throw an exception. If the column
   * value does not yet exist it is initialized to <code>amount</code> and
   * written to the specified column.
   *
   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
   * scenario you will lose any increments that have not been flushed.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount, Durability durability) throws IOException;

  /**
   * @deprecated Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   */
  @Deprecated
  long incrementColumnValue(final byte[] row, final byte[] family,
      final byte[] qualifier, final long amount, final boolean writeToWAL)
      throws IOException;

  /**
   * Tells whether or not 'auto-flush' is turned on.
   *
   * @return {@code true} if 'auto-flush' is enabled (default), meaning
   * {@link Put} operations don't get buffered/delayed and are immediately
   * executed.
   */
  boolean isAutoFlush();

  /**
   * Executes all the buffered {@link Put} operations.
   * <p>
   * This method gets called once automatically for every {@link Put} or batch
   * of {@link Put}s (when <code>put(List&lt;Put&gt;)</code> is used) when
   * {@link #isAutoFlush} is {@code true}.
   * @throws IOException if a remote or network exception occurs.
   */
  void flushCommits() throws IOException;

  /**
   * Releases any resources held or pending changes in internal buffers.
   *
   * @throws IOException if a remote or network exception occurs.
   */
  void close() throws IOException;

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
   * table region containing the specified row. The row given does not actually have
   * to exist. Whichever region would contain the row based on start and end keys will
   * be used. Note that the {@code row} parameter is also not passed to the
   * coprocessor handler registered for this protocol, unless the {@code row}
   * is separately passed as an argument in the service request. The parameter
   * here is only used to locate the region used to handle the call.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  CoprocessorRpcChannel coprocessorService(byte[] row);

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
   * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method with each {@link Service} instance.
   *
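   * <p>A minimal usage sketch, assuming a protobuf-generated {@code MyService} endpoint with a
   * {@code myCall} method ({@code MyService}, {@code MyCallRequest} and {@code MyCallResponse}
   * are hypothetical; {@code ServerRpcController} and {@code BlockingRpcCallback} are the helpers
   * from {@code org.apache.hadoop.hbase.ipc}):</p>
   * <blockquote><pre>
   * Map&lt;byte[], MyCallResponse&gt; results = table.coprocessorService(MyService.class,
   *     null, null,   // null keys select every region of the table
   *     new Batch.Call&lt;MyService, MyCallResponse&gt;() {
   *       public MyCallResponse call(MyService service) throws IOException {
   *         ServerRpcController controller = new ServerRpcController();
   *         BlockingRpcCallback&lt;MyCallResponse&gt; callback =
   *             new BlockingRpcCallback&lt;MyCallResponse&gt;();
   *         service.myCall(controller, MyCallRequest.getDefaultInstance(), callback);
   *         return callback.get();
   *       }
   *     });
   * </pre></blockquote>
   *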
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   * selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   * If {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method will be invoked once per table region, using the {@link Service}
   * instance connected to that region.
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
      throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
   * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method with each {@link Service} instance.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
   * method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
   * </p>
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row. If {@code null}, the
   * selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   * If {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * will be invoked once per table region, using the {@link Service} instance
   * connected to that region.
   * @param callback the callback invoked with each region's result as it completes
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  <T extends Service, R> void coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable,
      final Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * See {@link #setAutoFlush(boolean, boolean)}
   *
   * @param autoFlush
   *          Whether or not to enable 'auto-flush'.
   * @deprecated in 0.96. When called with setAutoFlush(false), this function also
   * sets clearBufferOnFail to true, which is unexpected but kept for historical reasons.
   * Replace it with setAutoFlush(false, false) if this is exactly what you want, or by
   * {@link #setAutoFlushTo(boolean)} for all other cases.
   */
  @Deprecated
  void setAutoFlush(boolean autoFlush);

  /**
   * Turns 'auto-flush' on or off.
   * <p>
   * When enabled (default), {@link Put} operations don't get buffered/delayed
   * and are immediately executed. Failed operations are not retried. This is
   * slower but safer.
   * <p>
   * Turning off {@code autoFlush} means that multiple {@link Put}s will be
   * accepted before any RPC is actually sent to do the write operations. If the
   * application dies before pending writes get flushed to HBase, data will be
   * lost.
   * <p>
   * When you turn {@code autoFlush} off, you should also consider the
   * {@code clearBufferOnFail} option. By default, asynchronous {@link Put}
   * requests will be retried on failure until successful. However, this can
   * pollute the writeBuffer and slow down batching performance. Additionally,
   * you may want to issue a number of Put requests and call
   * {@link #flushCommits()} as a barrier. In both use cases, consider setting
   * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
   * has been called, regardless of success.
   * <p>
   * In other words, if you call {@code setAutoFlush(false)}, HBase will retry N times for each
   * flushCommit, including the last one when closing the table. This is NOT recommended;
   * most of the time you want to call {@code setAutoFlush(false, true)}.
   *
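   * <p>A minimal usage sketch of buffered writes ({@code myPuts} stands in for the
   * application's own data):</p>
   * <blockquote><pre>
   * table.setAutoFlush(false, true);
   * for (Put put : myPuts) {
   *   table.put(put);          // buffered client-side
   * }
   * table.flushCommits();      // pushes any remaining buffered Puts to the servers
   * </pre></blockquote>
   *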
   * @param autoFlush
   *          Whether or not to enable 'auto-flush'.
   * @param clearBufferOnFail
   *          Whether to keep Put failures in the writeBuffer. If autoFlush is true, then
   *          the value of this parameter is ignored and clearBufferOnFail is set to true.
   *          Setting clearBufferOnFail to false is deprecated since 0.96.
   * @see #flushCommits
   */
  void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);

  /**
   * Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail}.
   */
  void setAutoFlushTo(boolean autoFlush);

  /**
   * Returns the maximum size in bytes of the write buffer for this HTable.
   * <p>
   * The default value comes from the configuration parameter
   * {@code hbase.client.write.buffer}.
   * @return The size of the write buffer in bytes.
   */
  long getWriteBufferSize();

  /**
   * Sets the size of the buffer in bytes.
   * <p>
   * If the new size is less than the current amount of data in the
   * write buffer, the buffer gets flushed.
   * @param writeBufferSize The new write buffer size, in bytes.
   * @throws IOException if a remote or network exception occurs.
   */
  void setWriteBufferSize(long writeBufferSize) throws IOException;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive);
   * all invocations to the same region server will be batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the proto type of the response of the method in Service.
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   * @return a map of result values keyed by region name
   */
  <R extends Message> Map<byte[], R> batchCoprocessorService(
      Descriptors.MethodDescriptor methodDescriptor, Message request,
      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive);
   * all invocations to the same region server will be batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the proto type of the response of the method in Service.
   * @param callback
   *          callback to invoke with the response for each region
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   */
  <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
      Message request, byte[] startKey, byte[] endKey, R responsePrototype,
      Batch.Callback<R> callback) throws ServiceException, Throwable;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected value.
   * If it does, it performs the row mutations. If the passed value is null, the check
   * is for the lack of column (ie: non-existence)
   *
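   * <p>A minimal usage sketch (the column names and values are illustrative):</p>
   * <blockquote><pre>
   * RowMutations mutations = new RowMutations(Bytes.toBytes("row1"));
   * Put put = new Put(Bytes.toBytes("row1"));
   * put.add(Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("done"));
   * mutations.add(put);
   * // apply the mutations only if cf:version currently equals "1"
   * boolean applied = table.checkAndMutate(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
   *     Bytes.toBytes("version"), CompareOp.EQUAL, Bytes.toBytes("1"), mutations);
   * </pre></blockquote>
   *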
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param compareOp the comparison operator
   * @param value the expected value
   * @param mutation mutations to perform if check succeeds
   * @throws IOException e
   * @return true if the row mutations were executed, false otherwise
   */
  boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
      byte[] value, RowMutations mutation) throws IOException;
}