Java Code Examples for org.apache.accumulo.core.client.ZooKeeperInstance#getConnector()

The following examples show how to use org.apache.accumulo.core.client.ZooKeeperInstance#getConnector(). Each example is taken from an open source project; the source file and license are noted above it.
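All of the examples below share the same basic pattern: build a ZooKeeperInstance from an instance name and a comma-separated list of ZooKeeper hosts (either directly or through a ClientConfiguration), then call getConnector() with a principal and an AuthenticationToken (usually a PasswordToken). Here is a minimal sketch of that pattern; the instance name, ZooKeeper hosts, and credentials are placeholder values, and getConnector() throws AccumuloException and AccumuloSecurityException, which callers must handle (as Example 1 does).

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

// Placeholder connection values; substitute your own cluster's settings.
ZooKeeperInstance instance = new ZooKeeperInstance(
        ClientConfiguration.loadDefault()
                .withInstance("myInstance")
                .withZkHosts("zk1:2181,zk2:2181"));
Connector connector = instance.getConnector("user", new PasswordToken("secret"));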
Example 1
Source File: IngestMetricsSummaryLoader.java    From datawave with Apache License 2.0
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    
    Configuration conf = context.getConfiguration();
    String user = conf.get(MetricsConfig.USER);
    String password = conf.get(MetricsConfig.PASS);
    String instance = conf.get(MetricsConfig.INSTANCE);
    String zookeepers = conf.get(MetricsConfig.ZOOKEEPERS);
    
    useHourlyPrecision = HourlyPrecisionHelper.checkForHourlyPrecisionOption(context.getConfiguration(), log);
    
    try {
        ZooKeeperInstance inst = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
        Connector con = inst.getConnector(user, new PasswordToken(password));
        ingestScanner = con.createScanner(conf.get(MetricsConfig.INGEST_TABLE, MetricsConfig.DEFAULT_INGEST_TABLE), Authorizations.EMPTY);
    } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
        throw new IOException(e);
    }
}
 
Example 2
Source File: AccumuloGeoTableTest.java    From mrgeo with Apache License 2.0
@Test
@Category(UnitTest.class)
public void testGetGeoTables() throws Exception
{
  ZooKeeperInstance zkinst = new ZooKeeperInstance(inst, zoo);
  PasswordToken pwTok = new PasswordToken(pw.getBytes());
  Connector conn = zkinst.getConnector(u, pwTok);
  Assert.assertNotNull(conn);

  PasswordToken token = new PasswordToken(pw.getBytes());

  //Authorizations auths = new Authorizations(authsStr.split(","));
  Authorizations auths = new Authorizations("A,B,C,D,ROLE_USER,U".split(","));
  System.out.println(auths.toString());
  Hashtable<String, String> ht = AccumuloUtils.getGeoTables(null, token, auths, conn);
  for (String k : ht.keySet())
  {
    System.out.println(k + " => " + ht.get(k));
  }
}
 
Example 3
Source File: MultiRFileOutputFormatter.java    From datawave with Apache License 2.0
protected void setTableIdsAndConfigs() throws IOException {
    ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(conf.get(INSTANCE_NAME))
                    .withZkHosts(conf.get(ZOOKEEPERS)));
    Connector connector = null;
    tableConfigs = new HashMap<>();
    Iterable<String> localityGroupTables = Splitter.on(",").split(conf.get(CONFIGURE_LOCALITY_GROUPS, ""));
    try {
        connector = instance.getConnector(conf.get(USERNAME), new PasswordToken(Base64.decodeBase64(conf.get(PASSWORD))));
        
        tableIds = connector.tableOperations().tableIdMap();
        Set<String> compressionTableBlackList = getCompressionTableBlackList(conf);
        String compressionType = getCompressionType(conf);
        for (String tableName : tableIds.keySet()) {
            ConfigurationCopy tableConfig = new ConfigurationCopy(connector.tableOperations().getProperties(tableName));
            tableConfig.set(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), (compressionTableBlackList.contains(tableName) ? Compression.COMPRESSION_NONE
                            : compressionType));
            if (Iterables.contains(localityGroupTables, tableName)) {
                // pull the locality groups for this table
                Map<String,Set<Text>> localityGroups = connector.tableOperations().getLocalityGroups(tableName);
                Map<Text,String> cftlg = Maps.newHashMap();
                Map<String,Set<ByteSequence>> lgtcf = Maps.newHashMap();
                for (Entry<String,Set<Text>> locs : localityGroups.entrySet()) {
                    lgtcf.put(locs.getKey(), new HashSet<>());
                    for (Text loc : locs.getValue()) {
                        cftlg.put(loc, locs.getKey());
                        lgtcf.get(locs.getKey()).add(new ArrayByteSequence(loc.getBytes()));
                    }
                }
                columnFamilyToLocalityGroup.put(tableName, cftlg);
                localityGroupToColumnFamilies.put(tableName, lgtcf);
            }
            tableConfigs.put(tableName, tableConfig);
            
        }
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
        throw new IOException("Unable to get configuration.  Please call MultiRFileOutput.setAccumuloConfiguration with the proper credentials", e);
    }
}
 
Example 4
Source File: CardinalityScanner.java    From datawave with Apache License 2.0
public Set<CardinalityIntersectionRecord> scanCardinalities(List<String> fields, DateAggregationType dateAggregationType,
                DatatypeAggregationType datatypeAggregationType) throws Exception {
    
    Map<CardinalityIntersectionRecord,HyperLogLogPlus> cardinalityMap = new TreeMap<>();
    Scanner scanner = null;
    try {
        ZooKeeperInstance instance = new ZooKeeperInstance(config.getInstanceName(), config.getZookeepers());
        Connector connector = instance.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));
        Collection<Authorizations> authCollection = Collections.singleton(new Authorizations(config.getAuths().split(",")));
        if (!connector.tableOperations().exists(config.getTableName())) {
            throw new IllegalArgumentException("Table " + config.getTableName() + " does not exist");
        }
        scanner = ScannerHelper.createScanner(connector, config.getTableName(), authCollection);
        Range r = new Range(config.getBeginDate(), config.getEndDate() + "\0");
        scanner.setRange(r);
        
        Iterator<Map.Entry<Key,Value>> itr = scanner.iterator();
        while (itr.hasNext()) {
            Map.Entry<Key,Value> nextEntry = itr.next();
            Key key = nextEntry.getKey();
            String field = key.getColumnFamily().toString();
            if (fields != null && !fields.isEmpty() && !fields.contains(field)) {
                continue;
            } else {
                addEntry(cardinalityMap, nextEntry, dateAggregationType, datatypeAggregationType);
            }
        }
    } catch (Exception e) {
        log.error(e);
    } finally {
        if (scanner != null) {
            scanner.close();
            
        }
    }
    return cardinalityMap.keySet();
}
 
Example 5
Source File: AccumuloCounterSource.java    From datawave with Apache License 2.0
public AccumuloCounterSource(String instanceStr, String zookeepers, String username, String password, String table) throws AccumuloException,
                AccumuloSecurityException {
    ZooKeeperInstance instance = new ZooKeeperInstance(instanceStr, zookeepers);
    connector = instance.getConnector(username, new PasswordToken(password));
    queryTable = table;
    this.username = username;
}
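Constructing this counter source is then just a matter of supplying the connection parameters; the values in this sketch are placeholders, not from the datawave project.

// Throws AccumuloException/AccumuloSecurityException if the connection fails.
AccumuloCounterSource source = new AccumuloCounterSource(
        "myInstance", "zk1:2181,zk2:2181", "user", "secret", "metricsTable");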
 
Example 6
Source File: AccumuloGraphConfiguration.java    From vertexium with Apache License 2.0
public Connector createConnector() {
    try {
        LOGGER.info("Connecting to accumulo instance [%s] zookeeper servers [%s]", this.getAccumuloInstanceName(), this.getZookeeperServers());
        ZooKeeperInstance instance = new ZooKeeperInstance(getClientConfiguration());
        return instance.getConnector(this.getAccumuloUsername(), this.getAuthenticationToken());
    } catch (Exception ex) {
        throw new VertexiumException(
            String.format("Could not connect to Accumulo instance [%s] zookeeper servers [%s]", this.getAccumuloInstanceName(), this.getZookeeperServers()),
            ex
        );
    }
}
 
Example 7
Source File: AccumuloInstanceDriver.java    From rya with Apache License 2.0
/**
 * Sets up the {@link MiniAccumuloCluster} or the {@link MockInstance}.
 * @throws Exception
 */
public void setUpInstance() throws Exception {
    if (!isMock) {
        log.info("Setting up " + driverName + " MiniAccumulo cluster...");
        // Create and Run MiniAccumulo Cluster
        tempDir = Files.createTempDir();
        tempDir.deleteOnExit();
        miniAccumuloCluster = new MiniAccumuloCluster(tempDir, userpwd);
        copyHadoopHomeToTemp();
        miniAccumuloCluster.getConfig().setInstanceName(instanceName);
        log.info(driverName + " MiniAccumulo instance starting up...");
        miniAccumuloCluster.start();
        Thread.sleep(1000);
        log.info(driverName + " MiniAccumulo instance started");
        log.info("Creating connector to " + driverName + " MiniAccumulo instance...");
        zooKeeperInstance = new ZooKeeperInstance(miniAccumuloCluster.getClientConfig());
        instance = zooKeeperInstance;
        connector = zooKeeperInstance.getConnector(user, new PasswordToken(userpwd));
        log.info("Created connector to " + driverName + " MiniAccumulo instance");
    } else {
        log.info("Setting up " + driverName + " mock instance...");
        mockInstance = new MockInstance(instanceName);
        instance = mockInstance;
        connector = mockInstance.getConnector(user, new PasswordToken(userpwd));
        log.info("Created connector to " + driverName + " mock instance");
    }
    zooKeepers = instance.getZooKeepers();
}
 
Example 8
Source File: AccumuloGeoTableTest.java    From mrgeo with Apache License 2.0
@Test
@Category(UnitTest.class)
public void testGetTile() throws Exception
{

  ZooKeeperInstance zkinst = new ZooKeeperInstance(inst, zoo);
  PasswordToken pwTok = new PasswordToken(pw.getBytes());
  Connector conn = zkinst.getConnector(u, pwTok);
  Assert.assertNotNull(conn);

  PasswordToken token = new PasswordToken(pw.getBytes());

  Authorizations auths = new Authorizations(authsStr.split(","));
  long start = 0;
  long end = Long.MAX_VALUE;
  Key sKey = AccumuloUtils.toKey(start);
  Key eKey = AccumuloUtils.toKey(end);
  Range r = new Range(sKey, eKey);
  Scanner s = conn.createScanner("paris4", auths);
  s.fetchColumnFamily(new Text(Integer.toString(10)));
  s.setRange(r);

  Iterator<Entry<Key, Value>> it = s.iterator();
  while (it.hasNext())
  {
    Entry<Key, Value> ent = it.next();
    if (ent == null)
    {
      return;
    }
    System.out.println("current key   = " + AccumuloUtils.toLong(ent.getKey().getRow()));
    System.out.println("current value = " + ent.getValue().getSize());
  }

}
 
Example 9
Source File: UpgradeCounterValues.java    From datawave with Apache License 2.0
protected void run(String[] args) throws ParseException, AccumuloSecurityException, AccumuloException, TableNotFoundException, IOException {
    parseConfig(args);
    
    ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers));
    Connector connector = instance.getConnector(username, new PasswordToken(password));
    Authorizations auths = connector.securityOperations().getUserAuthorizations(connector.whoami());
    
    try (BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig().setMaxWriteThreads(bwThreads).setMaxMemory(bwMemory)
                    .setMaxLatency(60, TimeUnit.SECONDS));
                    BatchScanner scanner = connector.createBatchScanner(tableName, auths, bsThreads)) {
        scanner.setRanges(ranges);
        
        for (Entry<Key,Value> entry : scanner) {
            Key key = entry.getKey();
            
            ByteArrayDataInput in = ByteStreams.newDataInput(entry.getValue().get());
            Counters counters = new Counters();
            try {
                counters.readFields(in);
            } catch (IOException e) {
                // An IOException means the counters are not in the current format. We *assume* they are in
                // the old (CDH3) format, deserialize them accordingly, and re-write the key with the new value.
                in = ByteStreams.newDataInput(entry.getValue().get());
                int numGroups = in.readInt();
                while (numGroups-- > 0) {
                    String groupName = Text.readString(in);
                    String groupDisplayName = Text.readString(in);
                    CounterGroup group = counters.addGroup(groupName, groupDisplayName);
                    
                    int groupSize = WritableUtils.readVInt(in);
                    for (int i = 0; i < groupSize; i++) {
                        String counterName = Text.readString(in);
                        String counterDisplayName = counterName;
                        if (in.readBoolean())
                            counterDisplayName = Text.readString(in);
                        long value = WritableUtils.readVLong(in);
                        group.addCounter(counterName, counterDisplayName, value);
                    }
                }
                
                ByteArrayDataOutput out = ByteStreams.newDataOutput();
                counters.write(out);
                Mutation m = new Mutation(key.getRow());
                m.put(key.getColumnFamily(), key.getColumnQualifier(), key.getColumnVisibilityParsed(), key.getTimestamp() + 1,
                                new Value(out.toByteArray()));
                writer.addMutation(m);
            }
        }
        
    }
}
 
Example 10
Source File: MetadataHelperUpdateHdfsListener.java    From datawave with Apache License 2.0
private void maybeUpdateTypeMetadataInHdfs(final SharedCacheCoordinator watcher, String triStateName, String metadataTableName) throws Exception {
    
    boolean locked = false;
    InterProcessMutex lock = (InterProcessMutex) watcher.getMutex("lock");
    try {
        locked = lock.acquire(this.lockWaitTime, TimeUnit.MILLISECONDS);
        if (!locked)
            log.debug("table:" + metadataTableName + " Unable to acquire lock to update " + metadataTableName
                            + ". Another webserver is updating the typeMetadata.");
        else
            log.debug("table:" + metadataTableName + " Obtained lock on updateTypeMetadata for " + metadataTableName);
    } catch (Exception e) {
        log.warn("table:" + metadataTableName + " Got Exception trying to acquire lock to update " + metadataTableName + ".", e);
    }
    
    try {
        if (locked) {
            try {
                log.debug("table:" + metadataTableName + " checkTriState(" + triStateName + ", " + SharedTriState.STATE.NEEDS_UPDATE);
                if (watcher.checkTriState(triStateName, SharedTriState.STATE.NEEDS_UPDATE)) {
                    if (log.isDebugEnabled()) {
                        log.debug("table:" + metadataTableName + " " + this + " STATE is NEEDS_UPDATE. Will write the TypeMetadata map to hdfs");
                    }
                    watcher.setTriState(triStateName, SharedTriState.STATE.UPDATING);
                    if (log.isDebugEnabled()) {
                        log.debug("table:" + metadataTableName + " " + this + " setTriState to UPDATING");
                    }
                    // get a connection for my MetadataHelper, and get the TypeMetadata map
                    ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(this.instance)
                                    .withZkHosts(this.zookeepers));
                    Connector connector = instance.getConnector(this.username, new PasswordToken(this.password));
                    TypeMetadataHelper typeMetadataHelper = this.typeMetadataHelperFactory.createTypeMetadataHelper(connector, metadataTableName,
                                    allMetadataAuths, false);
                    typeMetadataWriter.writeTypeMetadataMap(typeMetadataHelper.getTypeMetadataMap(this.allMetadataAuths), metadataTableName);
                    if (log.isDebugEnabled()) {
                        log.debug("table:" + metadataTableName + " " + this + " set the sharedTriState needsUpdate to UPDATED for " + metadataTableName);
                    }
                    watcher.setTriState(triStateName, SharedTriState.STATE.UPDATED);
                } else {
                    if (log.isDebugEnabled()) {
                        log.debug("table:"
                                        + metadataTableName
                                        + " "
                                        + this
                                        + "  STATE is not NEEDS_UPDATE! Someone else may be writing or has already written the TypeMetadata map, just release the lock");
                    }
                }
            } catch (Exception ex) {
                log.warn("table:" + metadataTableName + " Unable to write TypeMetadataMap for " + metadataTableName, ex);
                watcher.setTriState(triStateName, SharedTriState.STATE.NEEDS_UPDATE);
                if (log.isDebugEnabled()) {
                    log.debug("After exception, set the SharedTriState STATE to NEEDS_UPDATE");
                }
                
            }
        }
    } finally {
        if (locked) {
            lock.release();
            if (log.isTraceEnabled())
                log.trace("table:" + metadataTableName + " " + this + " released the lock for " + metadataTableName);
            
        }
    }
}
 
Example 11
Source File: AccumuloInstanceDriver.java    From rya with Apache License 2.0
/**
 * Sets up the {@link MiniAccumuloCluster}, the {@link MockInstance}, or a
 * distributed instance.
 * @throws Exception
 */
public void setUpInstance() throws Exception {
    switch (instanceType) {
        case DISTRIBUTION:
            log.info("Setting up " + driverName + " distribution instance...");
            if (instanceName == null) {
                throw new IllegalArgumentException("Must specify instance name for distributed mode");
            } else if (zooKeepers == null) {
                throw new IllegalArgumentException("Must specify ZooKeeper hosts for distributed mode");
            }
            instance = new ZooKeeperInstance(instanceName, zooKeepers);
            connector = instance.getConnector(user, new PasswordToken(userpwd));
            log.info("Created connector to " + driverName + " distribution instance");
            break;
        case MINI:
            log.info("Setting up " + driverName + " MiniAccumulo cluster...");
            // Create and Run MiniAccumulo Cluster
            tempDir = Files.createTempDir();
            tempDir.deleteOnExit();
            miniAccumuloCluster = new MiniAccumuloCluster(tempDir, userpwd);
            copyHadoopHomeToTemp();
            miniAccumuloCluster.getConfig().setInstanceName(instanceName);
            log.info(driverName + " MiniAccumulo instance starting up...");
            miniAccumuloCluster.start();
            Thread.sleep(1000);
            log.info(driverName + " MiniAccumulo instance started");
            log.info("Creating connector to " + driverName + " MiniAccumulo instance...");
            zooKeeperInstance = new ZooKeeperInstance(miniAccumuloCluster.getClientConfig());
            instance = zooKeeperInstance;
            connector = zooKeeperInstance.getConnector(user, new PasswordToken(userpwd));
            log.info("Created connector to " + driverName + " MiniAccumulo instance");
            break;
        case MOCK:
            log.info("Setting up " + driverName + " mock instance...");
            mockInstance = new MockInstance(instanceName);
            instance = mockInstance;
            connector = mockInstance.getConnector(user, new PasswordToken(userpwd));
            log.info("Created connector to " + driverName + " mock instance");
            break;
        default:
            throw new AccumuloException("Unexpected instance type: " + instanceType);
    }
    zooKeepers = instance.getZooKeepers();
}
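Finally, note that ZooKeeperInstance and Connector are deprecated as of Accumulo 2.0 in favor of AccumuloClient and its fluent builder. On 2.x, the rough equivalent of the pattern in these examples is the sketch below; the connection values are again placeholders.

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

// Unlike Connector, AccumuloClient holds resources and should be closed.
try (AccumuloClient client = Accumulo.newClient()
        .to("myInstance", "zk1:2181,zk2:2181")
        .as("user", "secret")
        .build()) {
    // use client.createScanner(...), client.tableOperations(), etc.
}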