Java Code Examples for org.apache.accumulo.core.client.Connector#tableOperations()

The following examples show how to use org.apache.accumulo.core.client.Connector#tableOperations(). Each example is drawn from an open source project; the originating project, source file, and license are noted above each example.
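All of the examples below assume a Connector has already been obtained. For reference, a minimal sketch of acquiring a Connector and its TableOperations handle might look like the following; the instance name, ZooKeeper host, and credentials are placeholders rather than values taken from any of the projects below.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class TableOperationsSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection parameters; substitute your cluster's values.
        final ZooKeeperInstance instance = new ZooKeeperInstance("myInstance", "zoo1:2181");
        final Connector connector = instance.getConnector("root", new PasswordToken("secret"));

        // TableOperations exposes the administrative table calls used in the
        // examples below: exists(), create(), delete(), list(), and more.
        final TableOperations tableOps = connector.tableOperations();
        if (!tableOps.exists("example_table")) {
            tableOps.create("example_table");
        }
    }
}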
Example 1
Source File: ProspectorService.java    From rya with Apache License 2.0
/**
 * Constructs an instance of {@link ProspectorService}.
 *
 * @param connector - The Accumulo connector used to communicate with the table. (not null)
 * @param tableName - The name of the Accumulo table that will be queried for Prospect results. (not null)
 * @throws AccumuloException A problem occurred while creating the table.
 * @throws AccumuloSecurityException A problem occurred while creating the table.
 */
public ProspectorService(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException {
    this.connector = requireNonNull(connector);
    this.tableName = requireNonNull(tableName);

    this.plans = ProspectorUtils.planMap(manager.getPlans());

    // Create the table if it doesn't already exist.
    try {
        final TableOperations tos = connector.tableOperations();
        if(!tos.exists(tableName)) {
            tos.create(tableName);
        }
    } catch(TableExistsException e) {
        // Do nothing. Another client created the table concurrently.
    }
}
 
Example 2
Source File: MixedGeoAndGeoWaveTest.java    From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);
        
        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(), biKey.getKey().getColumnVisibilityParsed(),
                    biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
 
Example 3
Source File: MultiValueCompositeIndexTest.java    From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);
        
        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(), biKey.getKey().getColumnVisibilityParsed(),
                    biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
 
Example 4
Source File: CompositeIndexTest.java    From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);
        
        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(), biKey.getKey().getColumnVisibilityParsed(),
                    biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
 
Example 5
Source File: GeoSortedQueryDataTest.java    From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);
        
        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(), biKey.getKey().getColumnVisibilityParsed(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
 
Example 6
Source File: ExceededOrThresholdMarkerJexlNodeTest.java    From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);
        
        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(), biKey.getKey().getColumnVisibilityParsed(),
                    biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
 
Example 7
Source File: ContentFunctionQueryTest.java    From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    tops.create(TableName.DATE_INDEX);
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);
        
        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(), biKey.getKey().getColumnVisibilityParsed(),
                    biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
 
Example 8
Source File: AccumuloInstanceExistsIT.java    From rya with Apache License 2.0
@Test
public void exists_dataTables() throws AccumuloException, AccumuloSecurityException, RyaClientException, TableExistsException {
    final Connector connector = getConnector();
    final TableOperations tableOps = connector.tableOperations();

    // Create the Rya instance's core data tables.
    final String instanceName = "test_instance_";

    final String spoTableName = instanceName + RdfCloudTripleStoreConstants.TBL_SPO_SUFFIX;
    final String ospTableName = instanceName + RdfCloudTripleStoreConstants.TBL_OSP_SUFFIX;
    final String poTableName = instanceName + RdfCloudTripleStoreConstants.TBL_PO_SUFFIX;
    tableOps.create(spoTableName);
    tableOps.create(ospTableName);
    tableOps.create(poTableName);

    // Verify the command reports the instance exists.
    final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
            getUsername(),
            getPassword().toCharArray(),
            getInstanceName(),
            getZookeepers());

    final AccumuloInstanceExists instanceExists = new AccumuloInstanceExists(connectionDetails, getConnector());
    assertTrue( instanceExists.exists(instanceName) );
}
 
Example 9
Source File: AccumuloInstanceExistsIT.java    From rya with Apache License 2.0
@Test
public void exists_ryaDetailsTable() throws AccumuloException, AccumuloSecurityException, RyaClientException, TableExistsException {
    final Connector connector = getConnector();
    final TableOperations tableOps = connector.tableOperations();

    // Create the Rya instance's Rya details table.
    final String instanceName = "test_instance_";
    final String ryaDetailsTable = instanceName + AccumuloRyaInstanceDetailsRepository.INSTANCE_DETAILS_TABLE_NAME;
    tableOps.create(ryaDetailsTable);

    // Verify the command reports the instance exists.
    final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
            getUsername(),
            getPassword().toCharArray(),
            getInstanceName(),
            getZookeepers());

    final AccumuloInstanceExists instanceExists = new AccumuloInstanceExists(connectionDetails, getConnector());
    assertTrue( instanceExists.exists(instanceName) );
}
 
Example 10
Source File: NumShards.java    From datawave with Apache License 2.0
private void ensureTableExists(Connector connector, String metadataTableName) throws AccumuloException, AccumuloSecurityException {
    TableOperations tops = connector.tableOperations();
    if (!tops.exists(metadataTableName)) {
        log.info("Creating table: " + metadataTableName);
        try {
            tops.create(metadataTableName);
        } catch (TableExistsException tee) {
            log.error(metadataTableName + " already exists; someone got here first.");
        }
    }
}
 
Example 11
Source File: PcjIntegrationTestingUtil.java    From rya with Apache License 2.0
public static void deleteIndexTables(final Connector accCon, final int tableNum, final String prefix)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    final TableOperations ops = accCon.tableOperations();
    final String tablename = prefix + "INDEX_";
    for (int i = 1; i < tableNum + 1; i++) {
        if (ops.exists(tablename + i)) {
            ops.delete(tablename + i);
        }
    }
}
 
Example 12
Source File: PcjIntegrationTestingUtil.java    From rya with Apache License 2.0
public static void deleteCoreRyaTables(final Connector accCon, final String prefix)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    final TableOperations ops = accCon.tableOperations();
    if (ops.exists(prefix + "spo")) {
        ops.delete(prefix + "spo");
    }
    if (ops.exists(prefix + "po")) {
        ops.delete(prefix + "po");
    }
    if (ops.exists(prefix + "osp")) {
        ops.delete(prefix + "osp");
    }
}
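Both deletion helpers above enumerate the expected table names explicitly. When every table sharing a prefix should be removed, a hypothetical variant (a sketch, not part of the rya project) can filter the instance's table listing instead; TableOperations.list() returns the names of all tables in the instance.

// Hypothetical helper: delete every table whose name starts with the prefix.
public static void deleteTablesWithPrefix(final Connector connector, final String prefix)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    final TableOperations ops = connector.tableOperations();
    for (final String table : ops.list()) {
        if (table.startsWith(prefix)) {
            ops.delete(table);
        }
    }
}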
 
Example 13
Source File: MergeTool.java    From rya with Apache License 2.0
/**
 * Imports the child files that hold the table data into the parent instance as a temporary table.
 * @param childTableName the name of the child table to import into a temporary parent table.
 * @throws Exception
 */
public void importChildFilesToTempParentTable(final String childTableName) throws Exception {
    // Create a temporary table in the parent instance to import the child files to.  Then run the merge process on the parent table and temp child table.
    final String tempChildTable = childTableName + TEMP_SUFFIX;

    createTempTableIfNeeded(tempChildTable);

    final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
    parentAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
    final TableOperations parentTableOperations = parentConnector.tableOperations();

    final Path localWorkDir = CopyTool.getPath(localMergeFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = CopyTool.getPath(baseImportDir, childTableName);

    CopyTool.copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir, conf);

    final Path files = CopyTool.getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = CopyTool.getPath(hdfsBaseWorkDir.toString(), "failures");
    final FileSystem fs = FileSystem.get(conf);
    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files
    fs.setPermission(hdfsBaseWorkDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

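    // Bulk-import the staged RFiles into the temporary table. The final
    // argument (setTime) is false, so timestamps stored in the files are
    // preserved rather than reassigned at import time.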
    parentTableOperations.importDirectory(tempChildTable, files.toString(), failures.toString(), false);

    AccumuloRyaUtils.printTablePretty(tempChildTable, conf);
}
 
Example 14
Source File: AccumuloHDFSFileInputFormat.java    From rya with Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException {
    //read the params from AccumuloInputFormat
    Configuration conf = jobContext.getConfiguration();
    Instance instance = MRUtils.AccumuloProps.getInstance(jobContext);
    String user = MRUtils.AccumuloProps.getUsername(jobContext);
    AuthenticationToken password = MRUtils.AccumuloProps.getPassword(jobContext);
    String table = MRUtils.AccumuloProps.getTablename(jobContext);
    ArgumentChecker.notNull(instance);
    ArgumentChecker.notNull(table);

    //find the files necessary
    try {
        Connector connector = instance.getConnector(user, password);
        TableOperations tos = connector.tableOperations();
        String tableId = tos.tableIdMap().get(table);
        Scanner scanner = connector.createScanner("accumulo.metadata", Authorizations.EMPTY); //TODO: auths?
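        // Tablet entries in the metadata table are keyed by table id; the
        // bounds below (table id plus low/high sentinel characters) make the
        // range span every metadata row for this table.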
        scanner.setRange(new Range(new Text(tableId + "\u0000"), new Text(tableId + "\uFFFD")));
        scanner.fetchColumnFamily(new Text("file"));
        List<String> files = new ArrayList<String>();
        List<InputSplit> fileSplits = new ArrayList<InputSplit>();
        for (Map.Entry<Key, Value> entry : scanner) {
            String file = entry.getKey().getColumnQualifier().toString();
            Path path = new Path(file);
            FileSystem fs = path.getFileSystem(conf);
            FileStatus fileStatus = fs.getFileStatus(path);
            long len = fileStatus.getLen();
            BlockLocation[] fileBlockLocations = fs.getFileBlockLocations(fileStatus, 0, len);
            files.add(file);
            fileSplits.add(new FileSplit(path, 0, len, fileBlockLocations[0].getHosts()));
        }
        System.out.println(files);
        return fileSplits;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
 
Example 15
Source File: EdgeKeyVersioningCache.java    From datawave with Apache License 2.0
private void ensureTableExists(Connector connector) throws AccumuloSecurityException, AccumuloException {
    TableOperations tops = connector.tableOperations();
    if (!tops.exists(metadataTableName)) {
        log.info("Creating table: " + metadataTableName);
        try {
            tops.create(metadataTableName);
        } catch (TableExistsException e) {
            log.error(metadataTableName + " already exists; someone got here first.");
        }
    }
}
 
Example 16
Source File: RyaTableNames.java    From rya with Apache License 2.0
/**
 * Get the Accumulo table names that are used by an instance of Rya.
 *
 * @param ryaInstanceName - The name of the Rya instance. (not null)
 * @param conn - A connector to the host Accumulo instance. (not null)
 * @return The Accumulo table names that are used by the Rya instance.
 * @throws NotInitializedException The instance's Rya Details have not been initialized.
 * @throws RyaDetailsRepositoryException General problem with the Rya Details repository.
 * @throws PCJStorageException General problem with the PCJ storage.
 */
public List<String> getTableNames(final String ryaInstanceName, final Connector conn) throws NotInitializedException, RyaDetailsRepositoryException, PCJStorageException {
    // Build the list of tables that may be present within the Rya instance.
    final List<String> tables = new ArrayList<>();

    // Core Rya tables.
    final TableLayoutStrategy coreTableNames = new TablePrefixLayoutStrategy(ryaInstanceName);
    tables.add( coreTableNames.getSpo() );
    tables.add( coreTableNames.getPo() );
    tables.add( coreTableNames.getOsp() );
    tables.add( coreTableNames.getEval() );
    tables.add( coreTableNames.getNs() );
    tables.add( coreTableNames.getProspects() );
    tables.add( coreTableNames.getSelectivity() );

    // Rya Details table.
    tables.add( AccumuloRyaInstanceDetailsRepository.makeTableName(ryaInstanceName) );

    // Secondary Indexer Tables.
    final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(conn, ryaInstanceName);
    final RyaDetails details = detailsRepo.getRyaInstanceDetails();

    if(details.getEntityCentricIndexDetails().isEnabled()) {
        tables.add( EntityCentricIndex.makeTableName(ryaInstanceName) );
    }

    if(details.getFreeTextIndexDetails().isEnabled()) {
        tables.addAll( AccumuloFreeTextIndexer.makeTableNames(ryaInstanceName) );
    }

    if(details.getTemporalIndexDetails().isEnabled()) {
        tables.add( AccumuloTemporalIndexer.makeTableName(ryaInstanceName) );
    }

    /*
     * if(details.getGeoIndexDetails().isEnabled()) {
     *     tables.add( GeoMesaGeoIndexer.makeTableName(ryaInstanceName) );
     * }
     */

    if(details.getPCJIndexDetails().isEnabled()) {
        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(conn, ryaInstanceName)) {
            final List<String> pcjIds = pcjStorage.listPcjs();

            final PcjTableNameFactory tableNameFactory = new PcjTableNameFactory();
            for(final String pcjId : pcjIds) {
                tables.add( tableNameFactory.makeTableName(ryaInstanceName, pcjId) );
            }
        }
    }

    // Verify they actually exist. If any don't, remove them from the list.
    final TableOperations tableOps = conn.tableOperations();

    final Iterator<String> tablesIt = tables.iterator();
    while(tablesIt.hasNext()) {
        final String table = tablesIt.next();
        if(!tableOps.exists(table)) {
            tablesIt.remove();
        }
    }

    return tables;
}
 
Example 17
Source File: CopyTool.java    From rya with Apache License 2.0
/**
 * Imports the files that hold the table data into the child instance.
 * @param childTableName the name of the child table to import.
 * @throws Exception
 */
public void importFilesToChildTable(final String childTableName) throws Exception {
    final String normalizedChildTableName = FilenameUtils.normalize(childTableName);
    if (normalizedChildTableName == null) {
        throw new Exception("Invalid child table name: " + childTableName);
    }

    final Configuration childConfig = MergeToolMapper.getChildConfig(conf);
    final AccumuloRdfConfiguration childAccumuloRdfConfiguration = new AccumuloRdfConfiguration(childConfig);
    childAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector childConnector = AccumuloRyaUtils.setupConnector(childAccumuloRdfConfiguration);
    final TableOperations childTableOperations = childConnector.tableOperations();

    final Path localWorkDir = getPath(localCopyFileImportDir, normalizedChildTableName);
    final Path hdfsBaseWorkDir = getPath(baseImportDir, normalizedChildTableName);

    final FileSystem fs = FileSystem.get(conf);
    if (fs.exists(hdfsBaseWorkDir)) {
        fs.delete(hdfsBaseWorkDir, true);
    }

    log.info("Importing from the local directory: " + localWorkDir);
    log.info("Importing to the HDFS directory: " + hdfsBaseWorkDir);
    copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir);

    final Path files = getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = getPath(hdfsBaseWorkDir.toString(), "failures");

    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files
    final FsShell hdfs = new FsShell(conf);
    if (!fs.isDirectory(hdfsBaseWorkDir)) {
        throw new IllegalArgumentException("Configured working directory is not a valid directory: " + hdfsBaseWorkDir.toString());
    }
    hdfs.run(new String[] {"-chmod", "777", hdfsBaseWorkDir.toString()});
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    childTableOperations.importDirectory(normalizedChildTableName, files.toString(), failures.toString(), false);
}
 
Example 18
Source File: CopyTool.java    From rya with Apache License 2.0
private int runCopy() throws Exception {
    log.info("Setting up Copy Tool...");

    setup();

    if (!useCopyFileOutput) {
        createChildInstance(conf);
    }

    final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
    parentAccumuloRdfConfiguration.setTablePrefix(tablePrefix);
    final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
    final TableOperations parentTableOperations = parentConnector.tableOperations();

    for (final String table : tables) {
        // Check if the parent table exists before creating a job on it
        if (parentTableOperations.exists(table)) {
            final String childTable = table.replaceFirst(tablePrefix, childTablePrefix);
            final String jobName = "Copy Tool, copying Parent Table: " + table + ", into Child Table: " + childTable + ", " + System.currentTimeMillis();
            log.info("Initializing job: " + jobName);
            conf.set(MRUtils.JOB_NAME_PROP, jobName);
            conf.set(MergeTool.TABLE_NAME_PROP, table);

            final Job job = Job.getInstance(conf);
            job.setJarByClass(CopyTool.class);

            setupAccumuloInput(job);

            InputFormatBase.setInputTableName(job, table);

            // Set input output of the particular job
            if (useCopyFileOutput) {
                job.setMapOutputKeyClass(Key.class);
                job.setMapOutputValueClass(Value.class);
                job.setOutputKeyClass(Key.class);
                job.setOutputValueClass(Value.class);
            } else {
                job.setMapOutputKeyClass(Text.class);
                job.setMapOutputValueClass(Mutation.class);
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(Mutation.class);
            }

            setupAccumuloOutput(job, childTable);

            // Set mapper and reducer classes
            if (useCopyFileOutput) {
                setupSplitsFile(job, parentTableOperations, table, childTable);
                job.setMapperClass(FileCopyToolMapper.class);
            } else {
                job.setMapperClass(AccumuloCopyToolMapper.class);
            }
            job.setReducerClass(Reducer.class);

            // Submit the job
            final Date beginTime = new Date();
            log.info("Job for table \"" + table + "\" started: " + beginTime);
            final int exitCode = job.waitForCompletion(true) ? 0 : 1;

            if (exitCode == 0) {
                if (useCopyFileOutput) {
                    log.info("Moving data from HDFS to the local file system for the table: " + childTable);
                    final Path hdfsPath = getPath(baseOutputDir, childTable);
                    final Path localPath = getPath(localBaseOutputDir, childTable);
                    log.info("HDFS directory: " + hdfsPath.toString());
                    log.info("Local directory: " + localPath.toString());
                    copyHdfsToLocal(hdfsPath, localPath);
                }

                final Date endTime = new Date();
                log.info("Job for table \"" + table + "\" finished: " + endTime);
                log.info("The job took " + (endTime.getTime() - beginTime.getTime()) / 1000 + " seconds.");
            } else {
                log.error("Job for table \"" + table + "\" Failed!!!");
                return exitCode;
            }
        } else {
            log.warn("The table \"" + table + "\" was NOT found in the parent instance and cannot be copied.");
        }
    }

    return 0;
}