org.apache.accumulo.core.data.Mutation Java Examples

The following examples show how to use org.apache.accumulo.core.data.Mutation. They are drawn from a range of open source projects; each example notes its source file, project, and license.
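Before the examples, a quick orientation: a Mutation collects all of the column updates for a single row, and a BatchWriter sends Mutations to a table. The sketch below shows that basic workflow using the Accumulo 2.x client builder; the instance name, ZooKeeper address, credentials, and table name are placeholders for illustration, not values taken from any example on this page.

import java.nio.charset.StandardCharsets;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;

public class MutationBasics {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; replace with your own deployment.
        try (AccumuloClient client = Accumulo.newClient()
                .to("myInstance", "zkhost:2181")
                .as("user", "password").build();
                BatchWriter writer = client.createBatchWriter("mytable")) {
            // One Mutation per row; each put() adds a column update.
            Mutation m = new Mutation("row1");
            m.put("family", "qualifier", new ColumnVisibility("public"),
                    new Value("value".getBytes(StandardCharsets.UTF_8)));
            // Deletes are column updates too, added with putDelete().
            m.putDelete("family", "staleQualifier");
            writer.addMutation(m);
        } // closing the BatchWriter flushes any buffered Mutations
    }
}

Several of the examples below use the older Connector/BatchWriterConfig API instead of the client builder; the Mutation calls themselves are the same in both.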
Example #1
Source File: MetricAdapter.java    From timely with Apache License 2.0
public static Mutation toMutation(Metric metric) {
    final Mutation mutation = new Mutation(encodeRowKey(metric));
    List<Tag> tags = metric.getTags();
    tags = escapeDelimiters(tags);
    Collections.sort(tags);

    for (final Tag entry : tags) {
        if (entry.getKey().equals(VISIBILITY_TAG))
            continue;

        final String cf = entry.join();
        // @formatter:off
        String cq = tags.stream().filter(inner -> !inner.equals(entry))
                .filter(inner -> !inner.getKey().equals(VISIBILITY_TAG))
                .map(Tag::join)
                .collect(Collectors.joining(","));
        // @formatter:on
        byte[] cqBytes = encodeColQual(metric.getValue().getTimestamp(), cq);
        mutation.put(new Text(cf.getBytes(Charset.forName("UTF-8"))), new Text(cqBytes), extractVisibility(tags),
                metric.getValue().getTimestamp(), extractValue(metric));
    }
    return mutation;
}
 
Example #2
Source File: JoinSelectStatsUtil.java    From rya with Apache License 2.0
public static Mutation createMutation(TripleRow tripleRow) {
  Mutation mutation = new Mutation(new Text(tripleRow.getRow()));
  byte[] columnVisibility = tripleRow.getColumnVisibility();
  ColumnVisibility cv = columnVisibility == null ? EMPTY_CV : new ColumnVisibility(columnVisibility);
  Long timestamp = tripleRow.getTimestamp();
  boolean hasts = timestamp != null;
  timestamp = timestamp == null ? 0L : timestamp;
  byte[] value = tripleRow.getValue();
  Value v = value == null ? EMPTY_VALUE : new Value(value);
  byte[] columnQualifier = tripleRow.getColumnQualifier();
  Text cqText = columnQualifier == null ? EMPTY_TEXT : new Text(columnQualifier);
  byte[] columnFamily = tripleRow.getColumnFamily();
  Text cfText = columnFamily == null ? EMPTY_TEXT : new Text(columnFamily);

  if (hasts) {
    mutation.put(cfText, cqText, cv, timestamp, v);
  } else {
    mutation.put(cfText, cqText, cv, v);
  }
  return mutation;
}
 
Example #3
Source File: PcjIntegrationTestingUtil.java    From rya with Apache License 2.0
/**
 * Create the {@link Mutation}s required to write a new {@link BindingSet}
 * to a PCJ table for each {@link VariableOrder} that is provided.
 *
 * @param varOrders
 *            - The variables orders the result will be written to. (not
 *            null)
 * @param result
 *            - A new PCJ result. (not null)
 * @return The Mutations that will write the result to a PCJ table.
 * @throws PcjException
 *             The binding set could not be encoded.
 */
private static Set<Mutation> makeWriteResultMutations(
        final Set<VariableOrder> varOrders, final BindingSet result)
                throws PcjException {
    checkNotNull(varOrders);
    checkNotNull(result);

    final Set<Mutation> mutations = new HashSet<>();

    for (final VariableOrder varOrder : varOrders) {
        try {
            // Serialize the result to the variable order.
            final byte[] serializedResult = converter.convert(result, varOrder);

            // Row ID = binding set values, Column Family = variable order
            // of the binding set.
            final Mutation addResult = new Mutation(serializedResult);
            addResult.put(varOrder.toString(), "", "");
            mutations.add(addResult);
        } catch (final BindingSetConversionException e) {
            throw new PcjException("Could not serialize a result.", e);
        }
    }

    return mutations;
}
 
Example #4
Source File: buggyMockTable.java    From coming with MIT License
synchronized void addMutation(Mutation m) {
  long now = System.currentTimeMillis();
  mutationCount++;
  for (ColumnUpdate u : m.getUpdates()) {
    Key key = new Key(m.getRow(), 0, m.getRow().length, u.getColumnFamily(), 0, u.getColumnFamily().length, u.getColumnQualifier(), 0,
        u.getColumnQualifier().length, u.getColumnVisibility(), 0, u.getColumnVisibility().length, u.getTimestamp());
    if (u.isDeleted())
      key.setDeleted(true);
    if (!u.hasTimestamp())
      if (timeType.equals(TimeType.LOGICAL))
        key.setTimestamp(mutationCount);
      else
        key.setTimestamp(now);
    
    table.put(new MockMemKey(key, mutationCount), new Value(u.getValue()));
  }
}
 
Example #5
Source File: AlphaNumKeyConstraint.java    From accumulo-examples with Apache License 2.0
@Override
public List<Short> check(Environment env, Mutation mutation) {
  Set<Short> violations = null;

  if (!isAlphaNum(mutation.getRow()))
    violations = addViolation(violations, NON_ALPHA_NUM_ROW);

  Collection<ColumnUpdate> updates = mutation.getUpdates();
  for (ColumnUpdate columnUpdate : updates) {
    if (!isAlphaNum(columnUpdate.getColumnFamily()))
      violations = addViolation(violations, NON_ALPHA_NUM_COLF);

    if (!isAlphaNum(columnUpdate.getColumnQualifier()))
      violations = addViolation(violations, NON_ALPHA_NUM_COLQ);
  }

  return null == violations ? null : new ArrayList<>(violations);
}
 
Example #6
Source File: ElementMutationBuilder.java    From vertexium with Apache License 2.0
public void addPropertyMetadataItemToMutation(
    Mutation m,
    ElementId elementId,
    Property property,
    String metadataKey,
    Object metadataValue,
    Visibility visibility
) {
    Text columnQualifier = getPropertyMetadataColumnQualifierText(property, metadataKey);
    ColumnVisibility metadataVisibility = visibilityToAccumuloVisibility(visibility);
    if (metadataValue == null) {
        addPropertyMetadataItemDeleteToMutation(m, columnQualifier, metadataVisibility);
    } else {
        addPropertyMetadataItemAddToMutation(
            m,
            elementId,
            property,
            columnQualifier,
            metadataVisibility,
            metadataKey,
            visibility,
            metadataValue
        );
    }
}
 
Example #7
Source File: ElementMutationBuilder.java    From vertexium with Apache License 2.0
private <T extends Element> void createMutationForElementBuilder(AccumuloGraph graph, ElementBuilder<T> elementBuilder, String rowKey, Mutation m) {
    for (PropertyDeleteMutation propertyDeleteMutation : elementBuilder.getPropertyDeletes()) {
        addPropertyDeleteToMutation(m, propertyDeleteMutation);
    }
    for (PropertySoftDeleteMutation propertySoftDeleteMutation : elementBuilder.getPropertySoftDeletes()) {
        addPropertySoftDeleteToMutation(m, propertySoftDeleteMutation);
    }
    for (Property property : elementBuilder.getProperties()) {
        addPropertyToMutation(graph, m, elementBuilder, rowKey, property);
    }
    for (AdditionalVisibilityAddMutation additionalVisibility : elementBuilder.getAdditionalVisibilities()) {
        addAdditionalVisibilityToMutation(m, additionalVisibility);
    }
    for (AdditionalVisibilityDeleteMutation additionalVisibilityDelete : elementBuilder.getAdditionalVisibilityDeletes()) {
        addAdditionalVisibilityDeleteToMutation(m, additionalVisibilityDelete);
    }
    for (MarkPropertyHiddenMutation markPropertyHidden : elementBuilder.getMarkPropertyHiddenMutations()) {
        addMarkPropertyHiddenToMutation(m, markPropertyHidden);
    }
    for (MarkPropertyVisibleMutation markPropertyVisible : elementBuilder.getMarkPropertyVisibleMutations()) {
        addMarkPropertyVisibleToMutation(m, markPropertyVisible);
    }
    Iterable<ExtendedDataMutation> extendedData = elementBuilder.getExtendedData();
    saveExtendedDataMarkers(m, extendedData);
}
 
Example #8
Source File: AccumuloKeyValuePairGenerator.java    From geowave with Apache License 2.0
public List<KeyValue> constructKeyValuePairs(final T entry) {
  final List<KeyValue> keyValuePairs = new ArrayList<>();
  final GeoWaveRow[] rows =
      BaseDataStoreUtils.getGeoWaveRows(entry, adapter, index, visibilityWriter);
  if ((rows != null) && (rows.length > 0)) {
    for (final GeoWaveRow row : rows) {
      final Mutation m = AccumuloWriter.rowToMutation(row);
      for (final ColumnUpdate cu : m.getUpdates()) {
        keyValuePairs.add(
            new KeyValue(
                new Key(
                    m.getRow(),
                    cu.getColumnFamily(),
                    cu.getColumnQualifier(),
                    cu.getColumnVisibility(),
                    cu.getTimestamp()),
                cu.getValue()));
      }
    }
  }

  return keyValuePairs;
}
 
Example #9
Source File: ChunkInputFormatIT.java    From accumulo-examples with Apache License 2.0
@Test
public void testInfoWithoutChunks() throws Exception {
  client.tableOperations().create(tableName);
  BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
  for (Entry<Key,Value> e : baddata) {
    Key k = e.getKey();
    Mutation m = new Mutation(k.getRow());
    m.put(k.getColumnFamily(), k.getColumnQualifier(),
        new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
    bw.addMutation(m);
  }
  bw.close();

  assertEquals(0, CIFTester.main(tableName, CIFTester.TestBadData.class.getName()));
  assertEquals(1, assertionErrors.get(tableName).size());
}
 
Example #10
Source File: CharacterHistogram.java    From accumulo-examples with Apache License 2.0
public static void main(String[] args) throws Exception {
  Opts opts = new Opts();
  opts.parseArgs(CharacterHistogram.class.getName(), args);

  Job job = Job.getInstance(opts.getHadoopConfig());
  job.setJobName(CharacterHistogram.class.getSimpleName());
  job.setJarByClass(CharacterHistogram.class);
  job.setInputFormatClass(ChunkInputFormat.class);
  job.getConfiguration().set(VIS, opts.visibilities);
  job.setMapperClass(HistMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Mutation.class);

  job.setNumReduceTasks(0);

  job.setOutputFormatClass(AccumuloOutputFormat.class);
  AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
      .defaultTable(opts.tableName).createTables(true);

  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Example #11
Source File: EntityCentricIndex.java    From rya with Apache License 2.0
protected Mutation deleteMutation(final TripleRow tripleRow) {
    final Mutation m = new Mutation(new Text(tripleRow.getRow()));

    final byte[] columnFamily = tripleRow.getColumnFamily();
    final Text cfText = columnFamily == null ? EMPTY_TEXT : new Text(columnFamily);

    final byte[] columnQualifier = tripleRow.getColumnQualifier();
    final Text cqText = columnQualifier == null ? EMPTY_TEXT : new Text(columnQualifier);

    final byte[] columnVisibility = tripleRow.getColumnVisibility();
    final ColumnVisibility cv = columnVisibility == null ? EMPTY_CV : new ColumnVisibility(columnVisibility);

    m.putDelete(cfText, cqText, cv, tripleRow.getTimestamp());
    return m;
}
 
Example #12
Source File: TestAccumuloStorage.java    From spork with Apache License 2.0
@Test
public void testWrite2TupleWithColumnQual() throws IOException,
        ParseException {
    AccumuloStorage storage = new AccumuloStorage("col:qual");

    Tuple t = TupleFactory.getInstance().newTuple(2);
    t.set(0, "row");
    t.set(1, "value");

    Collection<Mutation> mutations = storage.getMutations(t);

    Assert.assertEquals(1, mutations.size());

    Mutation m = mutations.iterator().next();

    Assert.assertTrue("Rows not equal",
            Arrays.equals(m.getRow(), ((String) t.get(0)).getBytes()));

    List<ColumnUpdate> colUpdates = m.getUpdates();
    Assert.assertEquals(1, colUpdates.size());

    ColumnUpdate colUpdate = colUpdates.get(0);
    Assert.assertTrue("CF not equal",
            Arrays.equals(colUpdate.getColumnFamily(), "col".getBytes()));
    Assert.assertTrue("CQ not equal", Arrays.equals(
            colUpdate.getColumnQualifier(), "qual".getBytes()));
    Assert.assertTrue("Values not equal",
            Arrays.equals(colUpdate.getValue(), "value".getBytes()));
}
 
Example #13
Source File: DateIndexHelperTest.java    From datawave with Apache License 2.0
private static void write(String shardDate, int[] shardIndicies, String dataType, String type, String dateField, String dateValue, String visibility)
                throws ParseException, IOException, InterruptedException {
    ColumnVisibility vis = new ColumnVisibility(visibility);
    KeyValue kv = getDateIndexEntry(shardDate, shardIndicies, dataType, type, dateField, dateValue, vis);
    Mutation m = new Mutation(kv.getKey().getRow());
    m.put(kv.getKey().getColumnFamily(), kv.getKey().getColumnQualifier(), vis, kv.getKey().getTimestamp(), kv.getValue());
    recordWriter.write(new Text(TableName.DATE_INDEX), m);
}
 
Example #14
Source File: HiveAccumuloTableOutputFormat.java    From accumulo-hive-storage-manager with Apache License 2.0
@Override
public org.apache.hadoop.mapred.RecordWriter<Text, Mutation>
getRecordWriter(FileSystem fileSystem,
                JobConf jobConf,
                String s,
                Progressable progressable) throws IOException {
    throw new RuntimeException("Hive should not invoke this method");
}
 
Example #15
Source File: NumericValueConstraintTest.java    From accumulo-examples with Apache License 2.0
@Test
public void testCheck() {
  Mutation goodMutation = new Mutation(new Text("r"));
  goodMutation.put(new Text("cf"), new Text("cq"), new Value("1234".getBytes()));
  assertNull(nvc.check(null, goodMutation));

  // Check that multiple bad mutations result in one violation only
  Mutation badMutation = new Mutation(new Text("r"));
  badMutation.put(new Text("cf"), new Text("cq"), new Value("foo1234".getBytes()));
  badMutation.put(new Text("cf2"), new Text("cq2"), new Value("foo1234".getBytes()));
  assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE,
      Iterables.getOnlyElement(nvc.check(null, badMutation)).shortValue());
}
 
Example #16
Source File: AccumuloLastNStore.java    From accumulo-recipes with Apache License 2.0
/**
 * Add the index which will be managed by the versioning iterator and the data rows to scan from the index
 *
 * @param group
 * @param entry
 */
@Override
public void put(String group, Event entry) {
    checkNotNull(group);
    checkNotNull(entry);

    // first put the main index pointing to the contextId (the column family is prefixed with the NULL_BYTE to guarantee it shows up first)
    Mutation indexMutation = new Mutation(group);
    indexMutation.put(NULL_BYTE + "INDEX", "", new ColumnVisibility(), entry.getTimestamp(), new Value((entry.getType() + ONE_BYTE + entry.getId()).getBytes()));

    for (Attribute attribute : entry.getAttributes()) {
        String fam = String.format("%s%s%s%s", END_BYTE, entry.getType(), ONE_BYTE, entry.getId());
        Object value = attribute.getValue();
        try {
            String serialize = typeRegistry.encode(value);
            String aliasForType = typeRegistry.getAlias(value);
            String qual = String.format("%s%s%s%s%s", attribute.getKey(), NULL_BYTE, serialize, NULL_BYTE, aliasForType);
            indexMutation.put(fam, qual, new ColumnVisibility(getVisibility(attribute, "")), entry.getTimestamp(),
                    new Value("".getBytes()));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    try {
        writer.addMutation(indexMutation);
    } catch (MutationsRejectedException ex) {
        throw new RuntimeException("There was an error writing the mutation for [index=" + group + ",entryId=" + entry.getId() + "]", ex);
    }
}
 
Example #17
Source File: TestAccumuloStorage.java    From spork with Apache License 2.0
@Test
public void testWriteIgnoredExtraMap() throws IOException, ParseException {
    AccumuloStorage storage = new AccumuloStorage("col1");

    Map<String, Object> map = Maps.newHashMap();

    map.put("mapcol1", "mapval1");
    map.put("mapcol2", "mapval2");
    map.put("mapcol3", "mapval3");
    map.put("mapcol4", "mapval4");

    Tuple t = TupleFactory.getInstance().newTuple(3);
    t.set(0, "row");
    t.set(1, "value1");
    t.set(2, map);

    Collection<Mutation> mutations = storage.getMutations(t);

    Assert.assertEquals(1, mutations.size());

    Mutation m = mutations.iterator().next();

    Assert.assertTrue("Rows not equal",
            Arrays.equals(m.getRow(), ((String) t.get(0)).getBytes()));

    List<ColumnUpdate> colUpdates = m.getUpdates();
    Assert.assertEquals(1, colUpdates.size());

    ColumnUpdate update = colUpdates.get(0);
    Assert.assertEquals("col1", new String(update.getColumnFamily()));
    Assert.assertEquals("", new String(update.getColumnQualifier()));
    Assert.assertEquals("value1", new String(update.getValue()));
}
 
Example #18
Source File: EntityCentricIndexTest.java    From rya with Apache License 2.0
@Test
public void testSerializeStatement() throws RyaTypeResolverException {
    Collection<Mutation> indexMutations = EntityCentricIndex.createMutations(ryaStatement);
    Assert.assertEquals("Serialization should produce two rows: subject-centric and object-centric.",
            2, indexMutations.size());
    Assert.assertTrue("Serialization of RyaStatement failed to create equivalent subject-centric row.",
            indexMutations.contains(createMutationFromKeyValue(subjectCentricKey, value)));
    Assert.assertTrue("Serialization of RyaStatement failed to create equivalent object-centric row.",
            indexMutations.contains(createMutationFromKeyValue(objectCentricKey, value)));
}
 
Example #19
Source File: WordCount.java    From accumulo-examples with Apache License 2.0
public static void main(String[] args) throws Exception {
  Opts opts = new Opts();
  opts.parseArgs(WordCount.class.getName(), args);

  // Create Accumulo table and attach Summing iterator
  try (AccumuloClient client = opts.createAccumuloClient()) {
    client.tableOperations().create(opts.tableName);
    IteratorSetting is = new IteratorSetting(10, SummingCombiner.class);
    SummingCombiner.setColumns(is,
        Collections.singletonList(new IteratorSetting.Column("count")));
    SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
    client.tableOperations().attachIterator(opts.tableName, is);
  } catch (TableExistsException e) {
    // ignore
  }

  // Create M/R job
  Job job = Job.getInstance(opts.getHadoopConfig());
  job.setJobName(WordCount.class.getName());
  job.setJarByClass(WordCount.class);
  job.setInputFormatClass(TextInputFormat.class);
  TextInputFormat.setInputPaths(job, new Path(opts.inputDirectory));

  job.setMapperClass(MapClass.class);
  job.setNumReduceTasks(0);
  job.setOutputFormatClass(AccumuloOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Mutation.class);

  if (opts.hdfsPath != null) {
    AccumuloOutputFormat.configure().clientPropertiesPath(opts.hdfsPath)
        .defaultTable(opts.tableName).store(job);
  } else {
    AccumuloOutputFormat.configure().clientProperties(opts.getClientProperties())
        .defaultTable(opts.tableName).store(job);
  }
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Example #20
Source File: ProspectorUtils.java    From rya with Apache License 2.0
public static void initMRJob(final Job job, final String table, final String outtable, final String[] auths) throws AccumuloSecurityException {
    final Configuration conf = job.getConfiguration();
    final String username = conf.get(USERNAME);
    final String password = conf.get(PASSWORD);
    final String instance = conf.get(INSTANCE);
    final String zookeepers = conf.get(ZOOKEEPERS);
    final String mock = conf.get(MOCK);

    //input
    if (Boolean.parseBoolean(mock)) {
        AccumuloInputFormat.setMockInstance(job, instance);
        AccumuloOutputFormat.setMockInstance(job, instance);
    } else if (zookeepers != null) {
        AccumuloInputFormat.setZooKeeperInstance(job, instance, zookeepers);
        AccumuloOutputFormat.setZooKeeperInstance(job, instance, zookeepers);
    } else {
        throw new IllegalArgumentException("Must specify either mock or zookeepers");
    }

    AccumuloInputFormat.setConnectorInfo(job, username, new PasswordToken(password.getBytes(StandardCharsets.UTF_8)));
    AccumuloInputFormat.setInputTableName(job, table);
    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(auths));

    // OUTPUT
    job.setOutputFormatClass(AccumuloOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
    AccumuloOutputFormat.setConnectorInfo(job, username, new PasswordToken(password.getBytes(StandardCharsets.UTF_8)));
    AccumuloOutputFormat.setDefaultTableName(job, outtable);
}
 
Example #21
Source File: RyaTableMutationsFactory.java    From rya with Apache License 2.0
public Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> serializeDelete(
        RyaStatement stmt) throws IOException {

    Collection<Mutation> spo_muts = new ArrayList<Mutation>();
    Collection<Mutation> po_muts = new ArrayList<Mutation>();
    Collection<Mutation> osp_muts = new ArrayList<Mutation>();
    /**
     * TODO: If there are contexts, do we still replicate the information into the default graph as well
     * as the named graphs?
     */
    try {
        Map<TABLE_LAYOUT, TripleRow> rowMap = ryaContext.serializeTriple(stmt);
        TripleRow tripleRow = rowMap.get(TABLE_LAYOUT.SPO);
        spo_muts.add(deleteMutation(tripleRow));
        tripleRow = rowMap.get(TABLE_LAYOUT.PO);
        po_muts.add(deleteMutation(tripleRow));
        tripleRow = rowMap.get(TABLE_LAYOUT.OSP);
        osp_muts.add(deleteMutation(tripleRow));
    } catch (TripleRowResolverException fe) {
        throw new IOException(fe);
    }

    Map<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>> mutations =
            new HashMap<RdfCloudTripleStoreConstants.TABLE_LAYOUT, Collection<Mutation>>();
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.SPO, spo_muts);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.PO, po_muts);
    mutations.put(RdfCloudTripleStoreConstants.TABLE_LAYOUT.OSP, osp_muts);

    return mutations;
}
 
Example #22
Source File: AccumuloItem.java    From cognition with Apache License 2.0
/**
 * Adds the contents of <code>items</code> to the row represented by <code>out</code> as Column Qualifiers and Values
 * in the Column Family <code>columnFamily</code>
 *
 * @param out
 * @param columnFamily
 * @param items
 */
public static void writeMap(Mutation out, String columnFamily, Map<String, String> items) {
  if (items == null) {
    return;
  }

  for (Entry<String, String> entry : items.entrySet()) {
    if (!(entry.getKey() == null || entry.getValue() == null)) {
      out.put(columnFamily, entry.getKey(), entry.getValue());
    }
  }
}
 
Example #23
Source File: MultiTableRRRangePartitionerTest.java    From datawave with Apache License 2.0
@Test(expected = RuntimeException.class)
public void testProblemGettingLocalCacheFiles() throws IOException, URISyntaxException {
    final URL url = createUrl("full_splits.txt");
    
    MultiTableRangePartitioner.setContext(new MapContextImpl<Key,Value,Text,Mutation>(configuration, new TaskAttemptID(), null, null, null, null, null) {
        @Override
        public org.apache.hadoop.fs.Path[] getLocalCacheFiles() throws IOException {
            throw new IOException("Local cache files failure");
        }
    });
    
    getPartition("23432");
}
 
Example #24
Source File: TestAbstractAccumuloStorage.java    From spork with Apache License 2.0
public static AbstractAccumuloStorage getAbstractAccumuloStorage(
        String columns, String args) throws ParseException, IOException {
    return new AbstractAccumuloStorage(columns, args) {

        @Override
        public Collection<Mutation> getMutations(Tuple tuple) {
            return null;
        }

        @Override
        protected Tuple getTuple(Key key, Value value) throws IOException {
            return null;
        }
    };
}
 
Example #25
Source File: GroupingAccumuloWriter.java    From datawave with Apache License 2.0
private void writeShardIndexKeys(BatchWriterConfig bwConfig, final List<Map.Entry<Multimap<String,String>,UID>> data, String table, boolean reverse)
                throws MutationsRejectedException, TableNotFoundException {
    Map<String,RawMetaData> meta = this.cfgData.getMetadata();
    Set<String> fields;
    if (reverse) {
        fields = this.fieldConfig.getReverseIndexFields();
    } else {
        fields = this.fieldConfig.getIndexFields();
    }
    try (BatchWriter bw = this.conn.createBatchWriter(table, bwConfig)) {
        for (Map.Entry<Multimap<String,String>,UID> rawEntries : data) {
            UID uid = rawEntries.getValue();
            Multimap<String,String> rawData = rawEntries.getKey();
            String shardId = extractShard(rawData);
            long timestamp = shardDateToMillis(shardId);
            shardId = shardId + "_0";
            
            for (Map.Entry<String,String> entry : rawData.entries()) {
                if (fields.contains(entry.getKey())) {
                    Normalizer<?> norm = meta.get(entry.getKey().toLowerCase()).normalizer;
                    String normVal = norm.normalize(entry.getValue());
                    if (reverse) {
                        normVal = new StringBuilder(normVal).reverse().toString();
                    }
                    Mutation mut = new Mutation(normVal);
                    Uid.List.Builder builder = Uid.List.newBuilder();
                    builder.addUID(uid.toString());
                    builder.setCOUNT(1);
                    builder.setIGNORE(false);
                    mut.put(entry.getKey().toUpperCase(), shardId + NULL_SEP + this.dataType, this.cfgData.getDefaultVisibility(), timestamp, new Value(
                                    builder.build().toByteArray()));
                    bw.addMutation(mut);
                }
            }
        }
    }
}
 
Example #26
Source File: AccumuloEventStorageBolt.java    From cognition with Apache License 2.0
void populateMutation(Mutation mut, String columnFamily, ColumnVisibility vis, Map<String, String> fields) {
  for (Map.Entry<String, String> entry : fields.entrySet()) {
    String key = defaultString(entry.getKey());
    String value = defaultString(entry.getValue());

    mut.put(columnFamily, key, vis, new Value(value.getBytes()));
  }
}
 
Example #27
Source File: ElementMutationBuilder.java    From vertexium with Apache License 2.0
public boolean alterEdgeVertexOutVertex(Mutation vertexOutMutation, Edge edge, Visibility newVisibility) {
    ColumnVisibility currentColumnVisibility = visibilityToAccumuloVisibility(edge.getVisibility());
    ColumnVisibility newColumnVisibility = visibilityToAccumuloVisibility(newVisibility);
    if (currentColumnVisibility.equals(newColumnVisibility)) {
        return false;
    }
    AccumuloEdgeInfo edgeInfo = new AccumuloEdgeInfo(getNameSubstitutionStrategy().deflate(edge.getLabel()), edge.getVertexId(Direction.IN));
    vertexOutMutation.putDelete(AccumuloVertex.CF_OUT_EDGE, new Text(edge.getId()), currentColumnVisibility);
    vertexOutMutation.put(AccumuloVertex.CF_OUT_EDGE, new Text(edge.getId()), newColumnVisibility, edgeInfo.toValue());
    return true;
}
 
Example #28
Source File: GroupingAccumuloWriter.java    From datawave with Apache License 2.0 5 votes vote down vote up
private void writeMetaData(BatchWriterConfig bwConfig, final List<Map.Entry<Multimap<String,String>,UID>> data) throws MutationsRejectedException,
                TableNotFoundException {
    Text dtText = new Text(this.dataType);
    Map<String,RawMetaData> meta = this.cfgData.getMetadata();
    try (BatchWriter bw = this.conn.createBatchWriter(QueryTestTableHelper.METADATA_TABLE_NAME, bwConfig)) {
        for (Map.Entry<Multimap<String,String>,UID> entry : data) {
            Multimap<String,String> rawData = entry.getKey();
            String shardDate = extractShard(rawData);
            
            for (String column : rawData.keySet()) {
                if (meta.containsKey(column.toLowerCase())) {
                    Mutation mut = new Mutation(column);
                    mut.put(ColumnFamilyConstants.COLF_E, dtText, EMPTY_VALUE);
                    Value colVal = new Value(SummingCombiner.VAR_LEN_ENCODER.encode((long) rawData.get(column).size()));
                    mut.put(ColumnFamilyConstants.COLF_F, new Text(this.dataType + NULL_SEP + shardDate), colVal);
                    if (this.fieldConfig.getIndexFields().contains(column)) {
                        mut.put(ColumnFamilyConstants.COLF_I, dtText, EMPTY_VALUE);
                    }
                    if (this.fieldConfig.getReverseIndexFields().contains(column)) {
                        mut.put(ColumnFamilyConstants.COLF_RI, dtText, EMPTY_VALUE);
                    }
                    Normalizer<?> norm = meta.get(column.toLowerCase()).normalizer;
                    String type = getNormalizerTypeName(norm);
                    mut.put(ColumnFamilyConstants.COLF_T, new Text(this.dataType + NULL_SEP + type), EMPTY_VALUE);
                    
                    bw.addMutation(mut);
                } else {
                    log.debug("skipping col entry(" + column + ")");
                }
            }
        }
    }
}
 
Example #29
Source File: DownsampleIteratorTest.java    From timely with Apache License 2.0
void put(Map<Key, Value> testData, Metric m) {
    Mutation mutation = MetricAdapter.toMutation(m);
    for (ColumnUpdate cu : mutation.getUpdates()) {
        Key key = new Key(mutation.getRow(), cu.getColumnFamily(), cu.getColumnQualifier(),
                cu.getColumnVisibility(), cu.getTimestamp());
        testData.put(key, new Value(cu.getValue()));
    }
}
 
Example #30
Source File: AccumuloSetupHelper.java    From datawave with Apache License 2.0
private void ingestTestData(Configuration conf, TestFileLoader loader) throws IOException, InterruptedException {
    log.debug("------------- ingestTestData -------------");
    
    File tmpDir = new File(System.getProperty("java.io.tmpdir"));
    Path tmpPath = new Path(tmpDir.toURI());
    Path seqFile = new Path(tmpPath, UUID.randomUUID().toString());
    
    TaskAttemptID id = new TaskAttemptID("testJob", 0, TaskType.MAP, 0, 0);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, id);
    
    try (final RawLocalFileSystem rfs = createSequenceFile(conf, seqFile, loader)) {
        InputSplit split = new FileSplit(seqFile, 0, rfs.pathToFile(seqFile).length(), null);
        EventSequenceFileRecordReader<LongWritable> rr = new EventSequenceFileRecordReader<>();
        rr.initialize(split, context);
        
        Path ocPath = new Path(tmpPath, "oc");
        OutputCommitter oc = new FileOutputCommitter(ocPath, context);
        rfs.deleteOnExit(ocPath);
        
        StandaloneStatusReporter sr = new StandaloneStatusReporter();
        EventMapper<LongWritable,RawRecordContainer,Text,Mutation> mapper = new EventMapper<>();
        MapContext<LongWritable,RawRecordContainer,Text,Mutation> mapContext = new MapContextImpl<>(conf, id, rr, this.recordWriter, oc, sr, split);
        
        Mapper<LongWritable,RawRecordContainer,Text,Mutation>.Context con = new WrappedMapper<LongWritable,RawRecordContainer,Text,Mutation>()
                        .getMapContext(mapContext);
        mapper.run(con);
        mapper.cleanup(con);
    } finally {
        this.recordWriter.close(context);
    }
}