Java Code Examples for org.codehaus.jackson.node.ObjectNode#has()

The following examples show how to use org.codehaus.jackson.node.ObjectNode#has(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
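In the org.codehaus.jackson (Jackson 1.x) tree model, has() reports whether an object node contains a field at all, regardless of whether that field's value is JSON null; pair it with get(...).isNull() when you need to tell a missing field apart from an explicit null. Below is a minimal, self-contained sketch of that pattern (the class name ObjectNodeHasDemo and the JSON field names are illustrative, not taken from the projects that follow):

import java.io.IOException;

import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;

public class ObjectNodeHasDemo {
    public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode node =
                (ObjectNode) mapper.readTree("{\"name\":\"cubert\",\"reduce\":null}");

        System.out.println(node.has("name"));    // true
        System.out.println(node.has("reduce"));  // true: field present, value is JSON null
        System.out.println(node.has("map"));     // false: field absent

        // Guard used by several examples below: (re)create the field only when it
        // is missing or explicitly null.
        if (!node.has("reduce") || node.get("reduce").isNull()) {
            node.put("reduce", mapper.createArrayNode());
        }
    }
}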
Example 1
Source File: ShuffleRewriter.java    From Cubert with Apache License 2.0
private JsonNode rewriteDistinct(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String name = getText(shuffle, "name");

    ObjectNode distinctOp =
            JsonUtils.createObjectNode("operator",
                                       "DISTINCT",
                                       "input",
                                       name,
                                       "output",
                                       name);

    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, distinctOp);

    shuffle.put("type", "SHUFFLE");
    shuffle.put("distinctShuffle", true);

    return newJob;
}
 
Example 2
Source File: CountDistinctRewriter.java    From Cubert with Apache License 2.0
private String[] getCountDistinctMeasuresForGroupBy(ObjectNode operatorNode)
{
    ArrayList<String> measureCols = new ArrayList<String>();
    if (!operatorNode.get("operator").getTextValue().equalsIgnoreCase("GROUP_BY"))
        return null;

    if (!operatorNode.has("aggregates"))
        return null;

    for (JsonNode aggregateJson : operatorNode.path("aggregates"))
    {
        // Create the aggregator object
        AggregationType aggType =
                AggregationType.valueOf(JsonUtils.getText(aggregateJson, "type"));
        if (!aggType.toString().equalsIgnoreCase("COUNT_DISTINCT"))
            continue;
        String[] measureColumns = JsonUtils.asArray(aggregateJson.get("input"));
        measureCols.addAll(Arrays.asList(measureColumns));
    }
    return measureCols.toArray(new String[measureCols.size()]);
}
 
Example 3
Source File: GenericEntityDeserializer.java    From secure-data-service with Apache License 2.0
@Override
public GenericEntity deserialize(JsonParser parser, DeserializationContext context) throws IOException {
    
    ObjectMapper mapper = (ObjectMapper) parser.getCodec();
    ObjectNode root = (ObjectNode) mapper.readTree(parser);
    
    String entityType = null;
    
    if (root.has(ENTITY_TYPE_KEY)) {
        entityType = root.get(ENTITY_TYPE_KEY).getTextValue();
        root.remove(ENTITY_TYPE_KEY);
    }
    
    Map<String, Object> data = processObject(root);
    if (entityType != null) {
        return new GenericEntity(entityType, data);
    } else {
        return new GenericEntity("Generic", data);
    }
}
 
Example 4
Source File: CriterionUtils.java    From bdf3 with Apache License 2.0
public static Criteria getCriteria(ObjectNode rudeCriteria) throws Exception {
	Criteria criteria = new Criteria();
	if (rudeCriteria.has("criterions")) {
		ArrayNode criterions = (ArrayNode) rudeCriteria.get("criterions");
		if (criterions != null) {
			for (Iterator<JsonNode> it = criterions.iterator(); it.hasNext();) {
				criteria.addCriterion(parseCriterion((ObjectNode) it.next()));
			}
		}
	}

	if (rudeCriteria.has("orders")) {
		ArrayNode orders = (ArrayNode) rudeCriteria.get("orders");
		if (orders != null) {
			for (Iterator<JsonNode> it = orders.iterator(); it.hasNext();) {
				ObjectNode rudeCriterion = (ObjectNode) it.next();
				Order order = new Order(JsonUtils.getString(rudeCriterion, "property"), JsonUtils.getBoolean(rudeCriterion, "desc"));
				criteria.addOrder(order);
			}
		}
	}
	return criteria;
}
 
Example 5
Source File: ActivityTO.java    From big-data-lite with MIT License
public void setActivityJson(ObjectNode objectNode) {
    this.objectNode = objectNode;
    ActivityType aType = null;

    int custId = objectNode.get(JsonConstant.CUST_ID).getIntValue();
    int movieId = objectNode.get(JsonConstant.MOVIE_ID).getIntValue();
    int activityType = objectNode.get(JsonConstant.ACTIVITY).getIntValue();
    String recommended = objectNode.get(JsonConstant.RECOMMENDED).getTextValue();
    String dateStr = objectNode.get(JsonConstant.TIME).getTextValue();
    double price = objectNode.get(JsonConstant.PRICE).getDoubleValue();
    int rating = objectNode.get(JsonConstant.RATING).getIntValue();
    int position = objectNode.get(JsonConstant.POSITION).getIntValue();
    
    //create ActivityType object
    aType = ActivityType.getType(activityType);

    //now set it to this object
    this.setCustId(custId);
    this.setMovieId(movieId);
    this.setTimeStamp(this.getTimeStamp(dateStr));
    this.setActivity(aType);
    this.setRecommended(BooleanType.getType(recommended));
    
    //Adding the optional one
    this.setPrice(price);
    this.setRating(RatingType.getType(rating));
    this.setPosition(position);
    
    //Set genreId only if it is present in the jsonObject
    if (objectNode.has(JsonConstant.GENRE_ID)) {
        int genreId = objectNode.get(JsonConstant.GENRE_ID).getIntValue();
        this.setGenreId(genreId);
    }

}
 
Example 6
Source File: Lineage.java    From Cubert with Apache License 2.0
public static boolean isCountDistinctAggregate(ObjectNode operatorNode)
{
    if (operatorNode.get("operator") == null)
      return false;

    String type = operatorNode.get("operator").getTextValue();
    if (!type.equals("GROUP_BY") && !type.equals("CUBE"))
        return false;

    if (!operatorNode.has("aggregates"))
        return false;

    for (JsonNode aggregateJson : operatorNode.path("aggregates"))
    {
        // Create the aggregator object
        JsonNode typeNode = aggregateJson.get("type");

        // Group by case
        if (typeNode.isTextual()){
          AggregationType aggType =
            AggregationType.valueOf(JsonUtils.getText(aggregateJson, "type"));
          String measureColumn = JsonUtils.getText(aggregateJson, "input");
          if (aggType != AggregationType.COUNT_DISTINCT)
            return false;
        }
        else if (typeNode instanceof ArrayNode){
          String[] typeArray = JsonUtils.asArray(aggregateJson, "type");
          if (!typeArray[0].equals("SUM") || !typeArray[1].equals("COUNT_TO_ONE"))
            return false;
        }
    }

    return true;
}
 
Example 7
Source File: PhysicalParser.java    From Cubert with Apache License 2.0
@Override
public void exitOutputCommand(@NotNull OutputCommandContext ctx)
{
    outputCommandNode.put("name", ctx.ID().get(0).getText());
    outputCommandNode.put("path", cleanPath(ctx.path()));
    if (ctx.format != null)
        outputCommandNode.put("type", ctx.format.getText().toUpperCase());
    else
        outputCommandNode.put("type", ctx.classname.getText());

    addLine(ctx, outputCommandNode);
    ObjectNode paramsNode = objMapper.createObjectNode();
    if (ctx.params() != null)
    {
        for (int i = 0; i < ctx.params().keyval().size(); i++)
        {
            List<TerminalNode> kv = ctx.params().keyval(i).STRING();
            paramsNode.put(CommonUtils.stripQuotes(kv.get(0).getText()),
                           CommonUtils.stripQuotes(kv.get(1).getText()));
        }
    }
    outputCommandNode.put("params", paramsNode);

    if (!paramsNode.has("overwrite"))
        paramsNode.put("overwrite", Boolean.toString(overwrite));

}
 
Example 8
Source File: ExecutorService.java    From Cubert with Apache License 2.0
private static void setupConf(JsonNode programNode)
{

    // copy the hadoopConf and libjars from global level to each job
    JsonNode globalHadoopConf = programNode.get("hadoopConf");
    JsonNode libjars = programNode.get("libjars");

    for (JsonNode json : programNode.path("jobs"))
    {
        ObjectNode job = (ObjectNode) json;

        // if there isn't local hadoop conf, then use the global conf
        if (!job.has("hadoopConf"))
        {
            job.put("hadoopConf", globalHadoopConf);
        }
        else
        {
            // if there are local conf properties, then copy only those properties
            // from global properties that are not already defined at local level
            ObjectNode localHadoopConf = (ObjectNode) job.get("hadoopConf");
            Iterator<String> it = globalHadoopConf.getFieldNames();
            while (it.hasNext())
            {
                String key = it.next();
                if (!localHadoopConf.has(key))
                {
                    localHadoopConf.put(key, globalHadoopConf.get(key));
                }
            }
        }

        if (libjars != null)
            job.put("libjars", libjars);
    }
}
 
Example 9
Source File: IndexerResource.java    From hbase-indexer with Apache License 2.0
/**
 * Update an index definition.
 */
@PUT
@Path("{name}")
@Consumes("application/json")
@Produces("application/json")
public IndexerDefinition put(@PathParam("name") String indexName, ObjectNode json) throws Exception {
    WriteableIndexerModel model = getModel();
    ObjectMapper m = new ObjectMapper();

    IndexerDefinition oldIndexer = model.getIndexer(indexName);
    IndexerDefinition indexerDefinition = IndexerDefinitionJsonSerDeser.INSTANCE.fromJson(json,
            new IndexerDefinitionBuilder().startFrom(oldIndexer)).build();

    IndexerDefinition.LifecycleState lifecycleState = json.has("lifecycleState") ?
            IndexerDefinition.LifecycleState.valueOf(json.get("lifecycleState").getTextValue()) : null;

    String lock = model.lockIndexer(indexName);
    try {
        if (!oldIndexer.equals(indexerDefinition)) {
            model.updateIndexer(indexerDefinition, lock);
            //System.out.println("Index updated: " + indexName);
        } else {
            //System.out.println("Index already matches the specified settings, did not update it.");
        }
    } finally {
        // In case we requested deletion of an index, it might be that the lock is already removed
        // by the time we get here as part of the index deletion.
        boolean ignoreMissing = lifecycleState != null && lifecycleState == IndexerDefinition.LifecycleState.DELETE_REQUESTED;
        model.unlockIndexer(lock, ignoreMissing);
    }

    return indexerDefinition;
}
 
Example 10
Source File: ShuffleRewriter.java    From Cubert with Apache License 2.0
private JsonNode rewriteBlockgen(JsonNode job)
{

    String blockgenType = job.get("shuffle").get("blockgenType").getTextValue();

    if (blockgenType.equalsIgnoreCase("BY_INDEX"))
    {
        return rewriteBlockgenByIndex(job);
    }
    // else: following is the rewrite of BLOCKGEN

    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    JsonNode blockgenTypeNode = shuffle.get("blockgenType");
    JsonNode blockgenValueNode = shuffle.get("blockgenValue");

    if (!shuffle.has("pivotKeys"))
        throw new PlanRewriteException("PivotKeys are not defined in SHUFFLE");

    // add CREATE_BLOCK operator in the reducer
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    ObjectNode createBlockOperator =
            createObjectNode("operator",
                             "CREATE_BLOCK",
                             "input",
                             shuffle.get("name"),
                             "output",
                             shuffle.get("name"),
                             "blockgenType",
                             blockgenTypeNode,
                             "blockgenValue",
                             blockgenValueNode,
                             "partitionKeys",
                             shuffle.get("partitionKeys"));
    copyLine(shuffle, createBlockOperator, "[REDUCE] ");
    reduce.insert(0, createBlockOperator);

    // add DISTINCT operator, if requested
    boolean isDistinct =
            shuffle.has("distinct") && shuffle.get("distinct").getBooleanValue();

    if (isDistinct)
    {
        ObjectNode distinct =
                createObjectNode("operator",
                                 "DISTINCT",
                                 "input",
                                 shuffle.get("name"),
                                 "output",
                                 shuffle.get("name"));
        copyLine(shuffle, distinct, "[REDUCE DISTINCT]");
        reduce.insert(0, distinct);
    }

    // the sort keys for the SHUFFLE are set to the actual
    // blockgen PARTITION KEYS. These sort keys are configured into the JsonNode for
    // the CREATE_BLOCK operator

    // clean up shuffle
    shuffle.remove("blockgenType");
    shuffle.remove("blockgenValue");
    shuffle.put("type", "SHUFFLE");
    shuffle.put("distinct", isDistinct);

    if (!CommonUtils.isPrefix(asArray(shuffle, "pivotKeys"),
                              asArray(shuffle, "partitionKeys")))
    {
        createBlockOperator.put("pivotKeys", shuffle.get("pivotKeys"));
        shuffle.put("pivotKeys", shuffle.get("partitionKeys"));
    }

    return newJob;
}
 
Example 11
Source File: ShuffleRewriter.java    From Cubert with Apache License 2.0
private JsonNode rewriteBlockgenByIndex(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");

    String path = getText(shuffle, "relation");

    // add a cache index
    String indexName = generateVariableName(namesUsed);
    if (!newJob.has("cacheIndex") || newJob.get("cacheIndex").isNull())
        newJob.put("cacheIndex", mapper.createArrayNode());
    ArrayNode cacheIndex = (ArrayNode) newJob.get("cacheIndex");
    cacheIndex.add(createObjectNode("name", indexName, "path", path));

    // create BLOCK-INDEX-JOIN operator
    ObjectNode blockIndexJoin =
            createObjectNode("operator",
                             "BLOCK_INDEX_JOIN",
                             "input",
                             shuffle.get("name"),
                             "output",
                             shuffle.get("name"),
                             "partitionKeys",
                             shuffle.get("partitionKeys"),
                             "index",
                             indexName);
    copyLine(shuffle, blockIndexJoin, "[MAP] ");
    // add it as the last operator for all mapper
    for (JsonNode map : newJob.path("map"))
    {
        if (!map.has("operators") || map.get("operators").isNull())
            ((ObjectNode) map).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) map.get("operators");
        // we need unique references for all blockIndexJoin
        operators.add(JsonUtils.cloneNode(blockIndexJoin));
    }

    // create CREATE-BLOCK operator
    ObjectNode createBlock =
            createObjectNode("operator",
                             "CREATE_BLOCK",
                             "input",
                             shuffle.get("name"),
                             "output",
                             shuffle.get("name"),
                             "blockgenType",
                             "BY_INDEX",
                             "index",
                             indexName,
                             "partitionKeys",
                             createArrayNode("BLOCK_ID"),
                             "indexPath",
                             path);

    copyLine(shuffle, createBlock, "[REDUCE] ");
    // add it as first operator in reduce
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, createBlock);

    // add DISTINCT operator, if requested
    boolean isDistinct =
            shuffle.has("distinct") && shuffle.get("distinct").getBooleanValue();

    if (isDistinct)
    {
        ObjectNode distinct =
                createObjectNode("operator",
                                 "DISTINCT",
                                 "input",
                                 shuffle.get("name"),
                                 "output",
                                 shuffle.get("name"));
        copyLine(shuffle, distinct, "[REDUCE DISTINCT] ");
        reduce.insert(0, distinct);
    }

    // blockgen by index uses a different partitioner
    shuffle.put("partitionerClass",
                "com.linkedin.cubert.plan.physical.ByIndexPartitioner");

    // clean up shuffle
    shuffle.put("type", "SHUFFLE");
    shuffle.put("partitionKeys", createArrayNode("BLOCK_ID"));
    shuffle.put("distinct", isDistinct);
    shuffle.put("index", indexName);
    shuffle.remove("blockgenType");
    shuffle.remove("relation");

    ArrayNode pivotKeys = mapper.createArrayNode();
    pivotKeys.add("BLOCK_ID");
    if (shuffle.has("pivotKeys"))
    {
        for (JsonNode key : shuffle.path("pivotKeys"))
            pivotKeys.add(key);
    }
    shuffle.put("pivotKeys", pivotKeys);

    return newJob;
}
 
Example 12
Source File: ShuffleRewriter.java    From Cubert with Apache License 2.0
private JsonNode rewriteCube(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String name = getText(shuffle, "name");
    JsonNode aggregates = shuffle.get("aggregates");

    // create the OLAP_CUBE_COUNT_DISTINCT operator
    ObjectNode cube =
            createObjectNode("operator",
                             "CUBE",
                             "input",
                             name,
                             "output",
                             name,
                             "dimensions",
                             shuffle.get("dimensions"),
                             "aggregates",
                             cloneNode(aggregates));

    if (shuffle.has("groupingSets"))
        cube.put("groupingSets", shuffle.get("groupingSets"));
    if (shuffle.has("innerDimensions"))
        cube.put("innerDimensions", shuffle.get("innerDimensions"));
    if (shuffle.has("hashTableSize"))
        cube.put("hashTableSize", shuffle.get("hashTableSize"));
    copyLine(shuffle, cube, "[MAP] ");

    // add it as the last operator for all mapper
    for (JsonNode map : newJob.path("map"))
    {
        if (!map.has("operators") || map.get("operators").isNull())
            ((ObjectNode) map).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) map.get("operators");
        operators.add(cube);
    }

    rewriteGroupByAggregateForCube(aggregates);

    // create the GROUP BY operator at the reducer
    ObjectNode groupBy =
            createObjectNode("operator",
                             "GROUP_BY",
                             "input",
                             name,
                             "output",
                             name,
                             "groupBy",
                             shuffle.get("dimensions"),
                             "aggregates",
                             aggregates);
    copyLine(shuffle, groupBy, "[REDUCE] ");
    // add it as first operator in reduce
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, groupBy);

    // clean up shuffle
    shuffle.put("type", "SHUFFLE");
    shuffle.put("aggregates", aggregates);
    shuffle.put("partitionKeys", shuffle.get("dimensions"));
    shuffle.put("pivotKeys", shuffle.get("dimensions"));
    shuffle.remove("dimensions");
    shuffle.remove("groupingSets");
    shuffle.remove("innerDimensions");

    return newJob;
}
 
Example 13
Source File: ShuffleRewriter.java    From Cubert with Apache License 2.0
private JsonNode rewriteJoin(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    JsonNode joinKeys = shuffle.get("joinKeys");
    String blockName = getText(shuffle, "name");

    // make sure there are two mappers in the job
    JsonNode mapJsons = newJob.get("map");
    if (mapJsons.size() != 2)
    {
        throw new RuntimeException("There must be exactly two multimappers for JOIN shuffle command.");
    }

    // Add the Map side operator in each of the mappers
    // tag = 1, for the first mapper (non dimensional)
    // tag = 0, for the second dimensional mapper
    int tag = 1;
    for (JsonNode mapJson: mapJsons)
    {
        if (!mapJson.has("operators") || mapJson.get("operators").isNull())
            ((ObjectNode) mapJson).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) mapJson.get("operators");

        // we need unique references for all blockIndexJoin
        operators.add(createObjectNode("operator", "REDUCE_JOIN_MAPPER",
                                       "input", createArrayNode(blockName),
                                       "output", blockName,
                                       "joinKeys", joinKeys,
                                       "tag", tag));
        tag --;
    }

    // create the reduce side operator
    ObjectNode reducerOperator = createObjectNode("operator", "REDUCE_JOIN",
                                                  "input", createArrayNode(blockName),
                                                  "output", blockName,
                                                  "joinKeys", joinKeys);
    if (shuffle.has("joinType"))
        reducerOperator.put("joinType", shuffle.get("joinType"));

    // add the reduce side operator
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
    {
        newJob.put("reduce", mapper.createArrayNode());
    }
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, reducerOperator);

    // Fix the shuffle json
    if (shuffle.has("partitionKeys"))
    {
        String[] partitionKeys = JsonUtils.asArray(shuffle, "partitionKeys");
        String[] joinKeyNames = JsonUtils.asArray(shuffle, "joinKeys");
        // make sure that partitionKeys is prefix of joinKeys
        if (!CommonUtils.isPrefix(joinKeyNames, partitionKeys))
        {
            throw new RuntimeException("Partition key must be a prefix of join keys");
        }
    } else {
        shuffle.put("partitionKeys", shuffle.get("joinKeys"));
    }
    // We will sort on (joinKeys + ___tag)
    JsonNode pivotKeys = cloneNode(shuffle.get("joinKeys"));
    ((ArrayNode) pivotKeys).add("___tag");

    shuffle.put("type", "SHUFFLE");
    shuffle.put("join", true);
    shuffle.put("pivotKeys", pivotKeys);
    shuffle.remove("joinKeys");

    return newJob;
}
 
Example 14
Source File: ShuffleRewriter.java    From Cubert with Apache License 2.0
private void copyLine(ObjectNode from, ObjectNode to, String prefix)
{
    if (from.has("line"))
        to.put("line", prefix + getText(from, "line"));
}