Java Code Examples for scala.collection.JavaConversions#seqAsJavaList()

The following examples show how to use scala.collection.JavaConversions#seqAsJavaList(). They are drawn from open-source projects; the source file, project, and license are noted above each example. Note that JavaConversions has been deprecated since Scala 2.12 in favor of the explicit scala.collection.JavaConverters (scala.jdk.CollectionConverters as of 2.13); seqAsJavaList wraps a Scala Seq as a java.util.List view rather than copying it.
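Before the project examples, here is a minimal, self-contained sketch of the conversion itself (the class name SeqAsJavaListDemo and the sample data are illustrative):

import java.util.Arrays;
import java.util.List;
import scala.collection.JavaConversions;
import scala.collection.Seq;

public class SeqAsJavaListDemo {
	public static void main(String[] args) {
		// Build a Scala Seq from a Java list (a mutable Buffer is a Seq).
		Seq<String> scalaSeq = JavaConversions.asScalaBuffer(Arrays.asList("a", "b", "c"));
		// Wrap it as a java.util.List; this is a view over the Seq, not a copy.
		List<String> javaList = JavaConversions.seqAsJavaList(scalaSeq);
		System.out.println(javaList); // prints [a, b, c]
	}
}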
Example 1
Source File: Script.java    From systemds with Apache License 2.0
/**
 * Pass a Scala Seq of inputs to the script. The inputs are either two-value
 * or three-value tuples, where the first value is the variable name, the
 * second value is the variable value, and the third optional value is the
 * metadata.
 *
 * @param inputs
 *            Scala Seq of inputs (parameters ($) and variables).
 * @return {@code this} Script object to allow chaining of methods
 */
public Script in(scala.collection.Seq<Object> inputs) {
	List<Object> list = JavaConversions.seqAsJavaList(inputs);
	for (Object obj : list) {
		if (obj instanceof Tuple3) {
			@SuppressWarnings("unchecked")
			Tuple3<String, Object, MatrixMetadata> t3 = (Tuple3<String, Object, MatrixMetadata>) obj;
			in(t3._1(), t3._2(), t3._3());
		} else if (obj instanceof Tuple2) {
			@SuppressWarnings("unchecked")
			Tuple2<String, Object> t2 = (Tuple2<String, Object>) obj;
			in(t2._1(), t2._2());
		} else {
			throw new MLContextException("Only Tuples of 2 or 3 values are permitted");
		}
	}
	return this;
}
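For context, a hedged sketch of a Java call site for this method follows; the script instance, the $X parameter, and myMatrix are illustrative assumptions, not part of the SystemDS source above:

// Hypothetical call site: build a Scala Seq of (name, value) tuples from Java.
scala.collection.Seq<Object> inputs = JavaConversions.asScalaBuffer(
		java.util.Arrays.<Object>asList(
				new scala.Tuple2<>("$X", 5),         // parameter
				new scala.Tuple2<>("m", myMatrix))); // variable (illustrative)
script.in(inputs); // each tuple is dispatched to in(name, value[, metadata])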
 
Example 2
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static Set<Integer> brokerHostnamesToBrokerIds(
        ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Set<Integer> brokerIdSet = Sets.newHashSet();
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        if (brokerHostnameSet.contains(endpoint.host())) {
            brokerIdSet.add(broker.id());
        }
    }
    Preconditions.checkArgument(!checkPresence ||
            brokerHostnameSet.size() == brokerIdSet.size(),
            "Some hostnames could not be found! We found: " + brokerIdSet);

    return brokerIdSet;
}
 
Example 3
Source File: KafkaHubServiceImpl.java    From kafka-eagle with Apache License 2.0
private File createKafkaTempJson(Map<TopicPartition, Seq<Object>> tuple) throws IOException {
	JSONObject object = new JSONObject();
	object.put("version", 1);
	JSONArray array = new JSONArray();
	for (Entry<TopicPartition, Seq<Object>> entry : JavaConversions.mapAsJavaMap(tuple).entrySet()) {
		List<Object> replicas = JavaConversions.seqAsJavaList(entry.getValue());
		JSONObject tpObject = new JSONObject();
		tpObject.put("topic", entry.getKey().topic());
		tpObject.put("partition", entry.getKey().partition());
		tpObject.put("replicas", replicas);
		array.add(tpObject);
	}
	object.put("partitions", array);
	File f = File.createTempFile("ke_reassignment_", ".json");
	// try-with-resources ensures the writer is closed even if the write fails
	try (FileWriter out = new FileWriter(f)) {
		out.write(object.toJSONString());
	}
	f.deleteOnExit();
	return f;
}
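The temporary file written above uses Kafka's partition-reassignment JSON format; with illustrative values it looks like:

{"version":1,"partitions":[{"topic":"t1","partition":0,"replicas":[1,2,3]}]}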
 
Example 4
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get broker id list. */
public List<Object> getBrokerIdList(String clusterAlias) {
	List<Object> brokerIds = new ArrayList<>();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			for (String id : JavaConversions.seqAsJavaList(subBrokerIdsPaths)) {
				brokerIds.add(Integer.parseInt(id));
			}
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker id has error, msg is ", e);
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return brokerIds;
}
 
Example 5
Source File: OffsetGetter.java    From kmanager with Apache License 2.0
public List<OffsetInfo> processTopic(String group, String topic) throws Exception {
	List<String> partitionIds = null;
	try {
		partitionIds = JavaConversions.seqAsJavaList(ZKUtils.getZKUtilsFromKafka()
				.getChildren(ZkUtils.BrokerTopicsPath() + "/" + topic + "/partitions"));
	} catch (Exception e) {
		if (e instanceof NoNodeException) {
			LOG.warn("Is topic >" + topic + "< exists!", e);
			return null;
		}
	}
	List<OffsetInfo> offsetInfos = new ArrayList<OffsetInfo>();
	OffsetInfo offsetInfo = null;
	if (partitionIds == null) {
		// TODO that topic exists in consumer node but not in topics node?!
		return null;
	}

	for (String partitionId : partitionIds) {
		offsetInfo = processPartition(group, topic, partitionId);
		if (offsetInfo != null) {
			offsetInfos.add(offsetInfo);
		}
	}
	return offsetInfos;
}
 
Example 6
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printCurrentBrokers(ZkUtils zkUtils) throws JSONException {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    JSONArray json = new JSONArray();
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        JSONObject brokerJson = new JSONObject();
        brokerJson.put("id", broker.id());
        brokerJson.put("host", endpoint.host());
        brokerJson.put("port", endpoint.port());
        if (broker.rack().isDefined()) {
            brokerJson.put("rack", broker.rack().get());
        }
        json.put(brokerJson);
    }
    System.out.println("CURRENT BROKERS:");
    System.out.println(json.toString());
}
 
Example 7
Source File: ScroogeStructConverter.java    From parquet-mr with Apache License 2.0
/**
 * For composite types, such as the type of a map's key, the type class is
 * obtained via reflection, so this method performs the conversion based on
 * the provided class.
 *
 * @return converted ThriftType
 */
private ThriftType convertClassToThriftType(String name, Requirement requirement, Manifest<?> typeManifest) {
  Class typeClass = typeManifest.runtimeClass();
  if (typeClass == boolean.class) {
    return new ThriftType.BoolType();
  } else if (typeClass == byte.class) {
    return new ThriftType.ByteType();
  } else if (typeClass == double.class) {
    return new ThriftType.DoubleType();
  } else if (typeClass == short.class) {
    return new ThriftType.I16Type();
  } else if (typeClass == int.class) {
    return new ThriftType.I32Type();
  } else if (typeClass == long.class) {
    return new ThriftType.I64Type();
  } else if (typeClass == String.class) {
    return new ThriftType.StringType();
  } else if (typeClass == ByteBuffer.class) {
    return new ThriftType.StringType();
  } else if (typeClass == scala.collection.Seq.class) {
    Manifest<?> a = typeManifest.typeArguments().apply(0);
    return convertListTypeField(name, a, requirement);
  } else if (typeClass == scala.collection.Set.class) {
    Manifest<?> setElementManifest = typeManifest.typeArguments().apply(0);
    return convertSetTypeField(name, setElementManifest, requirement);
  } else if (typeClass == scala.collection.Map.class) {
    List<Manifest<?>> ms = JavaConversions.seqAsJavaList(typeManifest.typeArguments());
    Manifest keyManifest = ms.get(0);
    Manifest valueManifest = ms.get(1);
    return convertMapTypeField(name, keyManifest, valueManifest, requirement);
  } else if (com.twitter.scrooge.ThriftEnum.class.isAssignableFrom(typeClass)) {
    return convertEnumTypeField(typeClass, name);
  } else {
    return convertStructFromClass(typeClass);
  }
}
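As a concrete illustration of the map branch: for a field typed Map[String, Long], typeManifest.typeArguments() yields two manifests, so ms.get(0) carries the String key type and ms.get(1) the Long value type, and both are handed to convertMapTypeField for recursive conversion.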
 
Example 8
Source File: OffsetGetter.java    From kmanager with Apache License 2.0
public Node getClusterViz() {
	Node rootNode = new Node("KafkaCluster");
	List<Node> childNodes = new ArrayList<Node>();
	List<Broker> brokers = JavaConversions.seqAsJavaList(ZKUtils.getZKUtilsFromKafka().getAllBrokersInCluster());
	brokers.forEach(broker -> {
		List<EndPoint> endPoints = JavaConversions.seqAsJavaList(broker.endPoints().seq());
		childNodes.add(new Node(broker.id() + ":" + endPoints.get(0).host() + ":" + endPoints.get(0).port(), null));
	});
	rootNode.setChildren(childNodes);
	return rootNode;
}
 
Example 9
Source File: PrecisionMetric.java    From predictionio-template-java-ecom-recommender with Apache License 2.0
@Override
public Double calculate(SparkContext sc, Seq<Tuple2<EmptyParams, RDD<Tuple3<Query, PredictedResult, Set<String>>>>> qpas) {
    List<Tuple2<EmptyParams, RDD<Tuple3<Query, PredictedResult, Set<String>>>>> sets = JavaConversions.seqAsJavaList(qpas);
    List<Double> allSetResults = new ArrayList<>();

    for (Tuple2<EmptyParams, RDD<Tuple3<Query, PredictedResult, Set<String>>>> set : sets) {
        List<Double> setResults = set._2().toJavaRDD().map(new Function<Tuple3<Query, PredictedResult, Set<String>>, Double>() {
            @Override
            public Double call(Tuple3<Query, PredictedResult, Set<String>> qpa) throws Exception {
                Set<String> predicted = new HashSet<>();
                for (ItemScore itemScore : qpa._2().getItemScores()) {
                    predicted.add(itemScore.getItemEntityId());
                }
                Set<String> intersection = new HashSet<>(predicted);
                intersection.retainAll(qpa._3());

                return 1.0 * intersection.size() / qpa._2().getItemScores().size();
            }
        }).collect();

        allSetResults.addAll(setResults);
    }
    double sum = 0.0;
    for (Double value : allSetResults) sum += value;

    return sum / allSetResults.size();
}
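As a worked example of the per-query computation: if a query's predicted items are {a, b, c, d} and its actual set is {b, d, e}, the intersection is {b, d}, so that query scores 1.0 * 2 / 4 = 0.5; the metric returned is the mean of these per-query precisions across all sets.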
 
Example 10
Source File: TestKafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
public List<String> findTopicPartition(String clusterAlias, String topic) {
	KafkaZkClient zkc = zkPool.getZkClient(clusterAlias);
	Seq<String> brokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH + "/" + topic + "/partitions");
	List<String> topicAndPartitions = JavaConversions.seqAsJavaList(brokerTopicsPaths);
	if (zkc != null) {
		zkPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return topicAndPartitions;
}
 
Example 11
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * According to topic and partition to obtain Replicas & Isr.
 */
public String getReplicasIsr(String clusterAlias, String topic, int partitionid) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	TopicPartition tp = new TopicPartition(topic, partitionid);
	Seq<Object> replis = zkc.getReplicasForPartition(tp);
	List<Object> targets = JavaConversions.seqAsJavaList(replis);
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return targets.toString();
}
 
Example 12
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Obtaining kafka consumer information from zookeeper.
 */
public Map<String, List<String>> getConsumers(String clusterAlias) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	Map<String, List<String>> consumers = new HashMap<String, List<String>>();
	try {
		Seq<String> subConsumerPaths = zkc.getChildren(CONSUMERS_PATH);
		List<String> groups = JavaConversions.seqAsJavaList(subConsumerPaths);
		for (String group : groups) {
			String path = CONSUMERS_PATH + "/" + group + "/owners";
			if (zkc.pathExists(path)) {
				Seq<String> owners = zkc.getChildren(path);
				List<String> ownersSerialize = JavaConversions.seqAsJavaList(owners);
				consumers.put(group, ownersSerialize);
			} else {
				LOG.error("Consumer Path[" + path + "] is not exist.");
			}
		}
	} catch (Exception ex) {
		LOG.error(ex.getMessage());
	} finally {
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return consumers;
}
 
Example 13
Source File: RDFUpdate.java    From oryx with Apache License 2.0
private Predicate buildPredicate(Split split,
                                 CategoricalValueEncodings categoricalValueEncodings) {
  if (split == null) {
    // Left child always applies, but is evaluated second
    return new True();
  }

  int featureIndex = inputSchema.predictorToFeatureIndex(split.feature());
  FieldName fieldName = FieldName.create(inputSchema.getFeatureNames().get(featureIndex));

  if (split.featureType().equals(FeatureType.Categorical())) {
    // Note that categories in MLlib model select the *left* child but the
    // convention here will be that the predicate selects the *right* child
    // So the predicate will evaluate "not in" this set
    // More ugly casting
    @SuppressWarnings("unchecked")
    Collection<Double> javaCategories = (Collection<Double>) (Collection<?>)
        JavaConversions.seqAsJavaList(split.categories());
    Set<Integer> negativeEncodings = javaCategories.stream().map(Double::intValue).collect(Collectors.toSet());

    Map<Integer,String> encodingToValue =
        categoricalValueEncodings.getEncodingValueMap(featureIndex);
    List<String> negativeValues = negativeEncodings.stream().map(encodingToValue::get).collect(Collectors.toList());

    String joinedValues = TextUtils.joinPMMLDelimited(negativeValues);
    return new SimpleSetPredicate(fieldName,
                                  SimpleSetPredicate.BooleanOperator.IS_NOT_IN,
                                  new Array(Array.Type.STRING, joinedValues));

  } else {
    // For MLlib, left means <= threshold, so right means >
    return new SimplePredicate(fieldName,
        SimplePredicate.Operator.GREATER_THAN,
        Double.toString(split.threshold()));
  }
}
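To illustrate the categorical branch: if the split's categories are the encodings {0.0, 2.0} and the value encoding maps 0 to "red" and 2 to "blue", the generated predicate is IS_NOT_IN {"red", "blue"}, so the right child is selected exactly when the feature value is neither "red" nor "blue".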
 
Example 14
Source File: ResourceDetectBeforeMergingJob.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doExecute() throws Exception {
    logger.info("Start detect resource before merge.");
    String cubeId = getParam(MetadataConstants.P_CUBE_ID);

    final CubeManager cubeManager = CubeManager.getInstance(config);
    final CubeInstance cube = cubeManager.getCubeByUuid(cubeId);
    final CubeSegment mergedSeg = cube.getSegmentById(getParam(MetadataConstants.P_SEGMENT_IDS));
    final SegmentInfo mergedSegInfo = MetadataConverter.getSegmentInfo(cube, mergedSeg.getUuid(),
            mergedSeg.getName(), mergedSeg.getStorageLocationIdentifier());
    final List<CubeSegment> mergingSegments = cube.getMergingSegments(mergedSeg);
    final List<SegmentInfo> segmentInfos = Lists.newArrayList();
    Collections.sort(mergingSegments);
    for (CubeSegment cubeSegment : mergingSegments) {
        segmentInfos.add(MetadataConverter.getSegmentInfo(cube, cubeSegment.getUuid(), cubeSegment.getName(),
                cubeSegment.getStorageLocationIdentifier()));
    }
    infos.clearMergingSegments();
    infos.recordMergingSegments(segmentInfos);
    Map<Long, DFLayoutMergeAssist> mergeCuboidsAssist = CubeMergeJob.generateMergeAssist(segmentInfos, ss);
    ResourceDetectUtils.write(
            new Path(config.getJobTmpShareDir(project, jobId), ResourceDetectUtils.countDistinctSuffix()),
            ResourceDetectUtils
                    .findCountDistinctMeasure(JavaConversions.asJavaCollection(mergedSegInfo.toBuildLayouts())));
    Map<String, List<String>> resourcePaths = Maps.newHashMap();
    infos.clearSparkPlans();
    for (Map.Entry<Long, DFLayoutMergeAssist> entry : mergeCuboidsAssist.entrySet()) {
        Dataset<Row> afterMerge = entry.getValue().merge(config, getParam(MetadataConstants.P_CUBE_NAME));
        infos.recordSparkPlan(afterMerge.queryExecution().sparkPlan());
        List<Path> paths = JavaConversions
                .seqAsJavaList(ResourceDetectUtils.getPaths(afterMerge.queryExecution().sparkPlan()));
        List<String> pathStrs = paths.stream().map(Path::toString).collect(Collectors.toList());
        resourcePaths.put(String.valueOf(entry.getKey()), pathStrs);
    }
    ResourceDetectUtils.write(new Path(config.getJobTmpShareDir(project, jobId),
            mergedSeg.getUuid() + "_" + ResourceDetectUtils.fileName()), resourcePaths);
}
 
Example 15
Source File: ZKUtils.java    From kmanager with Apache License 2.0
public static List<String> getAllTopics() {
	return JavaConversions.seqAsJavaList(zkUtilsFromKafka.getAllTopics());
}
 
Example 16
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get all broker list from zookeeper.
 */
public List<BrokersInfo> getAllBrokersInfo(String clusterAlias) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	List<BrokersInfo> targets = new ArrayList<BrokersInfo>();
	if (zkc.pathExists(BROKER_IDS_PATH)) {
		Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
		List<String> brokerIdss = JavaConversions.seqAsJavaList(subBrokerIdsPaths);
		int id = 0;
		for (String ids : brokerIdss) {
			try {
				Tuple2<Option<byte[]>, Stat> tuple = zkc.getDataAndStat(BROKER_IDS_PATH + "/" + ids);
				BrokersInfo broker = new BrokersInfo();
				broker.setCreated(CalendarUtils.convertUnixTime2Date(tuple._2.getCtime()));
				broker.setModify(CalendarUtils.convertUnixTime2Date(tuple._2.getMtime()));
				String tupleString = new String(tuple._1.get());
				if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable") || SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
					String endpoints = JSON.parseObject(tupleString).getString("endpoints");
					String tmp = endpoints.split("//")[1];
					broker.setHost(tmp.substring(0, tmp.length() - 2).split(":")[0]);
					broker.setPort(Integer.valueOf(tmp.substring(0, tmp.length() - 2).split(":")[1]));
				} else {
					String host = JSON.parseObject(tupleString).getString("host");
					int port = JSON.parseObject(tupleString).getInteger("port");
					broker.setHost(host);
					broker.setPort(port);
				}
				broker.setJmxPort(JSON.parseObject(tupleString).getInteger("jmx_port"));
				broker.setId(++id);
				broker.setIds(ids);
				targets.add(broker);
			} catch (Exception ex) {
				LOG.error(ex.getMessage());
			}
		}
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return targets;
}
 
Example 17
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printLeastDisruptiveReassignment(
        ZkUtils zkUtils, List<String> specifiedTopics, Set<Integer> specifiedBrokers,
        Set<Integer> excludedBrokers, Map<Integer, String> rackAssignment, int desiredReplicationFactor)
        throws JSONException {
    // We need three inputs for rebalancing: the brokers, the topics, and the current assignment
    // of topics to brokers.
    Set<Integer> brokerSet = specifiedBrokers;
    if (brokerSet == null || brokerSet.isEmpty()) {
        brokerSet = Sets.newHashSet(Lists.transform(
                JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster()),
                new Function<Broker, Integer>() {
                    @Override
                    public Integer apply(Broker broker) {
                        return broker.id();
                    }
                }));
    }

    // Exclude brokers that we want to decommission
    Set<Integer> brokers = Sets.difference(brokerSet, excludedBrokers);
    rackAssignment.keySet().retainAll(brokers);

    // The most common use case is to rebalance all topics, but explicit topic addition is also
    // supported.
    Seq<String> topics = specifiedTopics != null ?
            JavaConversions.collectionAsScalaIterable(specifiedTopics).toSeq() :
            zkUtils.getAllTopics();

    // Print the current assignment in case a rollback is needed
    printCurrentAssignment(zkUtils, JavaConversions.seqAsJavaList(topics));

    Map<String, Map<Integer, List<Integer>>> initialAssignments =
            KafkaTopicAssigner.topicMapToJavaMap(zkUtils.getPartitionAssignmentForTopics(
                    topics));

    // Assign topics one at a time. This is slightly suboptimal from a packing standpoint, but
    // it's close enough to work in practice. We can also always follow it up with a Kafka
    // leader election rebalance if necessary.
    JSONObject json = new JSONObject();
    json.put("version", KAFKA_FORMAT_VERSION);
    JSONArray partitionsJson = new JSONArray();
    KafkaTopicAssigner assigner = new KafkaTopicAssigner();
    for (String topic : JavaConversions.seqAsJavaList(topics)) {
        Map<Integer, List<Integer>> partitionAssignment = initialAssignments.get(topic);
        Map<Integer, List<Integer>> finalAssignment = assigner.generateAssignment(
                topic, partitionAssignment, brokers, rackAssignment, desiredReplicationFactor);
        for (Map.Entry<Integer, List<Integer>> e : finalAssignment.entrySet()) {
            JSONObject partitionJson = new JSONObject();
            partitionJson.put("topic", topic);
            partitionJson.put("partition", e.getKey());
            partitionJson.put("replicas", new JSONArray(e.getValue()));
            partitionsJson.put(partitionJson);
        }
    }
    json.put("partitions", partitionsJson);
    System.out.println("NEW ASSIGNMENT:\n" + json.toString());
}
 
Example 18
Source File: ZKUtils.java    From kmanager with Apache License 2.0
public static List<String> getConsumersInGroup(String group) {
	return JavaConversions.seqAsJavaList(zkUtilsFromKafka.getConsumersInGroup(group));
}
 
Example 19
Source File: Script.java    From systemds with Apache License 2.0
/**
 * Register output variables.
 *
 * @param outputNames
 *            names of the output variables
 * @return {@code this} Script object to allow chaining of methods
 */
public Script out(scala.collection.Seq<String> outputNames) {
	List<String> list = JavaConversions.seqAsJavaList(outputNames);
	outputVariables.addAll(list);
	return this;
}