Java Code Examples for scala.collection.Seq

The following examples show how to use scala.collection.Seq from Java. They are extracted from open source projects; each example lists its source project and file.
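Most of the examples boil down to the same interop pattern: build a java.util.List, convert it to a scala.collection.Seq via scala.collection.JavaConverters (or the older scala.collection.JavaConversions), hand it to a Scala API, and convert any returned Seq back to a Java collection. The snippet below is a minimal, self-contained sketch of that round trip, assuming Scala 2.11/2.12 (where scala.collection.JavaConverters provides these converters) is on the classpath; the class name SeqInterop is made up for illustration.

import java.util.Arrays;
import java.util.List;

import scala.collection.JavaConverters;
import scala.collection.Seq;

public class SeqInterop {
	public static void main(String[] args) {
		// Java -> Scala: wrap a java.util.List as a scala.collection.Seq.
		List<String> javaList = Arrays.asList("a", "b", "c");
		Seq<String> scalaSeq = JavaConverters.asScalaBufferConverter(javaList).asScala();

		// Seq exposes apply(int), size() and head() to Java callers.
		System.out.println(scalaSeq.apply(0)); // a
		System.out.println(scalaSeq.size());   // 3

		// Scala -> Java: view the Seq back as a java.util.List.
		List<String> backToJava = JavaConverters.seqAsJavaListConverter(scalaSeq).asJava();
		System.out.println(backToJava);        // [a, b, c]
	}
}

Both conversions are wrappers rather than copies, so they are cheap even for large collections.
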
Example 1
Source Project: Flink-CEPplus   Source File: CassandraConnectorITCase.java    License: Apache License 2.0
@Test
public void testCassandraScalaTupleAtLeastOnceSinkBuilderDetection() throws Exception {
	Class<scala.Tuple1<String>> c = (Class<scala.Tuple1<String>>) new scala.Tuple1<>("hello").getClass();
	Seq<TypeInformation<?>> typeInfos = JavaConverters.asScalaBufferConverter(
		Collections.<TypeInformation<?>>singletonList(BasicTypeInfo.STRING_TYPE_INFO)).asScala();
	Seq<String> fieldNames = JavaConverters.asScalaBufferConverter(
		Collections.singletonList("_1")).asScala();

	CaseClassTypeInfo<scala.Tuple1<String>> typeInfo = new CaseClassTypeInfo<scala.Tuple1<String>>(c, null, typeInfos, fieldNames) {
		@Override
		public TypeSerializer<scala.Tuple1<String>> createSerializer(ExecutionConfig config) {
			return null;
		}
	};

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<scala.Tuple1<String>> input = env.fromElements(new scala.Tuple1<>("hello")).returns(typeInfo);

	CassandraSink.CassandraSinkBuilder<scala.Tuple1<String>> sinkBuilder = CassandraSink.addSink(input);
	assertTrue(sinkBuilder instanceof CassandraSink.CassandraScalaProductSinkBuilder);
}
 
Example 2
Source Project: Flink-CEPplus   Source File: KafkaTestEnvironmentImpl.java    License: Apache License 2.0
@Override
public int getLeaderToShutDown(String topic) throws Exception {
	ZkClient zkClient = createZkClient();
	PartitionMetadata firstPart = null;
	do {
		if (firstPart != null) {
			LOG.info("Unable to find leader. error code {}", firstPart.errorCode());
			// not the first try. Sleep a bit
			Thread.sleep(150);
		}

		Seq<PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient).partitionsMetadata();
		firstPart = partitionMetadata.head();
	}
	while (firstPart.errorCode() != 0);
	zkClient.close();

	return firstPart.leader().get().id();
}
 
Example 3
Source Project: DDMQ   Source File: KafkaProduceOffsetFetcher.java    License: Apache License 2.0
private Map<Integer, Map<String, List<Integer>>> parseMetadataResponse(TopicMetadataResponse response) {
    Map<Integer/*broker id*/, Map<String/*topic*/, List<Integer>/*partition id*/>> metadata = Maps.newHashMap();
    Seq<TopicMetadata> topicMetadatas = response.topicsMetadata();
    for (TopicMetadata topicMetadata : JavaConverters.asJavaListConverter(topicMetadatas).asJava()) {
        List<PartitionMetadata> partitionsMetadata = JavaConverters.asJavaListConverter(topicMetadata.partitionsMetadata()).asJava();
        String topic = topicMetadata.topic();
        for (PartitionMetadata partitionMetadata : partitionsMetadata) {
            int partitionId = partitionMetadata.partitionId();
            int brokerId = partitionMetadata.leader().get().id();
            if (!metadata.containsKey(brokerId)) {
                metadata.put(brokerId, Maps.newHashMap());
            }
            if (!metadata.get(brokerId).containsKey(topic)) {
                metadata.get(brokerId).put(topic, Lists.newArrayList());
            }
            metadata.get(brokerId).get(topic).add(partitionId);
        }
    }
    return metadata;
}
 
Example 4
private Seq<KoreanToken> convertPhrasesToTokens(Seq<KoreanPhrase> phrases) {
    KoreanToken[] tokens = new KoreanToken[phrases.length()];

    Iterator<KoreanPhrase> iterator = phrases.iterator();
    int i = 0;
    while (iterator.hasNext()) {
        KoreanPhrase phrase = iterator.next();
        tokens[i++] = new KoreanToken(phrase.text(), phrase.pos(), phrase.offset(), phrase.length(), scala.Option.apply(null), false);
    }

    Arrays.sort(tokens, (o1, o2) -> Integer.compare(o1.offset(), o2.offset()));

    return JavaConverters.asScalaBuffer(Arrays.asList(tokens)).toSeq();
}
 
Example 5
Source Project: flink   Source File: CassandraConnectorITCase.java    License: Apache License 2.0
@Test
public void testCassandraScalaTupleAtLeastOnceSinkBuilderDetection() throws Exception {
	Class<scala.Tuple1<String>> c = (Class<scala.Tuple1<String>>) new scala.Tuple1<>("hello").getClass();
	Seq<TypeInformation<?>> typeInfos = JavaConverters.asScalaBufferConverter(
		Collections.<TypeInformation<?>>singletonList(BasicTypeInfo.STRING_TYPE_INFO)).asScala();
	Seq<String> fieldNames = JavaConverters.asScalaBufferConverter(
		Collections.singletonList("_1")).asScala();

	CaseClassTypeInfo<scala.Tuple1<String>> typeInfo = new CaseClassTypeInfo<scala.Tuple1<String>>(c, null, typeInfos, fieldNames) {
		@Override
		public TypeSerializer<scala.Tuple1<String>> createSerializer(ExecutionConfig config) {
			return null;
		}
	};

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<scala.Tuple1<String>> input = env.fromElements(new scala.Tuple1<>("hello")).returns(typeInfo);

	CassandraSink.CassandraSinkBuilder<scala.Tuple1<String>> sinkBuilder = CassandraSink.addSink(input);
	assertTrue(sinkBuilder instanceof CassandraSink.CassandraScalaProductSinkBuilder);
}
 
Example 6
Source Project: kafka-eagle   Source File: BrokerServiceImpl.java    License: Apache License 2.0
/** Get kafka broker numbers from zookeeper. */
public long brokerNumbers(String clusterAlias) {
	long count = 0;
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			count = JavaConversions.seqAsJavaList(subBrokerIdsPaths).size();
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker numbers has error, msg is " + e.getCause().getMessage());
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return count;
}
 
Example 7
Source Project: kafka-eagle   Source File: BrokerServiceImpl.java    License: Apache License 2.0
/** Get broker id list. */
public List<Object> getBrokerIdList(String clusterAlias) {
	List<Object> brokerIds = new ArrayList<>();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			for (String id : JavaConversions.seqAsJavaList(subBrokerIdsPaths)) {
				brokerIds.add(Integer.parseInt(id));
			}
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker id has error, msg is ", e);
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return brokerIds;
}
 
Example 8
Source Project: doctorkafka   Source File: KafkaClusterManager.java    License: Apache License 2.0
private scala.collection.Map<Object, Seq<Object>> getReplicaAssignmentForTopic(
    ZkUtils zkUtils, String topic) {
  if (topicPartitionAssignments.containsKey(topic)) {
    return topicPartitionAssignments.get(topic);
  }
  List<String> topics = new ArrayList<>();
  topics.add(topic);
  Seq<String> topicsSeq = scala.collection.JavaConverters.asScalaBuffer(topics).toSeq();

  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments;
  assignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

  scala.collection.Map<Object, Seq<Object>> partitionAssignment = assignments.get(topic).get();
  topicPartitionAssignments.put(topic, partitionAssignment);
  return partitionAssignment;
}
 
Example 9
Source Project: doctorkafka   Source File: KafkaClusterManager.java    License: Apache License 2.0
private scala.collection.Map<TopicAndPartition, Seq<Object>> getAssignmentPlan(
    Map<TopicPartition, Integer[]> replicasMap) {
  scala.collection.mutable.HashMap<TopicAndPartition, Seq<Object>> result =
      new scala.collection.mutable.HashMap<>();

  for (Map.Entry<TopicPartition, Integer[]> entry : replicasMap.entrySet()) {
    TopicPartition tp = entry.getKey();
    TopicAndPartition tap = new TopicAndPartition(tp.topic(), tp.partition());
    List<Object> objs = Arrays.asList(entry.getValue()).stream()
        .map(val -> (Object) val).collect(Collectors.toList());
    Seq<Object> replicas = JavaConverters.asScalaBuffer(objs).seq();
    result.put(tap, replicas);
  }

  assert replicasMap.size() == result.size();
  LOG.debug("replicaMap.size = {}, result.size = {}", replicasMap.size(), result.size());
  return result;
}
 
Example 10
Source Project: DCMonitor   Source File: KafkaInfos.java    License: MIT License
public List<PartitionInfo> getPartitionInfos(String group, String topic) {
  Seq<String> singleTopic = JavaConversions.asScalaBuffer(Collections.singletonList(topic)).toSeq();
  scala.collection.Map<String, Seq<Object>> pidMap = ZkUtils.getPartitionsForTopics(zkClient, singleTopic);
  Option<Seq<Object>> partitions = pidMap.get(topic);
  if (partitions.isEmpty()) {
    return Collections.emptyList();
  }
  List<PartitionInfo> infos = Lists.newArrayList();
  for (Object o : JavaConversions.asJavaList(partitions.get())) {
    PartitionInfo info = getPartitionInfo(group, topic, Int.unbox(o));
    if (info != null) {
      infos.add(info);
    }
  }
  return infos;
}
 
Example 11
/**
 * @param topic Kafka topic
 * @param partitionsToReassign a map from partition (int) to new replica list (int seq)
 *
 * @return a JSON string in the same format as the output of kafka.utils.ZkUtils.formatAsReassignmentJson
 *
 * Example:
 * <pre>
 *   {"version":1,"partitions":[
 *     {"topic":"kmf-topic","partition":1,"replicas":[0,1]},
 *     {"topic":"kmf-topic","partition":2,"replicas":[1,2]},
 *     {"topic":"kmf-topic","partition":0,"replicas":[2,0]}]}
 * </pre>
 */
private static String formatAsNewReassignmentJson(String topic, scala.collection.Map<Object, Seq<Object>> partitionsToReassign) {
  StringBuilder builder = new StringBuilder();
  builder.append("{\"version\":1,\"partitions\":[\n");
  for (int partition = 0; partition < partitionsToReassign.size(); partition++) {
    builder.append("  {\"topic\":\"").append(topic).append("\",\"partition\":").append(partition).append(",\"replicas\":[");
    Seq<Object> replicas = partitionsToReassign.apply(partition);
    for (int replicaIndex = 0; replicaIndex < replicas.size(); replicaIndex++) {
      Object replica = replicas.apply(replicaIndex);
      builder.append(replica).append(",");
    }
    builder.setLength(builder.length() - 1);
    builder.append("]},\n");
  }
  builder.setLength(builder.length() - 2);
  builder.append("]}");
  return builder.toString();
}
 
Example 12
private void writeReports( Coverage coverage, List<File> sourceRoots, File coberturaXmlOutputDirectory,
                           File scoverageXmlOutputDirectory, File scoverageHtmlOutputDirectory )
{
    Seq<File> sourceRootsAsScalaSeq = JavaConverters.asScalaBuffer( sourceRoots );

    new CoberturaXmlWriter( sourceRootsAsScalaSeq, coberturaXmlOutputDirectory ).write( coverage );
    getLog().info( String.format( "Written Cobertura XML report [%s]",
                                  new File( coberturaXmlOutputDirectory, "cobertura.xml" ).getAbsolutePath() ) );

    new ScoverageXmlWriter( sourceRootsAsScalaSeq, scoverageXmlOutputDirectory, false ).write( coverage );
    getLog().info( String.format( "Written XML coverage report [%s]",
                                  new File( scoverageXmlOutputDirectory, "scoverage.xml" ).getAbsolutePath() ) );

    new ScoverageHtmlWriter( sourceRootsAsScalaSeq, scoverageHtmlOutputDirectory, Option.<String>apply( encoding ) ).write( coverage );
    getLog().info( String.format( "Written HTML coverage report [%s]",
                                  new File( scoverageHtmlOutputDirectory, "index.html" ).getAbsolutePath() ) );

    getLog().info( String.format( "Statement coverage.: %s%%", coverage.statementCoverageFormatted() ) );
    getLog().info( String.format( "Branch coverage....: %s%%", coverage.branchCoverageFormatted() ) );
}
 
Example 13
Source Project: uReplicator   Source File: KafkaBrokerTopicObserver.java    License: Apache License 2.0
private void tryAddTopic(String topic) {
  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
      _zkUtils.getPartitionAssignmentForTopics(
          JavaConversions.asScalaBuffer(ImmutableList.of(topic)));
  if (partitionAssignmentForTopics.get(topic).isEmpty()
      || partitionAssignmentForTopics.get(topic).get().size() == 0) {
    LOGGER.debug("try to refresh for topic {} but found no topic partition for it", topic);
    return;
  }
  synchronized (_lock) {
    LOGGER.info("starting to refresh for adding topic {}", topic);
    if (!getAllTopics().contains(topic)) {
      try {
        _topicPartitionInfoMap.put(topic, new TopicPartition(topic,
            partitionAssignmentForTopics.get(topic).get().size()));
      } catch (Exception e) {
        LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
      }
    }
    LOGGER.info("finished refreshing for adding topic {}", topic);
  }
}
 
Example 14
Source Project: uReplicator   Source File: TopicPartitionCountObserver.java    License: Apache License 2.0
private void updateTopicPartitionInfoMap(final Set<String> topicsToCheck) {
  if (topicsToCheck.size() > 0) {
    // get topic partition count and maybe update partition counts for existing topics
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
        zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.copyOf(topicsToCheck)));

    for (String topic : topicsToCheck) {
      try {
        topicPartitionMap.put(topic, partitionAssignmentForTopics.get(topic).get().size());
      } catch (Exception e) {
        logger.warn("Failed to get topicPartition info for topic={} of zkPath={}",
            topic, zkPath, e);
      }
    }
  }
}
 
Example 15
Source Project: Scribengin   Source File: KafkaTool.java    License: GNU Affero General Public License v3.0
public boolean reassignPartitionReplicas(String topic, int partition, Integer ... brokerId) {
  ZkClient client = new ZkClient(zkConnects, 10000, 10000, ZKStringSerializer$.MODULE$);
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);

  Buffer<Object> seqs = asScalaBuffer(Arrays.asList((Object[])brokerId));
  Map<TopicAndPartition, Seq<Object>> map = new HashMap<>();
  map.put(topicAndPartition, seqs);
  ReassignPartitionsCommand command = new ReassignPartitionsCommand(client, asScalaMap(map));
  return command.reassignPartitions();
}
 
Example 16
@Override
public Double calculate(SparkContext sc, Seq<Tuple2<EmptyParams, RDD<Tuple3<Query, PredictedResult, Set<String>>>>> qpas) {
    List<Tuple2<EmptyParams, RDD<Tuple3<Query, PredictedResult, Set<String>>>>> sets = JavaConversions.seqAsJavaList(qpas);
    List<Double> allSetResults = new ArrayList<>();

    for (Tuple2<EmptyParams, RDD<Tuple3<Query, PredictedResult, Set<String>>>> set : sets) {
        List<Double> setResults = set._2().toJavaRDD().map(new Function<Tuple3<Query, PredictedResult, Set<String>>, Double>() {
            @Override
            public Double call(Tuple3<Query, PredictedResult, Set<String>> qpa) throws Exception {
                Set<String> predicted = new HashSet<>();
                for (ItemScore itemScore : qpa._2().getItemScores()) {
                    predicted.add(itemScore.getItemEntityId());
                }
                Set<String> intersection = new HashSet<>(predicted);
                intersection.retainAll(qpa._3());

                return 1.0 * intersection.size() / qpa._2().getItemScores().size();
            }
        }).collect();

        allSetResults.addAll(setResults);
    }
    double sum = 0.0;
    for (Double value : allSetResults) sum += value;

    return sum / allSetResults.size();
}
 
Example 17
Source Project: DDMQ   Source File: KafkaProduceOffsetFetcher.java    License: Apache License 2.0
public Map<Integer, Map<String, List<Integer>>> getMetadata(Set<String> topics) {
    if (CollectionUtils.isEmpty(topics)) {
        return Collections.emptyMap();
    }
    Seq<Broker> brokers = ZkUtils.getAllBrokersInCluster(zkClient);
    for (Broker broker : JavaConverters.asJavaListConverter(brokers).asJava()) {
        brokerIndex.put(broker.id(), broker);
    }
    TopicMetadataResponse response = ClientUtils.fetchTopicMetadata(JavaConverters.asScalaSetConverter(topics).asScala(), brokers, "GetMetadataClient", 10000, 0);
    return parseMetadataResponse(response);
}
 
Example 18
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testInputTupleSeqNoMetadataDML() {
	System.out.println("MLContextTest - Tuple sequence no metadata DML");

	List<String> list1 = new ArrayList<>();
	list1.add("1,2");
	list1.add("3,4");
	JavaRDD<String> javaRDD1 = sc.parallelize(list1);
	RDD<String> rdd1 = JavaRDD.toRDD(javaRDD1);

	List<String> list2 = new ArrayList<>();
	list2.add("5,6");
	list2.add("7,8");
	JavaRDD<String> javaRDD2 = sc.parallelize(list2);
	RDD<String> rdd2 = JavaRDD.toRDD(javaRDD2);

	Tuple2 tuple1 = new Tuple2("m1", rdd1);
	Tuple2 tuple2 = new Tuple2("m2", rdd2);
	List tupleList = new ArrayList();
	tupleList.add(tuple1);
	tupleList.add(tuple2);
	Seq seq = JavaConversions.asScalaBuffer(tupleList).toSeq();

	Script script = dml("print('sums: ' + sum(m1) + ' ' + sum(m2));").in(seq);
	setExpectedStdOut("sums: 10.0 26.0");
	ml.execute(script);
}
 
Example 19
Source Project: deep-spark   Source File: DeepRDD.java    License: Apache License 2.0
@Override
public Seq<String> getPreferredLocations(Partition split) {
    initExtractorClient();

    List<String> locations = extractorClient.getPreferredLocations(split);
    if (locations == null || locations.isEmpty()) {
        return super.getPreferredLocations(split);
    }

    return asScalaBuffer(locations);

}
 
Example 20
Source Project: Decision   Source File: KafkaTopicService.java    License: Apache License 2.0
@Override
public void createOrUpdateTopic(String topic, int replicationFactor, int partitions) {
    logger.debug("Creating topic {} with replication {} and {} partitions", topic, replicationFactor, partitions);
    Topic.validate(topic);
    Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
    Map<Object, Seq<Object>> partitionReplicaAssignment = AdminUtils.assignReplicasToBrokers(brokerList,
            partitions, replicationFactor, AdminUtils.assignReplicasToBrokers$default$4(),
            AdminUtils.assignReplicasToBrokers$default$5());
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, partitionReplicaAssignment,
            AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK$default$4(),
            AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK$default$5());
    logger.debug("Topic {} created", topic);
}
 
Example 21
Source Project: sylph   Source File: KafkaOffsetCommitter.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
private void commitKafkaOffsets(Map<TopicAndPartition, Long> internalOffsets)
        throws Exception
{
    logger.info("committing offset to kafka, {}", internalOffsets);

    Seq<Tuple2<TopicAndPartition, Long>> fromOffsetsAsJava = JavaConverters.mapAsScalaMapConverter(internalOffsets).asScala().toSeq();
    kafkaCluster.setConsumerOffsets(groupId, (scala.collection.immutable.Map<TopicAndPartition, Object>) Map$.MODULE$.<TopicAndPartition, Long>apply(fromOffsetsAsJava));
}
 
Example 22
Source Project: parquet-mr   Source File: ScroogeStructConverter.java    License: Apache License 2.0
/**
 * When an enum is defined in Scrooge, each enum value is a subclass of the enum class;
 * the generated companion class name ends with "$", e.g. Operation$.
 */
private List getEnumList(String enumName) throws ClassNotFoundException, IllegalAccessException, NoSuchFieldException, NoSuchMethodException, InvocationTargetException {
  enumName += "$"; // in Scala-generated code, the companion class name ends with $
  Class companionObjectClass = Class.forName(enumName);
  Object cObject = companionObjectClass.getField("MODULE$").get(null);
  Method listMethod = companionObjectClass.getMethod("list", new Class[]{});
  Object result = listMethod.invoke(cObject, null);
  return JavaConversions.seqAsJavaList((Seq) result);
}
 
Example 23
Source Project: bpmn.ai   Source File: BpmnaiUtils.java    License: BSD 3-Clause "New" or "Revised" License
public Dataset<Row> removeDuplicatedColumns(Dataset<Row> dataset) {
    Dataset<Row> newDataset;
    //remove duplicated columns
    //find duplicated columns and their first name under which they occurred
    String[] columns = dataset.columns();
    Map<String, Column> uniqueColumnNameMapping = new HashMap<>();

    Pattern p = Pattern.compile("(\\w+_)\\d*");
    for(String col : columns) {
        Matcher m = p.matcher(col);
        if(m.matches()) {
            if(!uniqueColumnNameMapping.keySet().contains(m.group(1))) {
                uniqueColumnNameMapping.put(m.group(1), new Column(col));
            }
        }
    }

    Seq<Column> selectionColumns =  JavaConverters.asScalaIteratorConverter(uniqueColumnNameMapping.values().iterator()).asScala().toSeq();

    //create new dataset if necessary
    if(columns.length != uniqueColumnNameMapping.size()) {

        newDataset = dataset.select(selectionColumns).toDF();

        //rename columns
        Map<String, String> swappedUniqueColumnNameMapping = new HashMap<>();
        for(String key : uniqueColumnNameMapping.keySet()) {
            swappedUniqueColumnNameMapping.put(uniqueColumnNameMapping.get(key).toString(), key);
        }

        for(String column : newDataset.columns()) {
            newDataset = newDataset.withColumnRenamed(column, swappedUniqueColumnNameMapping.get(column));
        }

        return newDataset;
    } else {
        return  dataset;
    }
}
 
Example 24
Source Project: jeesuite-libs   Source File: ZkConsumerCommand.java    License: Apache License 2.0
public List<BrokerInfo> fetchAllBrokers(){
	List<BrokerInfo> result = new ArrayList<>();
	Seq<Broker> brokers = zkUtils.getAllBrokersInCluster();
	Iterator<Broker> iterator = brokers.toList().iterator();
	while(iterator.hasNext()){
		Broker broker = iterator.next();
		Node node = broker.getNode(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).get();
		result.add(new BrokerInfo(node.idString(), node.host(), node.port()));
	}
	return result;
}
 
Example 25
Source Project: rug-cli   Source File: PathCommand.java    License: GNU General Public License v3.0
@Command
public void run(@Argument(index = 1, defaultValue = "") String expression,
        @Option("change-dir") String rootName, @Option("values") boolean values) {

    Collection<GraphNode> treeNodes = new ProgressReportingOperationRunner<Collection<GraphNode>>(
            "Evaluating path expression against project").run((indicator) -> {

                PathExpression pathExpression = PathExpressionParser$.MODULE$
                        .parseString(expression);

                File root = FileUtils.createProjectRoot(rootName);
                ArtifactSource source = ArtifactSourceUtils.createArtifactSource(root);

                ExpressionEngine pxe = new PathExpressionEngine();
                TreeNode pmv = new ProjectMutableView(new EmptyArtifactSource(""), source);

                Either<String, Seq<GraphNode>> result = pxe.evaluate(pmv, pathExpression,
                        DefaultExecutionContext$.MODULE$, Option$.MODULE$.apply(null));

                if (result.isLeft()) {
                    throw new CommandException(
                            String.format("Evaluating path expression failed:\n\n%s",
                                    result.left().get()),
                            "path");
                }

                return JavaConverters.asJavaCollectionConverter(result.right().get())
                        .asJavaCollection();
            });

    printResult(expression, values, treeNodes);
}
 
Example 26
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
  java.util.List<String> topics = request.topics();
  TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

  for (int i = 0; i < topicMetadataArray.length; i++) {
    String topic = topics.get(i);
    if (!topic.equals(topicName)) {
      topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else {
      PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
      for (int j = 0; j < partitionCount; j++) {
        java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
        List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
        partitionMetadataArray[j] =
            new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
                emptyScalaList, Errors.NONE.code());
      }

      Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
      topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
    }
  }

  Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
  Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);

  return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
 
Example 27
Source Project: kafka-eagle   Source File: KafkaServiceImpl.java    License: Apache License 2.0
/**
 * Obtaining kafka consumer information from zookeeper.
 */
public Map<String, List<String>> getConsumers(String clusterAlias) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	Map<String, List<String>> consumers = new HashMap<String, List<String>>();
	try {
		Seq<String> subConsumerPaths = zkc.getChildren(CONSUMERS_PATH);
		List<String> groups = JavaConversions.seqAsJavaList(subConsumerPaths);
		for (String group : groups) {
			String path = CONSUMERS_PATH + "/" + group + "/owners";
			if (zkc.pathExists(path)) {
				Seq<String> owners = zkc.getChildren(path);
				List<String> ownersSerialize = JavaConversions.seqAsJavaList(owners);
				consumers.put(group, ownersSerialize);
			} else {
				LOG.error("Consumer Path[" + path + "] is not exist.");
			}
		}
	} catch (Exception ex) {
		LOG.error(ex.getMessage());
	} finally {
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return consumers;
}
 
Example 28
Source Project: kafka-eagle   Source File: TestKafkaServiceImpl.java    License: Apache License 2.0
public List<String> findTopicPartition(String clusterAlias, String topic) {
	KafkaZkClient zkc = zkPool.getZkClient(clusterAlias);
	Seq<String> brokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH + "/" + topic + "/partitions");
	List<String> topicAndPartitions = JavaConversions.seqAsJavaList(brokerTopicsPaths);
	if (zkc != null) {
		zkPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return topicAndPartitions;
}