scala.collection.Seq Java Examples

The following examples show how to use scala.collection.Seq from Java. Each example notes the source file, the project it comes from, and that project's license.
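
Most of the snippets below lean on the scala.collection.JavaConverters bridge (or the older scala.collection.JavaConversions) to move between java.util collections and Seq. As a quick orientation, here is a minimal round-trip sketch; it is standalone, not taken from any project below, and assumes the Scala 2.12-era converter methods that several of these examples call:

import java.util.Arrays;
import java.util.List;
import scala.collection.JavaConverters;
import scala.collection.Seq;

public class SeqBridge {
    public static void main(String[] args) {
        // Java -> Scala: wrap the java.util.List as a mutable Buffer, then view it as a Seq
        List<String> javaList = Arrays.asList("a", "b", "c");
        Seq<String> scalaSeq = JavaConverters.asScalaBuffer(javaList).toSeq();

        // Scala -> Java: view the Seq as a java.util.List again (a wrapper, not a copy)
        List<String> backToJava = JavaConverters.seqAsJavaList(scalaSeq);
        System.out.println(backToJava); // prints [a, b, c]
    }
}
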
Example #1
Source File: TopicPartitionCountObserver.java    From uReplicator with Apache License 2.0
private void updateTopicPartitionInfoMap(final Set<String> topicsToCheck) {
  if (topicsToCheck.size() > 0) {
    // get topic partition count and maybe update partition counts for existing topics
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
        zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.copyOf(topicsToCheck)));

    for (String topic : topicsToCheck) {
      try {
        topicPartitionMap.put(topic, partitionAssignmentForTopics.get(topic).get().size());
      } catch (Exception e) {
        logger.warn("Failed to get topicPartition info for topic={} of zkPath={}",
            topic, zkPath, e);
      }
    }
  }
}
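
Note that partitionAssignmentForTopics.get(topic) returns a scala.Option, and calling get() on an empty Option throws NoSuchElementException; the try/catch above absorbs exactly that case. A hedged alternative (the helper name is illustrative, not part of uReplicator) checks the Option before unwrapping:

private int partitionCountOrZero(
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments,
    String topic) {
  // Option.isDefined avoids the NoSuchElementException thrown by get() on None
  scala.Option<scala.collection.Map<Object, Seq<Object>>> maybe = assignments.get(topic);
  return maybe.isDefined() ? maybe.get().size() : 0;
}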
 
Example #2
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
/**
 * @param topic Kafka topic
 * @param partitionsToReassign a map from partition (int) to new replica list (int seq)
 *
 * @return a JSON string in the same format as the output of kafka.utils.ZkUtils.formatAsReassignmentJson
 *
 * Example:
 * <pre>
 *   {"version":1,"partitions":[
 *     {"topic":"kmf-topic","partition":1,"replicas":[0,1]},
 *     {"topic":"kmf-topic","partition":2,"replicas":[1,2]},
 *     {"topic":"kmf-topic","partition":0,"replicas":[2,0]}]}
 * </pre>
 */
private static String formatAsNewReassignmentJson(String topic, scala.collection.Map<Object, Seq<Object>> partitionsToReassign) {
  StringBuilder builder = new StringBuilder();
  builder.append("{\"version\":1,\"partitions\":[\n");
  for (int partition = 0; partition < partitionsToReassign.size(); partition++) {
    builder.append("  {\"topic\":\"").append(topic).append("\",\"partition\":").append(partition).append(",\"replicas\":[");
    Seq<Object> replicas = partitionsToReassign.apply(partition);
    for (int replicaIndex = 0; replicaIndex < replicas.size(); replicaIndex++) {
      Object replica = replicas.apply(replicaIndex);
      builder.append(replica).append(",");
    }
    builder.setLength(builder.length() - 1);
    builder.append("]},\n");
  }
  builder.setLength(builder.length() - 2);
  builder.append("]}");
  return builder.toString();
}
 
Example #3
Source File: KafkaInfos.java    From DCMonitor with MIT License
public List<PartitionInfo> getPartitionInfos(String group, String topic) {
  Seq<String> singleTopic = JavaConversions.asScalaBuffer(Collections.singletonList(topic)).toSeq();
  scala.collection.Map<String, Seq<Object>> pidMap = ZkUtils.getPartitionsForTopics(zkClient, singleTopic);
  Option<Seq<Object>> partitions = pidMap.get(topic);
  if (partitions.isEmpty()) {
    return Collections.emptyList();
  }
  List<PartitionInfo> infos = Lists.newArrayList();
  for (Object o : JavaConversions.asJavaList(partitions.get())) {
    PartitionInfo info = getPartitionInfo(group, topic, Int.unbox(o));
    if (info != null) {
      infos.add(info);
    }
  }
  return infos;
}
 
Example #4
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
private scala.collection.Map<TopicAndPartition, Seq<Object>> getAssignmentPlan(
    Map<TopicPartition, Integer[]> replicasMap) {
  scala.collection.mutable.HashMap<TopicAndPartition, Seq<Object>> result =
      new scala.collection.mutable.HashMap<>();

  for (Map.Entry<TopicPartition, Integer[]> entry : replicasMap.entrySet()) {
    TopicPartition tp = entry.getKey();
    TopicAndPartition tap = new TopicAndPartition(tp.topic(), tp.partition());
    List<Object> objs = Arrays.asList(entry.getValue()).stream()
        .map(val -> (Object) val).collect(Collectors.toList());
    Seq<Object> replicas = JavaConverters.asScalaBuffer(objs).seq();
    result.put(tap, replicas);
  }

  assert replicasMap.size() == result.size();
  LOG.debug("replicaMap.size = {}, result.size = {}", replicasMap.size(), result.size());
  return result;
}
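
The (Object) val cast above is forced by erasure: the Scala-side element type is Seq[Int], and Scala's Int erases to Object in generic positions, so the Java caller must box each replica id and widen it to Object. A stripped-down sketch of just that conversion (class and variable names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import scala.collection.JavaConverters;
import scala.collection.Seq;

public class ReplicaSeqSketch {
  public static void main(String[] args) {
    Integer[] replicas = {0, 1, 2};
    // Box and widen to Object, because Seq[Int] erases to Seq<Object> in the Java view
    List<Object> objs = Arrays.stream(replicas)
        .map(r -> (Object) r)
        .collect(Collectors.toList());
    Seq<Object> seq = JavaConverters.asScalaBuffer(objs).toSeq();
    System.out.println(seq.size()); // prints 3
  }
}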
 
Example #5
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
private scala.collection.Map<Object, Seq<Object>> getReplicaAssignmentForTopic(
    ZkUtils zkUtils, String topic) {
  if (topicPartitionAssignments.containsKey(topic)) {
    return topicPartitionAssignments.get(topic);
  }
  List<String> topics = new ArrayList<>();
  topics.add(topic);
  Seq<String> topicsSeq = scala.collection.JavaConverters.asScalaBuffer(topics).toSeq();

  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments;
  assignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

  scala.collection.Map<Object, Seq<Object>> partitionAssignment = assignments.get(topic).get();
  topicPartitionAssignments.put(topic, partitionAssignment);
  return partitionAssignment;
}
 
Example #6
Source File: OpenKoreanTextPhraseExtractor.java    From elasticsearch-analysis-openkoreantext with Apache License 2.0
private Seq<KoreanToken> convertPhrasesToTokens(Seq<KoreanPhrase> phrases) {
    KoreanToken[] tokens = new KoreanToken[phrases.length()];

    Iterator<KoreanPhrase> iterator = phrases.iterator();
    int i = 0;
    while (iterator.hasNext()) {
        KoreanPhrase phrase = iterator.next();
        tokens[i++] = new KoreanToken(phrase.text(), phrase.pos(), phrase.offset(), phrase.length(), scala.Option.apply(null), false);
    }

    Arrays.sort(tokens, (o1, o2) -> Integer.compare(o1.offset(), o2.offset()));

    return JavaConverters.asScalaBuffer(Arrays.asList(tokens)).toSeq();
}
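
scala.Option.apply(null) above is the standard way to pass None from Java: Option.apply maps null to None and any other value to Some. A minimal sketch:

import scala.Option;

public class OptionSketch {
    public static void main(String[] args) {
        Option<String> none = Option.apply(null);   // null becomes None
        Option<String> some = Option.apply("stem"); // non-null becomes Some("stem")
        System.out.println(none.isDefined());       // prints false
        System.out.println(some.get());             // prints stem
    }
}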
 
Example #7
Source File: CassandraConnectorITCase.java    From flink with Apache License 2.0
@Test
public void testCassandraScalaTupleAtLeastOnceSinkBuilderDetection() throws Exception {
	Class<scala.Tuple1<String>> c = (Class<scala.Tuple1<String>>) new scala.Tuple1<>("hello").getClass();
	Seq<TypeInformation<?>> typeInfos = JavaConverters.asScalaBufferConverter(
		Collections.<TypeInformation<?>>singletonList(BasicTypeInfo.STRING_TYPE_INFO)).asScala();
	Seq<String> fieldNames = JavaConverters.asScalaBufferConverter(
		Collections.singletonList("_1")).asScala();

	CaseClassTypeInfo<scala.Tuple1<String>> typeInfo = new CaseClassTypeInfo<scala.Tuple1<String>>(c, null, typeInfos, fieldNames) {
		@Override
		public TypeSerializer<scala.Tuple1<String>> createSerializer(ExecutionConfig config) {
			return null;
		}
	};

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<scala.Tuple1<String>> input = env.fromElements(new scala.Tuple1<>("hello")).returns(typeInfo);

	CassandraSink.CassandraSinkBuilder<scala.Tuple1<String>> sinkBuilder = CassandraSink.addSink(input);
	assertTrue(sinkBuilder instanceof CassandraSink.CassandraScalaProductSinkBuilder);
}
 
Example #8
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get the Kafka broker count from ZooKeeper. */
public long brokerNumbers(String clusterAlias) {
	long count = 0;
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			count = JavaConversions.seqAsJavaList(subBrokerIdsPaths).size();
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker numbers has error, msg is " + e.getCause().getMessage());
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return count;
}
 
Example #9
Source File: KafkaProduceOffsetFetcher.java    From DDMQ with Apache License 2.0
private Map<Integer, Map<String, List<Integer>>> parseMetadataResponse(TopicMetadataResponse response) {
    Map<Integer/*broker id*/, Map<String/*topic*/, List<Integer>/*partition id*/>> metadata = Maps.newHashMap();
    Seq<TopicMetadata> topicMetadatas = response.topicsMetadata();
    for (TopicMetadata topicMetadata : JavaConverters.asJavaListConverter(topicMetadatas).asJava()) {
        List<PartitionMetadata> partitionsMetadata = JavaConverters.asJavaListConverter(topicMetadata.partitionsMetadata()).asJava();
        String topic = topicMetadata.topic();
        for (PartitionMetadata partitionMetadata : partitionsMetadata) {
            int partitionId = partitionMetadata.partitionId();
            int brokerId = partitionMetadata.leader().get().id();
            if (!metadata.containsKey(brokerId)) {
                metadata.put(brokerId, Maps.newHashMap());
            }
            if (!metadata.get(brokerId).containsKey(topic)) {
                metadata.get(brokerId).put(topic, Lists.newArrayList());
            }
            metadata.get(brokerId).get(topic).add(partitionId);
        }
    }
    return metadata;
}
 
Example #10
Source File: SCoverageReportMojo.java    From scoverage-maven-plugin with Apache License 2.0
private void writeReports( Coverage coverage, List<File> sourceRoots, File coberturaXmlOutputDirectory,
                           File scoverageXmlOutputDirectory, File scoverageHtmlOutputDirectory )
{
    Seq<File> sourceRootsAsScalaSeq = JavaConverters.asScalaBuffer( sourceRoots );

    new CoberturaXmlWriter( sourceRootsAsScalaSeq, coberturaXmlOutputDirectory ).write( coverage );
    getLog().info( String.format( "Written Cobertura XML report [%s]",
                                  new File( coberturaXmlOutputDirectory, "cobertura.xml" ).getAbsolutePath() ) );

    new ScoverageXmlWriter( sourceRootsAsScalaSeq, scoverageXmlOutputDirectory, false ).write( coverage );
    getLog().info( String.format( "Written XML coverage report [%s]",
                                  new File( scoverageXmlOutputDirectory, "scoverage.xml" ).getAbsolutePath() ) );

    new ScoverageHtmlWriter( sourceRootsAsScalaSeq, scoverageHtmlOutputDirectory, Option.<String>apply( encoding ) ).write( coverage );
    getLog().info( String.format( "Written HTML coverage report [%s]",
                                  new File( scoverageHtmlOutputDirectory, "index.html" ).getAbsolutePath() ) );

    getLog().info( String.format( "Statement coverage.: %s%%", coverage.statementCoverageFormatted() ) );
    getLog().info( String.format( "Branch coverage....: %s%%", coverage.branchCoverageFormatted() ) );
}
 
Example #11
Source File: KafkaBrokerTopicObserver.java    From uReplicator with Apache License 2.0
private void tryAddTopic(String topic) {
  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
      _zkUtils.getPartitionAssignmentForTopics(
          JavaConversions.asScalaBuffer(ImmutableList.of(topic)));
  if (partitionAssignmentForTopics.get(topic).isEmpty()
      || partitionAssignmentForTopics.get(topic).get().size() == 0) {
    LOGGER.debug("try to refresh for topic {} but found no topic partition for it", topic);
    return;
  }
  synchronized (_lock) {
    LOGGER.info("starting to refresh for adding topic {}", topic);
    if (!getAllTopics().contains(topic)) {
      try {
        _topicPartitionInfoMap.put(topic, new TopicPartition(topic,
            partitionAssignmentForTopics.get(topic).get().size()));
      } catch (Exception e) {
        LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
      }
    }
    LOGGER.info("finished refreshing for adding topic {}", topic);
  }
}
 
Example #12
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
@Override
public int getLeaderToShutDown(String topic) throws Exception {
	ZkClient zkClient = createZkClient();
	PartitionMetadata firstPart = null;
	do {
		if (firstPart != null) {
			LOG.info("Unable to find leader. error code {}", firstPart.errorCode());
			// not the first try. Sleep a bit
			Thread.sleep(150);
		}

		Seq<PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient).partitionsMetadata();
		firstPart = partitionMetadata.head();
	}
	while (firstPart.errorCode() != 0);
	zkClient.close();

	return firstPart.leader().get().id();
}
 
Example #13
Source File: CassandraConnectorITCase.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCassandraScalaTupleAtLeastOnceSinkBuilderDetection() throws Exception {
	Class<scala.Tuple1<String>> c = (Class<scala.Tuple1<String>>) new scala.Tuple1<>("hello").getClass();
	Seq<TypeInformation<?>> typeInfos = JavaConverters.asScalaBufferConverter(
		Collections.<TypeInformation<?>>singletonList(BasicTypeInfo.STRING_TYPE_INFO)).asScala();
	Seq<String> fieldNames = JavaConverters.asScalaBufferConverter(
		Collections.singletonList("_1")).asScala();

	CaseClassTypeInfo<scala.Tuple1<String>> typeInfo = new CaseClassTypeInfo<scala.Tuple1<String>>(c, null, typeInfos, fieldNames) {
		@Override
		public TypeSerializer<scala.Tuple1<String>> createSerializer(ExecutionConfig config) {
			return null;
		}
	};

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<scala.Tuple1<String>> input = env.fromElements(new scala.Tuple1<>("hello")).returns(typeInfo);

	CassandraSink.CassandraSinkBuilder<scala.Tuple1<String>> sinkBuilder = CassandraSink.addSink(input);
	assertTrue(sinkBuilder instanceof CassandraSink.CassandraScalaProductSinkBuilder);
}
 
Example #14
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get broker id list. */
public List<Object> getBrokerIdList(String clusterAlias) {
	List<Object> brokerIds = new ArrayList<>();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			for (String id : JavaConversions.seqAsJavaList(subBrokerIdsPaths)) {
				brokerIds.add(Integer.parseInt(id));
			}
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker id has error, msg is ", e);
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return brokerIds;
}
 
Example #15
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Obtaining kafka consumer information from zookeeper.
 */
public Map<String, List<String>> getConsumers(String clusterAlias) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	Map<String, List<String>> consumers = new HashMap<String, List<String>>();
	try {
		Seq<String> subConsumerPaths = zkc.getChildren(CONSUMERS_PATH);
		List<String> groups = JavaConversions.seqAsJavaList(subConsumerPaths);
		for (String group : groups) {
			String path = CONSUMERS_PATH + "/" + group + "/owners";
			if (zkc.pathExists(path)) {
				Seq<String> owners = zkc.getChildren(path);
				List<String> ownersSerialize = JavaConversions.seqAsJavaList(owners);
				consumers.put(group, ownersSerialize);
			} else {
				LOG.error("Consumer Path[" + path + "] is not exist.");
			}
		}
	} catch (Exception ex) {
		LOG.error("Failed to get consumers from ZooKeeper.", ex);
	} finally {
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return consumers;
}
 
Example #16
Source File: UserType.java    From DataGenerator with Apache License 2.0
/**
 * Get allowable child types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable child types
 */
@Override
public Seq<UserTypeVal> getAllowableChildTypes(Node<UserStub> nodeOfThisType) {
    LinkedList<UserTypeVal> list = new LinkedList<>();
    list.add(UserType.PUBLIC_USER);
    return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}
 
Example #17
Source File: MLContextTest.java    From systemds with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testInputTupleSeqNoMetadataDML() {
	System.out.println("MLContextTest - Tuple sequence no metadata DML");

	List<String> list1 = new ArrayList<>();
	list1.add("1,2");
	list1.add("3,4");
	JavaRDD<String> javaRDD1 = sc.parallelize(list1);
	RDD<String> rdd1 = JavaRDD.toRDD(javaRDD1);

	List<String> list2 = new ArrayList<>();
	list2.add("5,6");
	list2.add("7,8");
	JavaRDD<String> javaRDD2 = sc.parallelize(list2);
	RDD<String> rdd2 = JavaRDD.toRDD(javaRDD2);

	Tuple2 tuple1 = new Tuple2("m1", rdd1);
	Tuple2 tuple2 = new Tuple2("m2", rdd2);
	List tupleList = new ArrayList();
	tupleList.add(tuple1);
	tupleList.add(tuple2);
	Seq seq = JavaConversions.asScalaBuffer(tupleList).toSeq();

	Script script = dml("print('sums: ' + sum(m1) + ' ' + sum(m2));").in(seq);
	setExpectedStdOut("sums: 10.0 26.0");
	ml.execute(script);
}
 
Example #18
Source File: KafkaTopicService.java    From Decision with Apache License 2.0
@Override
public void createOrUpdateTopic(String topic, int replicationFactor, int partitions) {
    logger.debug("Creating topic {} with replication {} and {} partitions", topic, replicationFactor, partitions);
    Topic.validate(topic);
    Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
    Map<Object, Seq<Object>> partitionReplicaAssignment = AdminUtils.assignReplicasToBrokers(brokerList,
            partitions, replicationFactor, AdminUtils.assignReplicasToBrokers$default$4(),
            AdminUtils.assignReplicasToBrokers$default$5());
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, partitionReplicaAssignment,
            AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK$default$4(),
            AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK$default$5());
    logger.debug("Topic {} created", topic);
}
 
Example #19
Source File: DeepRDD.java    From deep-spark with Apache License 2.0
@Override
public Seq<String> getPreferredLocations(Partition split) {
    initExtractorClient();

    List<String> locations = extractorClient.getPreferredLocations(split);
    if (locations == null || locations.isEmpty()) {
        return super.getPreferredLocations(split);
    }

    return asScalaBuffer(locations);
}
 
Example #20
Source File: ReporterMetricsAdapter.java    From zipkin-finagle with Apache License 2.0
@Override public void incrementMessagesDropped(Throwable cause) {
  if (cause instanceof FinagleSender.WrappedException) cause = cause.getCause();
  Seq<Traversable<String>> paths = Throwables.mkString(cause).inits().toSeq();
  for (Iterator<Traversable<String>> i = paths.iterator(); i.hasNext();) {
    messagesDropped.counter(i.next().toSeq()).incr();
  }
}
 
Example #21
Source File: KamonHistogram.java    From ditto with Eclipse Public License 2.0
@Override
public Long[] getRecordedValues() {
    final List<Long> values = new ArrayList<>();
    final Seq<Bucket> buckets = getSnapshot(false).distribution().buckets();
    buckets.foreach(bucket -> addBucketValuesToList(bucket, values));
    return values.toArray(new Long[0]);
}
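
Passing a Java lambda straight to Seq.foreach, as above, works because Scala 2.12+ compiles scala.Function1 as a functional interface; on Scala 2.11 you would typically subclass scala.runtime.AbstractFunction1 instead. A hedged sketch of the 2.12-style call (standalone, not from ditto):

import java.util.Arrays;
import scala.collection.JavaConverters;
import scala.collection.Seq;

public class ForeachSketch {
    public static void main(String[] args) {
        Seq<Integer> seq = JavaConverters.asScalaBuffer(Arrays.asList(1, 2, 3)).toSeq();
        // foreach takes a Function1 whose result is ignored, so returning null is fine
        seq.foreach(n -> {
            System.out.println(n);
            return null;
        });
    }
}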
 
Example #22
Source File: TestKafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
public List<String> findTopicPartition(String clusterAlias, String topic) {
	KafkaZkClient zkc = zkPool.getZkClient(clusterAlias);
	Seq<String> brokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH + "/" + topic + "/partitions");
	List<String> topicAndPartitions = JavaConversions.seqAsJavaList(brokerTopicsPaths);
	if (zkc != null) {
		zkPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return topicAndPartitions;
}
 
Example #23
Source File: URPChecker.java    From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zookeeper = commandLine.getOptionValue(ZOOKEEPER);

  ZkUtils zkUtils = KafkaUtils.getZkUtils(zookeeper);
  Seq<String> topicsSeq = zkUtils.getAllTopics();
  List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);

  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
      partitionAssignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

  Map<String, Integer> replicationFactors = new HashMap<>();
  Map<String, Integer> partitionCounts = new HashMap<>();

  topics.forEach(topic -> {
    int partitionCount = partitionAssignments.get(topic).get().size();
    int factor = partitionAssignments.get(topic).get().head()._2().size();
    partitionCounts.put(topic, partitionCount);
    replicationFactors.put(topic, factor);
  });

  List<PartitionInfo> urps = KafkaClusterManager.getUnderReplicatedPartitions(
      zookeeper, SecurityProtocol.PLAINTEXT, null, topics, partitionAssignments, replicationFactors, partitionCounts);

  for (PartitionInfo partitionInfo : urps) {
    LOG.info("under-replicated : {}", partitionInfo);
  }
}
 
Example #24
Source File: BpmnaiUtils.java    From bpmn.ai with BSD 3-Clause "New" or "Revised" License
public Dataset<Row> removeDuplicatedColumns(Dataset<Row> dataset) {
    Dataset<Row> newDataset;
    //remove duplicated columns
    //find duplicated columns and their first name under which they occurred
    String[] columns = dataset.columns();
    Map<String, Column> uniqueColumnNameMapping = new HashMap<>();

    Pattern p = Pattern.compile("(\\w+_)\\d*");
    for(String col : columns) {
        Matcher m = p.matcher(col);
        if(m.matches()) {
            if(!uniqueColumnNameMapping.containsKey(m.group(1))) {
                uniqueColumnNameMapping.put(m.group(1), new Column(col));
            }
        }
    }

    Seq<Column> selectionColumns = JavaConverters.asScalaIteratorConverter(uniqueColumnNameMapping.values().iterator()).asScala().toSeq();

    //create new dataset if necessary
    if(columns.length != uniqueColumnNameMapping.size()) {

        newDataset = dataset.select(selectionColumns).toDF();

        //rename columns
        Map<String, String> swappedUniqueColumnNameMapping = new HashMap<>();
        for(String key : uniqueColumnNameMapping.keySet()) {
            swappedUniqueColumnNameMapping.put(uniqueColumnNameMapping.get(key).toString(), key);
        }

        for(String column : newDataset.columns()) {
            newDataset = newDataset.withColumnRenamed(column, swappedUniqueColumnNameMapping.get(column));
        }

        return newDataset;
    } else {
        return dataset;
    }
}
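
dataset.select(selectionColumns) compiles because Spark declares select(cols: Column*) with Scala varargs, which Java sees as a single Seq<Column> parameter. A hedged helper showing the same pattern (the method name is illustrative; assumes Spark 2.x on Scala 2.11/2.12, where the parameter type is scala.collection.Seq):

import java.util.Arrays;
import java.util.stream.Collectors;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import scala.collection.JavaConverters;
import scala.collection.Seq;

public class SelectSketch {
    // Illustrative helper: project a Dataset onto the given column names.
    public static Dataset<Row> project(Dataset<Row> dataset, String... names) {
        // Scala varargs (Column*) surface in Java as a Seq<Column>
        Seq<Column> cols = JavaConverters.asScalaBuffer(
                Arrays.stream(names).map(Column::new).collect(Collectors.toList())).toSeq();
        return dataset.select(cols);
    }
}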
 
Example #25
Source File: ZkConsumerCommand.java    From jeesuite-libs with Apache License 2.0
public List<BrokerInfo> fetchAllBrokers(){
	List<BrokerInfo> result = new ArrayList<>();
	Seq<Broker> brokers = zkUtils.getAllBrokersInCluster();
	Iterator<Broker> iterator = brokers.toList().iterator();
	while(iterator.hasNext()){
		Broker broker = iterator.next();
		Node node = broker.getNode(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).get();
		result.add(new BrokerInfo(node.idString(), node.host(), node.port()));
	}
	return result;
}
 
Example #26
Source File: PathCommand.java    From rug-cli with GNU General Public License v3.0
@Command
public void run(@Argument(index = 1, defaultValue = "") String expression,
        @Option("change-dir") String rootName, @Option("values") boolean values) {

    Collection<GraphNode> treeNodes = new ProgressReportingOperationRunner<Collection<GraphNode>>(
            "Evaluating path expression against project").run((indicator) -> {

                PathExpression pathExpression = PathExpressionParser$.MODULE$
                        .parseString(expression);

                File root = FileUtils.createProjectRoot(rootName);
                ArtifactSource source = ArtifactSourceUtils.createArtifactSource(root);

                ExpressionEngine pxe = new PathExpressionEngine();
                TreeNode pmv = new ProjectMutableView(new EmptyArtifactSource(""), source);

                Either<String, Seq<GraphNode>> result = pxe.evaluate(pmv, pathExpression,
                        DefaultExecutionContext$.MODULE$, Option$.MODULE$.apply(null));

                if (result.isLeft()) {
                    throw new CommandException(
                            String.format("Evaluating path expression failed:\n\n%s",
                                    result.left().get()),
                            "path");
                }

                return JavaConverters.asJavaCollectionConverter(result.right().get())
                        .asJavaCollection();
            });

    printResult(expression, values, treeNodes);
}
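
Testing isLeft() and then unwrapping the matching projection, as above, is the usual Java-side pattern for a scala.util.Either result. A self-contained hedged sketch (the parse example is illustrative):

import scala.util.Either;
import scala.util.Left;
import scala.util.Right;

public class EitherSketch {
    // Errors go on the Left, successful results on the Right.
    static Either<String, Integer> parse(String s) {
        try {
            return new Right<>(Integer.parseInt(s));
        } catch (NumberFormatException e) {
            return new Left<>("not a number: " + s);
        }
    }

    public static void main(String[] args) {
        Either<String, Integer> result = parse("42");
        if (result.isLeft()) {
            System.err.println(result.left().get());
        } else {
            System.out.println(result.right().get()); // prints 42
        }
    }
}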
 
Example #27
Source File: UserType.java    From DataGenerator with Apache License 2.0
/**
 * Get allowable child types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable child types
 */
@Override
public Seq<UserTypeVal> getAllowableChildTypes(Node<UserStub> nodeOfThisType) {
    LinkedList<UserTypeVal> list = new LinkedList<>();
    list.add(UserType.SOCIAL_NETWORK_EMPLOYEE);
    list.add(UserType.PUBLIC_USER);
    return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}
 
Example #28
Source File: OperatorUtil.java    From doctorkafka with Apache License 2.0
public static String getBrokers(String zkUrl, SecurityProtocol securityProtocol) {
  ZkUtils zkUtils = getZkUtils(zkUrl);
  Seq<Broker> brokersSeq = zkUtils.getAllBrokersInCluster();
  Broker[] brokers = new Broker[brokersSeq.size()];
  brokersSeq.copyToArray(brokers);

  String brokersStr = Arrays.stream(brokers)
      .map(b -> b.brokerEndPoint(
          ListenerName.forSecurityProtocol(securityProtocol)).connectionString())
      .reduce(null, (a, b) -> (a == null) ? b : a + "," + b);
  return brokersStr;
}
 
Example #29
Source File: FlinkAggregateJoinTransposeRule.java    From flink with Apache License 2.0
/**
 * Convert aggregate with AUXILIARY_GROUP to regular aggregate.
 * Return original aggregate and null project if the given aggregate does not contain AUXILIARY_GROUP,
 * else new aggregate without AUXILIARY_GROUP and a project to permute output columns if needed.
 */
private Pair<Aggregate, List<RexNode>> toRegularAggregate(Aggregate aggregate) {
	Tuple2<int[], Seq<AggregateCall>> auxGroupAndRegularAggCalls = AggregateUtil.checkAndSplitAggCalls(aggregate);
	final int[] auxGroup = auxGroupAndRegularAggCalls._1;
	final Seq<AggregateCall> regularAggCalls = auxGroupAndRegularAggCalls._2;
	if (auxGroup.length != 0) {
		int[] fullGroupSet = AggregateUtil.checkAndGetFullGroupSet(aggregate);
		ImmutableBitSet newGroupSet = ImmutableBitSet.of(fullGroupSet);
		List<AggregateCall> aggCalls = JavaConverters.seqAsJavaListConverter(regularAggCalls).asJava();
		final Aggregate newAgg = aggregate.copy(
				aggregate.getTraitSet(),
				aggregate.getInput(),
				aggregate.indicator,
				newGroupSet,
				com.google.common.collect.ImmutableList.of(newGroupSet),
				aggCalls);
		final List<RelDataTypeField> aggFields = aggregate.getRowType().getFieldList();
		final List<RexNode> projectAfterAgg = new ArrayList<>();
		for (int i = 0; i < fullGroupSet.length; ++i) {
			int group = fullGroupSet[i];
			int index = newGroupSet.indexOf(group);
			projectAfterAgg.add(new RexInputRef(index, aggFields.get(i).getType()));
		}
		int fieldCntOfAgg = aggFields.size();
		for (int i = fullGroupSet.length; i < fieldCntOfAgg; ++i) {
			projectAfterAgg.add(new RexInputRef(i, aggFields.get(i).getType()));
		}
		Preconditions.checkArgument(projectAfterAgg.size() == fieldCntOfAgg);
		return new Pair<>(newAgg, projectAfterAgg);
	} else {
		return new Pair<>(aggregate, null);
	}
}
 
Example #30
Source File: KafkaPartitionLevelConsumerTest.java    From incubator-pinot with Apache License 2.0
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
  java.util.List<String> topics = request.topics();
  TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

  for (int i = 0; i < topicMetadataArray.length; i++) {
    String topic = topics.get(i);
    if (!topic.equals(topicName)) {
      topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else {
      PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
      for (int j = 0; j < partitionCount; j++) {
        java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
        List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
        partitionMetadataArray[j] =
            new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
                emptyScalaList, Errors.NONE.code());
      }

      Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
      topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
    }
  }

  Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
  Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);

  return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}