scala.collection.JavaConversions Java Examples

The following examples show how to use scala.collection.JavaConversions. They are extracted from open source projects; the source file, project, and license are noted above each example.
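Before diving into the project examples, here is a minimal, self-contained sketch of the round-trip conversions most of these snippets rely on. It is an illustration rather than code from any of the projects below, and it assumes a Scala 2.11-era scala-library on the classpath (where scala.collection.JavaConversions is still available):

import java.util.Arrays;
import java.util.List;
import scala.collection.JavaConversions;
import scala.collection.Seq;

public class JavaConversionsSketch {
    public static void main(String[] args) {
        // Java -> Scala: view a java.util.List as a Scala Buffer (a Seq subtype).
        List<String> javaList = Arrays.asList("a", "b", "c");
        Seq<String> scalaSeq = JavaConversions.asScalaBuffer(javaList);

        // Scala -> Java: view the Seq back as a java.util.List; both are wrappers, not copies.
        List<String> roundTripped = JavaConversions.seqAsJavaList(scalaSeq);
        System.out.println(roundTripped); // prints [a, b, c]
    }
}

Because these conversions are views over the same underlying collection, the examples below typically convert right at the Java/Scala API boundary instead of copying data.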
Example #1
Source File: UnitTestCoverageCollector.java    From ExecDashboard with Apache License 2.0
private void updateCollectorItemMetricDetail(CollectorItemMetricDetail collectorItemMetricDetail, Row itemRow) {
    Date timeWindowDt = itemRow.getAs("timeWindow");
    List<String> scaMetricList = Arrays.asList("coverage");
    Collection<Object> javaCollection = JavaConversions.asJavaCollection(((WrappedArray) itemRow.getAs("metrics")).toList());

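    // Each element of the "metrics" WrappedArray is a GenericRowWithSchema; keep only "coverage" entries.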
    Optional.ofNullable(javaCollection)
            .orElseGet(Collections::emptyList)
            .forEach(m -> {
                GenericRowWithSchema genericRowWithSchema = (GenericRowWithSchema) m;
                String existingLabelName = genericRowWithSchema.getAs("name");
                if (scaMetricList.contains(existingLabelName)) {
                    String valueStr = genericRowWithSchema.getAs("value");
                    try {
                        double value = Double.parseDouble(valueStr);
                        MetricCount mc = getMetricCount("", value, "unit-test-coverage");
                        if (mc != null) {
                            collectorItemMetricDetail.setStrategy(getCollectionStrategy());
                            collectorItemMetricDetail.addCollectorItemMetricCount(timeWindowDt, mc);
                            collectorItemMetricDetail.setLastScanDate(timeWindowDt);
                        }
                    } catch (Exception e) {
                        LOGGER.info("Exception: Not a number, 'value' = " + valueStr, e);
                    }
                }
            });
}
 
Example #2
Source File: BeansInitializer.java    From gsn with GNU General Public License v3.0
public static StreamSource source(SourceConf sc) {
    StreamSource s = new StreamSource();
    s.setAlias(sc.alias());
    s.setSqlQuery(sc.query());
    if (sc.slide().isDefined())
        s.setRawSlideValue(sc.slide().get());
    if (sc.samplingRate().isDefined())
        s.setSamplingRate(((Double) sc.samplingRate().get()).floatValue());
    if (sc.disconnectBufferSize().isDefined())
        s.setDisconnectedBufferSize((Integer) sc.disconnectBufferSize().get());
    if (sc.storageSize().isDefined())
        s.setRawHistorySize(sc.storageSize().get());
    AddressBean[] add = new AddressBean[sc.wrappers().size()];
    int i = 0;
    for (WrapperConf w : JavaConversions.asJavaIterable(sc.wrappers())) {
        add[i] = address(w);
        i++;
    }
    s.setAddressing(add);
    return s;
}
 
Example #3
Source File: SamzaExecutor.java    From samza with Apache License 2.0
@Override
public List<String> listTables(ExecutionContext context) throws ExecutorException {
  String address = environmentVariableHandler.getEnvironmentVariable(SAMZA_SQL_SYSTEM_KAFKA_ADDRESS);
  if (address == null || address.isEmpty()) {
    address = DEFAULT_SERVER_ADDRESS;
  }
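  // Connect to ZooKeeper, list every registered topic, and qualify each with the Kafka system name.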
  try {
    ZkUtils zkUtils = new ZkUtils(new ZkClient(address, DEFAULT_ZOOKEEPER_CLIENT_TIMEOUT),
        new ZkConnection(address), false);
    return JavaConversions.seqAsJavaList(zkUtils.getAllTopics())
      .stream()
      .map(x -> SAMZA_SYSTEM_KAFKA + "." + x)
      .collect(Collectors.toList());
  } catch (ZkTimeoutException ex) {
    throw new ExecutorException(ex);
  }
}
 
Example #4
Source File: MLContextConversionUtil.java    From systemds with Apache License 2.0
/**
 * Convert a {@code MatrixObject} to a {@code RDD<String>} in IJV format.
 *
 * @param matrixObject
 *            the {@code MatrixObject}
 * @return the {@code MatrixObject} converted to a {@code RDD<String>}
 */
public static RDD<String> matrixObjectToRDDStringIJV(MatrixObject matrixObject) {

	// NOTE: The following works when called from Java but does not
	// currently work when called from Spark Shell (when you call
	// collect() on the RDD<String>).
	//
	// JavaRDD<String> javaRDD = jsc.parallelize(list);
	// RDD<String> rdd = JavaRDD.toRDD(javaRDD);
	//
	// Therefore, we call parallelize() on the SparkContext rather than
	// the JavaSparkContext to produce the RDD<String> for Scala.

	List<String> list = matrixObjectToListStringIJV(matrixObject);

	ClassTag<String> tag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
	return sc().parallelize(JavaConversions.asScalaBuffer(list), sc().defaultParallelism(), tag);
}
 
Example #5
Source File: RangePartitionCoalescer.java    From gatk with BSD 3-Clause "New" or "Revised" License
@Override
public PartitionGroup[] coalesce(int maxPartitions, RDD<?> parent) {
    if (maxPartitions != parent.getNumPartitions()) {
        throw new IllegalArgumentException("Cannot use " + getClass().getSimpleName() +
                " with a different number of partitions to the parent RDD.");
    }
    List<Partition> partitions = Arrays.asList(parent.getPartitions());
    PartitionGroup[] groups = new PartitionGroup[partitions.size()];

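    // Build one PartitionGroup per partition, preferring the parent's first preferred
    // location and spanning partitions [i, maxEndPartitionIndexes.get(i)].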
    for (int i = 0; i < partitions.size(); i++) {
        Seq<String> preferredLocations = parent.getPreferredLocations(partitions.get(i));
        scala.Option<String> preferredLocation = scala.Option.apply
                (preferredLocations.isEmpty() ? null : preferredLocations.apply(0));
        PartitionGroup group = new PartitionGroup(preferredLocation);
        List<Partition> partitionsInGroup =
                partitions.subList(i, maxEndPartitionIndexes.get(i) + 1);
        group.partitions().append(JavaConversions.asScalaBuffer(partitionsInGroup));
        groups[i] = group;
    }
    return groups;
}
 
Example #6
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
/**
 * @return All topic names
 */
public List<String> listTopics() {
    String zookeeperHost = zookeeperString;
    boolean isSecure = false;
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        LOGGER.info("Executing: ListTopics");
        return JavaConversions.asJavaCollection(adminZkClient.getAllTopicConfigs().keys())
                .stream()
                .collect(Collectors.toList());
    } finally {
        zkClient.close();
    }
}
 
Example #7
Source File: LibraryPolicyCollector.java    From ExecDashboard with Apache License 2.0
private void updateCollectorItemMetricDetail(CollectorItemMetricDetail collectorItemMetricDetail, Row itemRow, String type) {
    Date timeWindowDt = itemRow.getAs("timeWindow");
    Collection<Object> javaCollection = JavaConversions.asJavaCollection(((WrappedArray) itemRow.getAs(type)).toList());

    Optional.ofNullable(javaCollection)
        .orElseGet(Collections::emptyList)
        .forEach(m -> {
            GenericRowWithSchema genericRowWithSchema = (GenericRowWithSchema) m;
            String level = genericRowWithSchema.getAs("level");
            int value = genericRowWithSchema.getAs("count");
            MetricCount mc = getMetricCount(level, value, type);
            if (mc != null) {
                collectorItemMetricDetail.setStrategy(getCollectionStrategy());
                collectorItemMetricDetail.addCollectorItemMetricCount(timeWindowDt, mc);
                collectorItemMetricDetail.setLastScanDate(timeWindowDt);
            }
        });
}
 
Example #8
Source File: Spark.java    From tinkerpop with Apache License 2.0
public static void refresh() {
    if (null == CONTEXT)
        throw new IllegalStateException("The Spark context has not been created.");
    if (CONTEXT.isStopped())
        recreateStopped();

    final Set<String> keepNames = new HashSet<>();
    for (final RDD<?> rdd : JavaConversions.asJavaIterable(CONTEXT.persistentRdds().values())) {
        if (null != rdd.name()) {
            keepNames.add(rdd.name());
            NAME_TO_RDD.put(rdd.name(), rdd);
        }
    }
    // remove all stale names in the NAME_TO_RDD map
    NAME_TO_RDD.keySet().stream().filter(key -> !keepNames.contains(key)).collect(Collectors.toList()).forEach(NAME_TO_RDD::remove);
}
 
Example #9
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static Set<Integer> brokerHostnamesToBrokerIds(
        ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Set<Integer> brokerIdSet = Sets.newHashSet();
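    // Match brokers by the hostname of their PLAINTEXT endpoint.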
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        if (brokerHostnameSet.contains(endpoint.host())) {
            brokerIdSet.add(broker.id());
        }
    }
    Preconditions.checkArgument(!checkPresence ||
            brokerHostnameSet.size() == brokerIdSet.size(),
            "Some hostnames could not be found! We found: " + brokerIdSet);

    return brokerIdSet;
}
 
Example #10
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get the topic list ({@link #topicList()}), including cgroup topics, from ZooKeeper. */
public List<String> topicList(String clusterAlias) {
	List<String> topics = new ArrayList<>();
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.cgroup.enable")) {
		topics = SystemConfigUtils.getPropertyArrayList(clusterAlias + ".kafka.eagle.sasl.cgroup.topics", ",");
	} else if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.cgroup.enable")) {
		topics = SystemConfigUtils.getPropertyArrayList(clusterAlias + ".kafka.eagle.ssl.cgroup.topics", ",");
	} else {
		KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
		try {
			if (zkc.pathExists(BROKER_TOPICS_PATH)) {
				Seq<String> subBrokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH);
				topics = JavaConversions.seqAsJavaList(subBrokerTopicsPaths);
				excludeTopic(topics);
			}
		} catch (Exception e) {
			LOG.error("Failed to get topic list.", e);
		}
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return topics;
}
 
Example #11
Source File: Topicutil.java    From dk-fitting with Apache License 2.0
    /**
     * Query all topics, including those already marked for deletion but not yet removed.
     * @return the list of topic names
     */
    public static List<String> queryAllTopic(){
        ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
        ArrayList<String> topics = new ArrayList<String>();
        scala.collection.Map<String, Properties> stringPropertiesMap = AdminUtils.fetchAllTopicConfigs(zkUtils);
        Map<String, Properties> javaMap = JavaConversions.mapAsJavaMap(stringPropertiesMap);
        for (String topicName : javaMap.keySet()) {
            topics.add(topicName);
        }
        zkUtils.close();
        return topics;
    }
 
Example #12
Source File: BeansInitializer.java    From gsn with GNU General Public License v3.0
public static AddressBean address(WrapperConf w) {
    KeyValueImp[] p = new KeyValueImp[w.params().size()];
    Iterable<String> keys = JavaConversions.asJavaIterable(w.params().keys());
    int i = 0;
    for (String k : keys) {
        p[i] = new KeyValueImp(k, w.params().apply(k));
        i++;
    }
    AddressBean a = new AddressBean(w.wrapper(), p);
    if (w.partialKey().isDefined()) {
        a.setPartialOrderKey(w.partialKey().get());
    }
    DataField[] out = new DataField[w.output().size()];
    for (int j = 0; j < out.length; j++) {
        out[j] = dataField(w.output().apply(j));
    }
    a.setVsconfig(out);
    return a;
}
 
Example #13
Source File: TopicPartitionCountObserver.java    From uReplicator with Apache License 2.0
private void updateTopicPartitionInfoMap(final Set<String> topicsToCheck) {
  if (topicsToCheck.size() > 0) {
    // get topic partition count and maybe update partition counts for existing topics
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
        zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.copyOf(topicsToCheck)));

    for (String topic : topicsToCheck) {
      try {
        topicPartitionMap.put(topic, partitionAssignmentForTopics.get(topic).get().size());
      } catch (Exception e) {
        logger.warn("Failed to get topicPartition info for topic={} of zkPath={}",
            topic, zkPath, e);
      }
    }
  }
}
 
Example #14
Source File: KafkaHubServiceImpl.java    From kafka-eagle with Apache License 2.0
private File createKafkaTempJson(Map<TopicPartition, Seq<Object>> tuple) throws IOException {
	JSONObject object = new JSONObject();
	object.put("version", 1);
	JSONArray array = new JSONArray();
	for (Entry<TopicPartition, Seq<Object>> entry : JavaConversions.mapAsJavaMap(tuple).entrySet()) {
		List<Object> replicas = JavaConversions.seqAsJavaList(entry.getValue());
		JSONObject tpObject = new JSONObject();
		tpObject.put("topic", entry.getKey().topic());
		tpObject.put("partition", entry.getKey().partition());
		tpObject.put("replicas", replicas);
		array.add(tpObject);
	}
	object.put("partitions", array);
	File f = File.createTempFile("ke_reassignment_", ".json");
	try (FileWriter out = new FileWriter(f)) {
		out.write(object.toJSONString());
	}
	f.deleteOnExit();
	return f;
}
 
Example #15
Source File: KafkaStoreUtils.java    From data-highway with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void verifyTopic(ZkUtils zkUtils, String topic) {
  Set topics = new HashSet();
  topics.add(topic);

  // check # partition and the replication factor
  scala.collection.mutable.Map partitionAssignmentForTopics = zkUtils
      .getPartitionAssignmentForTopics(JavaConversions.asScalaSet(topics).toSeq());
  scala.collection.Map partitionAssignment = (scala.collection.Map) partitionAssignmentForTopics.get(topic).get();

  if (partitionAssignment.size() != 1) {
    throw new RuntimeException(String.format("The schema topic %s should have only 1 partition.", topic));
  }

  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
    throw new RuntimeException(String.format("The retention policy of the schema topic %s must be compact.", topic));
  }
}
 
Example #16
Source File: FilterRowPredicate.java    From components with Apache License 2.0
private List<Object> getInputFields(IndexedRecord inputRecord, String columnName) {
    // Adapt non-avpath syntax to avpath.
    // TODO: This should probably not be automatic, use the actual syntax.
    if (!columnName.startsWith("."))
        columnName = "." + columnName;
    Try<scala.collection.immutable.List<Evaluator.Ctx>> result = wandou.avpath.package$.MODULE$.select(inputRecord,
            columnName);
    List<Object> values = new ArrayList<Object>();
    if (result.isSuccess()) {
        for (Evaluator.Ctx ctx : JavaConversions.asJavaCollection(result.get())) {
            values.add(ctx.value());
        }
    } else {
        // Evaluating the expression failed, and we can handle the exception.
        Throwable t = result.failed().get();
        throw ProcessingErrorCode.createAvpathSyntaxError(t, columnName, -1);
    }
    return values;
}
 
Example #17
Source File: KafkaInfos.java    From DCMonitor with MIT License
public List<String> getTopics() {
  try {
    return JavaConversions.asJavaList(ZkUtils.getAllTopics(zkClient));
  } catch (Exception e) {
    log.error(e, "could not get topics");
    return Collections.emptyList();
  }
}
 
Example #18
Source File: KafkaBrokerTopicObserver.java    From uReplicator with Apache License 2.0
@Override
public void handleChildChange(String parentPath, List<String> currentChilds)
    throws Exception {
  if (!tryToRefreshCache()) {
    synchronized (_lock) {
      currentChilds.removeAll(KAFKA_INNER_TOPICS);
      LOGGER.info("starting to refresh topic list due to zk child change, currentChilds : {}", currentChilds);
      Set<String> newAddedTopics = new HashSet<>(currentChilds);
      Set<String> currentServingTopics = getAllTopics();
      newAddedTopics.removeAll(currentServingTopics);
      for (String existedTopic : currentServingTopics) {
        if (!currentChilds.contains(existedTopic)) {
          _topicPartitionInfoMap.remove(existedTopic);
        }
      }
      scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
          _zkUtils.getPartitionAssignmentForTopics(
              JavaConversions.asScalaBuffer(ImmutableList.copyOf(newAddedTopics)));

      for (String topic : newAddedTopics) {
        try {
          scala.collection.Map<Object, Seq<Object>> partitionsMap =
              partitionAssignmentForTopics.get(topic).get();
          TopicPartition tp = new TopicPartition(topic, partitionsMap.size());
          _topicPartitionInfoMap.put(topic, tp);
        } catch (Exception e) {
          LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
        }
      }
      LOGGER.info("added {} new topics to topic list in zk child change", newAddedTopics.size());
      _kafkaTopicsCounter.inc(_topicPartitionInfoMap.size() - _kafkaTopicsCounter.getCount());
    }
  }
}
 
Example #19
Source File: KafkaManager.java    From spark-streaming-direct-kafka with Apache License 2.0
private static void fillInLatestOffsets(Map<TopicAndPartition, Long> offsets, Map<String, String> kafkaParams) {
    if (offsets.containsValue(null)) {

        Set<TopicAndPartition> needOffset = offsets.entrySet().stream().filter(entry -> entry.getValue() == null)
                .map(Map.Entry::getKey).collect(Collectors.toSet());
        log.info("No initial offsets for " + needOffset + "; reading from Kafka");

        // The high price of calling private Scala stuff:
        @SuppressWarnings("unchecked")
        scala.collection.immutable.Map<String, String> kafkaParamsScalaMap =
                (scala.collection.immutable.Map<String, String>)
                        scala.collection.immutable.Map$.MODULE$.apply(JavaConversions.mapAsScalaMap(kafkaParams)
                                .toSeq());
        @SuppressWarnings("unchecked")
        scala.collection.immutable.Set<TopicAndPartition> needOffsetScalaSet =
                (scala.collection.immutable.Set<TopicAndPartition>)
                        scala.collection.immutable.Set$.MODULE$.apply(JavaConversions.asScalaSet(needOffset)
                                .toSeq());

        KafkaCluster kc = new KafkaCluster(kafkaParamsScalaMap);
        Map<TopicAndPartition, ?> leaderOffsets =
                JavaConversions.mapAsJavaMap(kc.getLatestLeaderOffsets(needOffsetScalaSet).right().get());
        leaderOffsets.forEach((tAndP, leaderOffsetsObj) -> {
            // Can't reference LeaderOffset class, so, hack away:
            Matcher m = Pattern.compile("LeaderOffset\\([^,]+,[^,]+,([^)]+)\\)").matcher(leaderOffsetsObj
                    .toString());
            Preconditions.checkState(m.matches());
            offsets.put(tAndP, Long.valueOf(m.group(1)));
        });
    }
}
 
Example #20
Source File: SourceRDD.java    From beam with Apache License 2.0
@Override
public scala.collection.Iterator<scala.Tuple2<Source<T>, CheckpointMarkT>> compute(
    Partition split, TaskContext context) {
  @SuppressWarnings("unchecked")
  CheckpointableSourcePartition<T, CheckpointMarkT> partition =
      (CheckpointableSourcePartition<T, CheckpointMarkT>) split;
  scala.Tuple2<Source<T>, CheckpointMarkT> tuple2 =
      new scala.Tuple2<>(partition.getSource(), partition.checkpointMark);
  return JavaConversions.asScalaIterator(Collections.singleton(tuple2).iterator());
}
 
Example #21
Source File: ZKOffsetGetter.java    From kmanager with Apache License 2.0
@Override
public Map<String, List<String>> getActiveTopicMap() {
	Map<String, List<String>> topicGroupsMap = new HashMap<String, List<String>>();
	List<String> consumers = ZKUtils.getChildren(ZkUtils.ConsumersPath());
	for (String consumer : consumers) {
		Map<String, scala.collection.immutable.List<ConsumerThreadId>> consumer_consumerThreadId = null;
		try {
			consumer_consumerThreadId = JavaConversions
					.mapAsJavaMap(ZKUtils.getZKUtilsFromKafka().getConsumersPerTopic(consumer, true));
		} catch (Exception e) {
			LOG.warn("getActiveTopicMap-> getConsumersPerTopic for group: " + consumer + " failed! "
					+ e.getMessage());
			// TODO: the content of /consumers/{group}/ids/{id} is malformed; this consumer group is broken
			continue;
		}
		Set<String> topics = consumer_consumerThreadId.keySet();
		topics.forEach(topic -> {
			List<String> _groups = null;
			if (topicGroupsMap.containsKey(topic)) {
				_groups = topicGroupsMap.get(topic);
				_groups.add(consumer);
			} else {
				_groups = new ArrayList<String>();
				_groups.add(consumer);
			}
			topicGroupsMap.put(topic, _groups);
		});
	}
	return topicGroupsMap;
}
 
Example #22
Source File: EncoderHelpers.java    From beam with Apache License 2.0
/**
 * Wrap a Beam coder into a Spark Encoder using Catalyst Expression Encoders (which uses java code
 * generation).
 */
public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
  Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
  ClassTag<T> classTag = ClassTag$.MODULE$.apply(clazz);
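  // Serializer side: encode the value bound at ordinal 0 to binary with the Beam coder.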
  List<Expression> serializers =
      Collections.singletonList(
          new EncodeUsingBeamCoder<>(new BoundReference(0, new ObjectType(clazz), true), coder));

  return new ExpressionEncoder<>(
      SchemaHelpers.binarySchema(),
      false,
      JavaConversions.collectionAsScalaIterable(serializers).toSeq(),
      new DecodeUsingBeamCoder<>(
          new Cast(new GetColumnByOrdinal(0, BinaryType), BinaryType), classTag, coder),
      classTag);
}
 
Example #23
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printCurrentAssignment(ZkUtils zkUtils, List<String> specifiedTopics) {
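    // Fall back to every topic in the cluster when no explicit topic list is given.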
    Seq<String> topics = specifiedTopics != null ?
            JavaConversions.iterableAsScalaIterable(specifiedTopics).toSeq() :
            zkUtils.getAllTopics();
    System.out.println("CURRENT ASSIGNMENT:");
    System.out.println(
            zkUtils.formatAsReassignmentJson(zkUtils.getReplicaAssignmentForTopics(
                    topics)));
}
 
Example #24
Source File: KafkaPartitionLevelConsumerTest.java    From incubator-pinot with Apache License 2.0
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
  java.util.List<String> topics = request.topics();
  TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

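  // Report UNKNOWN_TOPIC_OR_PARTITION for every topic except the one under test.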
  for (int i = 0; i < topicMetadataArray.length; i++) {
    String topic = topics.get(i);
    if (!topic.equals(topicName)) {
      topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else {
      PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
      for (int j = 0; j < partitionCount; j++) {
        java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
        List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
        partitionMetadataArray[j] =
            new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
                emptyScalaList, Errors.NONE.code());
      }

      Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
      topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
    }
  }

  Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
  Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);

  return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
 
Example #25
Source File: SparkSqlInterpreter.java    From Explorer with Apache License 2.0
private int[] getProgressFromStage_1_1x(JobProgressListener sparkListener, Stage stage) {
    int numTasks = stage.numTasks();
    int completedTasks = 0;

    try {
        Method stageIdToData = sparkListener.getClass().getMethod("stageIdToData");
        HashMap<Tuple2<Object, Object>, Object> stageIdData = (HashMap<Tuple2<Object, Object>, Object>) stageIdToData
                .invoke(sparkListener);
        Class<?> stageUIDataClass = this.getClass().forName("org.apache.spark.ui.jobs.UIData$StageUIData");

        Method numCompletedTasks = stageUIDataClass.getMethod("numCompleteTasks");

        Set<Tuple2<Object, Object>> keys = JavaConverters.asJavaSetConverter(stageIdData.keySet()).asJava();
        for (Tuple2<Object, Object> k : keys) {
            if (stage.id() == (int) k._1()) {
                Object uiData = stageIdData.get(k).get();
                completedTasks += (int) numCompletedTasks.invoke(uiData);
            }
        }
    } catch (Exception e) {
        logger.error("Error on getting progress information", e);
    }

    List<Stage> parents = JavaConversions.asJavaList(stage.parents());
    if (parents != null) {
        for (Stage s : parents) {
            int[] p = getProgressFromStage_1_1x(sparkListener, s);
            numTasks += p[0];
            completedTasks += p[1];
        }
    }
    return new int[] { numTasks, completedTasks };
}
 
Example #26
Source File: TestKryo.java    From deeplearning4j with Apache License 2.0
@Test
public void testScalaCollections() {
    //Scala collections should already work with Spark + kryo; some very basic tests to check this is still the case
    SerializerInstance si = sc.env().serializer().newInstance();

    scala.collection.immutable.Map<Integer, String> emptyImmutableMap =
                    scala.collection.immutable.Map$.MODULE$.empty();
    testSerialization(emptyImmutableMap, si);

    Map<Integer, Double> m = new HashMap<>();
    m.put(0, 1.0);

    scala.collection.Map<Integer, Double> m2 = JavaConversions.mapAsScalaMap(m);
    testSerialization(m2, si);
}
 
Example #27
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Obtaining metadata in zookeeper by topic.
 *
 * @param topic
 *            Selected condition.
 * @return List.
 */
public List<String> findTopicPartition(String clusterAlias, String topic) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	Seq<String> brokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH + "/" + topic + "/partitions");
	List<String> topicAndPartitions = JavaConversions.seqAsJavaList(brokerTopicsPaths);
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
		brokerTopicsPaths = null;
	}
	return topicAndPartitions;
}
 
Example #28
Source File: SparkUtils.java    From spliceengine with GNU Affero General Public License v3.0
/**
 * Convert sort columns to Spark sort {@code Column}s, shifting the 1-based column IDs to 0-based indices.
 * @param sortColumns the column orderings to convert
 * @return a Scala buffer of Spark sort columns
 */
public static scala.collection.mutable.Buffer<Column> convertSortColumns(ColumnOrdering[] sortColumns){
    return Arrays
            .stream(sortColumns)
            .map(column -> column.getIsAscending() ?
                    (column.getIsNullsOrderedLow() ? asc_nulls_first(ValueRow.getNamedColumn(column.getColumnId()-1)) :
                                                     asc_nulls_last(ValueRow.getNamedColumn(column.getColumnId()-1))) :
                    (column.getIsNullsOrderedLow() ? desc_nulls_last(ValueRow.getNamedColumn(column.getColumnId()-1)) :
                                                     desc_nulls_first(ValueRow.getNamedColumn(column.getColumnId()-1))))
            .collect(Collectors.collectingAndThen(Collectors.toList(), JavaConversions::asScalaBuffer));
}
 
Example #29
Source File: RangePartitionCoalescerUnitTest.java    From gatk with BSD 3-Clause "New" or "Revised" License
@Test
public void testNonIdentity() {
    List<Integer> maxEndPartitionIndexes = ImmutableList.of(1, 2, 2);
    RangePartitionCoalescer coalescer = new RangePartitionCoalescer(maxEndPartitionIndexes);
    PartitionGroup[] groups = coalescer.coalesce(rdd.getNumPartitions(), rdd.rdd());
    assertEquals(groups.length, 3);
    assertEquals(groups[0].partitions(), JavaConversions.asScalaBuffer(ImmutableList.of(partitions[0], partitions[1])));
    assertEquals(groups[1].partitions(), JavaConversions.asScalaBuffer(ImmutableList.of(partitions[1], partitions[2])));
    assertEquals(groups[2].partitions(), JavaConversions.asScalaBuffer(ImmutableList.of(partitions[2])));
}
 
Example #30
Source File: KafkaTopicConfigProvider.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public Map<String, Properties> allTopicConfigs() {
  KafkaZkClient kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(_connectString,
                                                                            ZK_KAFKA_TOPIC_CONFIG_PROVIDER_METRIC_GROUP,
                                                                            ZK_KAFKA_TOPIC_CONFIG_PROVIDER_METRIC_TYPE,
                                                                            _zkSecurityEnabled);
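  // getAllTopicConfigs() returns a scala.collection.Map; convert it for Java callers before closing the client.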
  try {
    AdminZkClient adminZkClient = new AdminZkClient(kafkaZkClient);
    return JavaConversions.mapAsJavaMap(adminZkClient.getAllTopicConfigs());
  } finally {
    KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(kafkaZkClient);
  }
}