Java Code Examples for scala.collection.JavaConversions

The following examples show how to use scala.collection.JavaConversions. They are extracted from open source projects; the source project, file, and license are noted above each example where available.
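As a quick orientation before the project examples, here is a minimal, self-contained sketch (not drawn from any project on this page; the class name is made up) showing the two directions of conversion that JavaConversions provides. Note that scala.collection.JavaConversions has been deprecated since Scala 2.12 in favor of scala.collection.JavaConverters; the examples below target the older API.

import java.util.Arrays;
import java.util.List;

import scala.collection.JavaConversions;
import scala.collection.Seq;

public class JavaConversionsSketch {
    public static void main(String[] args) {
        // Java -> Scala: view a java.util.List as a scala.collection.Seq
        // (asScalaBuffer returns a mutable Buffer, which is a Seq)
        List<String> javaList = Arrays.asList("a", "b", "c");
        Seq<String> scalaSeq = JavaConversions.asScalaBuffer(javaList);

        // Scala -> Java: view the Seq back as a java.util.List
        List<String> roundTripped = JavaConversions.seqAsJavaList(scalaSeq);

        System.out.println(roundTripped); // prints [a, b, c]
    }
}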
Example 1
Source Project: data-highway   Source File: KafkaStoreUtils.java    License: Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void verifyTopic(ZkUtils zkUtils, String topic) {
  Set topics = new HashSet();
  topics.add(topic);

  // check the number of partitions
  scala.collection.mutable.Map partitionAssignmentForTopics = zkUtils
      .getPartitionAssignmentForTopics(JavaConversions.asScalaSet(topics).toSeq());
  scala.collection.Map partitionAssignment = (scala.collection.Map) partitionAssignmentForTopics.get(topic).get();

  if (partitionAssignment.size() != 1) {
    throw new RuntimeException(String.format("The schema topic %s should have only 1 partition.", topic));
  }

  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
    throw new RuntimeException(String.format("The retention policy of the schema topic %s must be compact.", topic));
  }
}
 
Example 2
Source Project: dk-fitting   Source File: Topicutil.java    License: Apache License 2.0
/**
 * Queries all topics, including topics that have been marked for deletion but not yet deleted.
 * @return the list of topic names
 */
public static List<String> queryAllTopic() {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
    // fetchAllTopicConfigs returns a Scala map keyed by topic name
    scala.collection.Map<String, Properties> stringPropertiesMap = AdminUtils.fetchAllTopicConfigs(zkUtils);
    Map<String, Properties> javaMap = JavaConversions.mapAsJavaMap(stringPropertiesMap);
    List<String> topics = new ArrayList<String>(javaMap.keySet());
    zkUtils.close();
    return topics;
}
 
Example 3
Source Project: ExecDashboard   Source File: UnitTestCoverageCollector.java    License: Apache License 2.0
private void updateCollectorItemMetricDetail(CollectorItemMetricDetail collectorItemMetricDetail, Row itemRow) {
    Date timeWindowDt = itemRow.getAs("timeWindow");
    List<String> scaMetricList = Arrays.asList("coverage");
    Collection<Object> javaCollection = JavaConversions.asJavaCollection(((WrappedArray) itemRow.getAs("metrics")).toList());

    Optional.ofNullable(javaCollection)
            .orElseGet(Collections::emptyList)
            .forEach(m -> {
                GenericRowWithSchema genericRowWithSchema = (GenericRowWithSchema) m;
                String existingLabelName = genericRowWithSchema.getAs("name");
                if (scaMetricList.contains(existingLabelName)) {
                    String valueStr = genericRowWithSchema.getAs("value");
                    try {
                        double value = Double.parseDouble(valueStr);
                        MetricCount mc = getMetricCount("", value, "unit-test-coverage");
                        if (mc != null) {
                            collectorItemMetricDetail.setStrategy(getCollectionStrategy());
                            collectorItemMetricDetail.addCollectorItemMetricCount(timeWindowDt, mc);
                            collectorItemMetricDetail.setLastScanDate(timeWindowDt);
                        }
                    } catch (Exception e) {
                        LOGGER.info("Exception: Not a number, 'value' = " + valueStr, e);
                    }
                }
            });
}
 
Example 4
Source Project: ExecDashboard   Source File: LibraryPolicyCollector.java    License: Apache License 2.0
private void updateCollectorItemMetricDetail(CollectorItemMetricDetail collectorItemMetricDetail, Row itemRow, String type) {
    Date timeWindowDt = itemRow.getAs("timeWindow");
    Collection<Object> javaCollection = JavaConversions.asJavaCollection(((WrappedArray) itemRow.getAs(type)).toList());

    Optional.ofNullable(javaCollection)
        .orElseGet(Collections::emptyList)
        .forEach(m -> {
            GenericRowWithSchema genericRowWithSchema = (GenericRowWithSchema) m;
            String level = genericRowWithSchema.getAs("level");
            int value = genericRowWithSchema.getAs("count");
            MetricCount mc = getMetricCount(level, value, type);
            if (mc != null) {
                collectorItemMetricDetail.setStrategy(getCollectionStrategy());
                collectorItemMetricDetail.addCollectorItemMetricCount(timeWindowDt, mc);
                collectorItemMetricDetail.setLastScanDate(timeWindowDt);
            }
        });
}
 
Example 5
Source Project: SkaETL   Source File: KafkaUnit.java    License: Apache License 2.0
/**
 * @return All topic names
 */
public List<String> listTopics() {
    String zookeeperHost = zookeeperString;
    Boolean isSecure = false;
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        LOGGER.info("Executing: ListTopics");
        return JavaConversions.asJavaCollection(adminZkClient.getAllTopicConfigs().keys())
                .stream()
                .collect(Collectors.toList());
    } finally {
        zkClient.close();
    }
}
 
Example 6
Source Project: systemds   Source File: MLContextConversionUtil.java    License: Apache License 2.0
/**
 * Convert a {@code MatrixObject} to a {@code RDD<String>} in IJV format.
 *
 * @param matrixObject
 *            the {@code MatrixObject}
 * @return the {@code MatrixObject} converted to a {@code RDD<String>}
 */
public static RDD<String> matrixObjectToRDDStringIJV(MatrixObject matrixObject) {

	// NOTE: The following works when called from Java but does not
	// currently work when called from Spark Shell (when you call
	// collect() on the RDD<String>).
	//
	// JavaRDD<String> javaRDD = jsc.parallelize(list);
	// RDD<String> rdd = JavaRDD.toRDD(javaRDD);
	//
	// Therefore, we call parallelize() on the SparkContext rather than
	// the JavaSparkContext to produce the RDD<String> for Scala.

	List<String> list = matrixObjectToListStringIJV(matrixObject);

	ClassTag<String> tag = scala.reflect.ClassTag$.MODULE$.apply(String.class);
	return sc().parallelize(JavaConversions.asScalaBuffer(list), sc().defaultParallelism(), tag);
}
 
Example 7
@Override
public PartitionGroup[] coalesce(int maxPartitions, RDD<?> parent) {
    if (maxPartitions != parent.getNumPartitions()) {
        throw new IllegalArgumentException("Cannot use " + getClass().getSimpleName() +
                " with a different number of partitions to the parent RDD.");
    }
    List<Partition> partitions = Arrays.asList(parent.getPartitions());
    PartitionGroup[] groups = new PartitionGroup[partitions.size()];

    for (int i = 0; i < partitions.size(); i++) {
        Seq<String> preferredLocations = parent.getPreferredLocations(partitions.get(i));
        scala.Option<String> preferredLocation = scala.Option.apply
                (preferredLocations.isEmpty() ? null : preferredLocations.apply(0));
        PartitionGroup group = new PartitionGroup(preferredLocation);
        List<Partition> partitionsInGroup =
                partitions.subList(i, maxEndPartitionIndexes.get(i) + 1);
        group.partitions().append(JavaConversions.asScalaBuffer(partitionsInGroup));
        groups[i] = group;
    }
    return groups;
}
 
Example 8
Source Project: samza   Source File: SamzaExecutor.java    License: Apache License 2.0
@Override
public List<String> listTables(ExecutionContext context) throws ExecutorException {
  String address = environmentVariableHandler.getEnvironmentVariable(SAMZA_SQL_SYSTEM_KAFKA_ADDRESS);
  if (address == null || address.isEmpty()) {
    address = DEFAULT_SERVER_ADDRESS;
  }
  try {
    ZkUtils zkUtils = new ZkUtils(new ZkClient(address, DEFAULT_ZOOKEEPER_CLIENT_TIMEOUT),
        new ZkConnection(address), false);
    return JavaConversions.seqAsJavaList(zkUtils.getAllTopics())
      .stream()
      .map(x -> SAMZA_SYSTEM_KAFKA + "." + x)
      .collect(Collectors.toList());
  } catch (ZkTimeoutException ex) {
    throw new ExecutorException(ex);
  }
}
 
Example 9
Source Project: kafka-assigner   Source File: KafkaAssignmentGenerator.java    License: Apache License 2.0
private static Set<Integer> brokerHostnamesToBrokerIds(
        ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Set<Integer> brokerIdSet = Sets.newHashSet();
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        if (brokerHostnameSet.contains(endpoint.host())) {
            brokerIdSet.add(broker.id());
        }
    }
    Preconditions.checkArgument(!checkPresence ||
            brokerHostnameSet.size() == brokerIdSet.size(),
            "Some hostnames could not be found! We found: " + brokerIdSet);

    return brokerIdSet;
}
 
Example 10
Source Project: gsn   Source File: BeansInitializer.java    License: GNU General Public License v3.0
public static StreamSource source(SourceConf sc) {
    StreamSource s = new StreamSource();
    s.setAlias(sc.alias());
    s.setSqlQuery(sc.query());
    if (sc.slide().isDefined())
        s.setRawSlideValue(sc.slide().get());
    if (sc.samplingRate().isDefined())
        s.setSamplingRate(((Double) sc.samplingRate().get()).floatValue());
    if (sc.disconnectBufferSize().isDefined())
        s.setDisconnectedBufferSize(((Integer) sc.disconnectBufferSize().get()));
    if (sc.storageSize().isDefined())
        s.setRawHistorySize(sc.storageSize().get());
    AddressBean[] add = new AddressBean[sc.wrappers().size()];
    int i = 0;
    for (WrapperConf w : JavaConversions.asJavaIterable(sc.wrappers())) {
        add[i] = address(w);
        i++;
    }
    s.setAddressing(add);
    return s;
}
 
Example 11
Source Project: kafka-eagle   Source File: KafkaHubServiceImpl.java    License: Apache License 2.0
private File createKafkaTempJson(Map<TopicPartition, Seq<Object>> tuple) throws IOException {
	JSONObject object = new JSONObject();
	object.put("version", 1);
	JSONArray array = new JSONArray();
	for (Entry<TopicPartition, Seq<Object>> entry : JavaConversions.mapAsJavaMap(tuple).entrySet()) {
		List<Object> replicas = JavaConversions.seqAsJavaList(entry.getValue());
		JSONObject tpObject = new JSONObject();
		tpObject.put("topic", entry.getKey().topic());
		tpObject.put("partition", entry.getKey().partition());
		tpObject.put("replicas", replicas);
		array.add(tpObject);
	}
	object.put("partitions", array);
	File f = File.createTempFile("ke_reassignment_", ".json");
	// try-with-resources ensures the writer is closed even if the write fails
	try (FileWriter out = new FileWriter(f)) {
		out.write(object.toJSONString());
	}
	f.deleteOnExit();
	return f;
}
 
Example 12
Source Project: kafka-eagle   Source File: BrokerServiceImpl.java    License: Apache License 2.0
/** Get the topic list {@link #topicList()}, including cgroup topics, from ZooKeeper. */
public List<String> topicList(String clusterAlias) {
	List<String> topics = new ArrayList<>();
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.cgroup.enable")) {
		topics = SystemConfigUtils.getPropertyArrayList(clusterAlias + ".kafka.eagle.sasl.cgroup.topics", ",");
	} else if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.cgroup.enable")) {
		topics = SystemConfigUtils.getPropertyArrayList(clusterAlias + ".kafka.eagle.ssl.cgroup.topics", ",");
	} else {
		KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
		try {
			if (zkc.pathExists(BROKER_TOPICS_PATH)) {
				Seq<String> subBrokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH);
				topics = JavaConversions.seqAsJavaList(subBrokerTopicsPaths);
				excludeTopic(topics);
			}
		} catch (Exception e) {
			// log the full exception; e.getCause() may be null, so avoid dereferencing it
			LOG.error("Get topic list has error", e);
		}
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return topics;
}
 
Example 13
Source Project: tinkerpop   Source File: Spark.java    License: Apache License 2.0
public static void refresh() {
    if (null == CONTEXT)
        throw new IllegalStateException("The Spark context has not been created.");
    if (CONTEXT.isStopped())
        recreateStopped();

    final Set<String> keepNames = new HashSet<>();
    for (final RDD<?> rdd : JavaConversions.asJavaIterable(CONTEXT.persistentRdds().values())) {
        if (null != rdd.name()) {
            keepNames.add(rdd.name());
            NAME_TO_RDD.put(rdd.name(), rdd);
        }
    }
    // remove all stale names in the NAME_TO_RDD map
    NAME_TO_RDD.keySet().stream()
            .filter(key -> !keepNames.contains(key))
            .collect(Collectors.toList())
            .forEach(NAME_TO_RDD::remove);
}
 
Example 14
Source Project: gsn   Source File: BeansInitializer.java    License: GNU General Public License v3.0
public static AddressBean address(WrapperConf w) {
    KeyValueImp[] p = new KeyValueImp[w.params().size()];
    Iterable<String> keys = JavaConversions.asJavaIterable(w.params().keys());
    int i = 0;
    for (String k : keys) {
        p[i] = new KeyValueImp(k, w.params().apply(k));
        i++;
    }
    AddressBean a = new AddressBean(w.wrapper(), p);
    if (w.partialKey().isDefined()) {
        a.setPartialOrderKey(w.partialKey().get());
    }
    DataField[] out = new DataField[w.output().size()];
    for (int j = 0; j < out.length; j++) {
        out[j] = dataField(w.output().apply(j));
    }
    a.setVsconfig(out);
    return a;
}
 
Example 15
Source Project: uReplicator   Source File: TopicPartitionCountObserver.java    License: Apache License 2.0
private void updateTopicPartitionInfoMap(final Set<String> topicsToCheck) {
  if (topicsToCheck.size() > 0) {
    // get topic partition count and maybe update partition counts for existing topics
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
        zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.copyOf(topicsToCheck)));

    for (String topic : topicsToCheck) {
      try {
        topicPartitionMap.put(topic, partitionAssignmentForTopics.get(topic).get().size());
      } catch (Exception e) {
        logger.warn("Failed to get topicPartition info for topic={} of zkPath={}",
            topic, zkPath, e);
      }
    }
  }
}
 
Example 16
Source Project: beam   Source File: EncoderHelpers.java    License: Apache License 2.0
/**
 * Wrap a Beam coder into a Spark Encoder using Catalyst Expression Encoders (which uses java code
 * generation).
 */
public static <T> Encoder<T> fromBeamCoder(Coder<T> coder) {
  Class<? super T> clazz = coder.getEncodedTypeDescriptor().getRawType();
  ClassTag<T> classTag = ClassTag$.MODULE$.apply(clazz);
  List<Expression> serializers =
      Collections.singletonList(
          new EncodeUsingBeamCoder<>(new BoundReference(0, new ObjectType(clazz), true), coder));

  return new ExpressionEncoder<>(
      SchemaHelpers.binarySchema(),
      false,
      JavaConversions.collectionAsScalaIterable(serializers).toSeq(),
      new DecodeUsingBeamCoder<>(
          new Cast(new GetColumnByOrdinal(0, BinaryType), BinaryType), classTag, coder),
      classTag);
}
 
Example 17
Source Project: data-highway   Source File: KafkaAdminClient.java    License: Apache License 2.0
private Map<Object, List<Object>> getPartitionInfo(String topic) {
  scala.collection.Map<String, scala.collection.Map<Object, scala.collection.Seq<Object>>> partitionAssignmentForTopics = zkUtils
      .getPartitionAssignmentForTopics(scala.collection.immutable.Nil$.MODULE$.$colon$colon(topic));
  scala.collection.Map<Object, scala.collection.Seq<Object>> topicPartitionAssignment = partitionAssignmentForTopics
      .iterator()
      .next()._2;

  return mapAsJavaMap(topicPartitionAssignment).entrySet().stream()
      .collect(Collectors.toMap(Map.Entry::getKey, e -> JavaConversions.seqAsJavaList(e.getValue())));
}
 
Example 18
Source Project: data-highway   Source File: KafkaBrokerMonitor.java    License: Apache License 2.0
List<String> getBrokerIds() {
  return JavaConversions
      .seqAsJavaList(zkUtils.getAllBrokersInCluster())
      .stream()
      .map(Broker::id)
      .map(Object::toString)
      .collect(toList());
}
 
Example 19
Source Project: singer   Source File: OstrichAdminService.java    License: Apache License 2.0
@SuppressWarnings("restriction")
public void start() {
  try {
    Properties properties = new Properties();
    properties.load(this.getClass().getResource("build.properties").openStream());
    LOG.info("build.properties build_revision: {}",
        properties.getProperty("build_revision", "unknown"));
  } catch (Throwable t) {
    LOG.warn("Failed to load properties from build.properties");
  }
  Duration[] defaultLatchIntervals = {Duration.apply(1, TimeUnit.MINUTES)};
  Iterator<Duration> durationIterator = Arrays.asList(defaultLatchIntervals).iterator();

  AdminServiceFactory adminServiceFactory = new AdminServiceFactory(
          this.port,
          20,
          List$.MODULE$.<StatsFactory>empty(),
          Option.<String>empty(),
          List$.MODULE$.<Regex>empty(),
          Map$.MODULE$.<String, CustomHttpHandler>empty(),
          JavaConversions.asScalaIterator(durationIterator).toList());

  RuntimeEnvironment runtimeEnvironment = new RuntimeEnvironment(this);
  AdminHttpService service = adminServiceFactory.apply(runtimeEnvironment);
  for (Map.Entry<String, CustomHttpHandler> entry : this.customHttpHandlerMap.entrySet()) {
    service.httpServer().createContext(entry.getKey(), entry.getValue());
  }
}
 
Example 20
Source Project: kafka-assigner   Source File: KafkaTopicAssigner.java    License: Apache License 2.0
/**
 * Convert a Scala Kafka partition assignment into a Java one.
 * @param topicMap the output from ZkUtils#getPartitionAssignmentForTopics
 * @return a Java map representing the same data
 */
static Map<String, Map<Integer, List<Integer>>> topicMapToJavaMap(
        scala.collection.Map<String,
                scala.collection.Map<Object,
                        scala.collection.Seq<Object>>> topicMap) {
    // We can actually use utilities like Maps#transformEntries, but since that doesn't allow
    // changing the key type from Object to Integer, this code just goes into each map and makes
    // copies all the way down. Copying is also useful for avoiding possible repeated lazy
    // evaluations by the rebalancing algorithm.
    Map<String, Map<Integer, List<Integer>>> resultTopicMap = Maps.newHashMap();
    Map<String, scala.collection.Map<Object, scala.collection.Seq<Object>>> convertedTopicMap =
            JavaConversions.mapAsJavaMap(topicMap);
    for (Map.Entry<String, scala.collection.Map<Object,
            scala.collection.Seq<Object>>> topicMapEntry : convertedTopicMap.entrySet()) {
        String topic = topicMapEntry.getKey();
        Map<Object, scala.collection.Seq<Object>> convertedPartitionMap =
                JavaConversions.mapAsJavaMap(topicMapEntry.getValue());
        Map<Integer, List<Integer>> resultPartitionMap = Maps.newHashMap();
        for (Map.Entry<Object, scala.collection.Seq<Object>> partitionMapEntry :
                convertedPartitionMap.entrySet()) {
            Integer partition = (Integer) partitionMapEntry.getKey();
            List<Integer> replicaList = Lists.newArrayList(Lists.transform(
                    JavaConversions.seqAsJavaList(partitionMapEntry.getValue()),
                    new Function<Object, Integer>() {
                        @Override
                        public Integer apply(Object raw) {
                            return (Integer) raw;
                        }
                    }));
            resultPartitionMap.put(partition, replicaList);
        }
        resultTopicMap.put(topic, resultPartitionMap);
    }
    return resultTopicMap;
}
 
Example 21
Source Project: beam   Source File: EncoderHelpers.java    License: Apache License 2.0
@Override
public ExprCode doGenCode(CodegenContext ctx, ExprCode ev) {
  String accessCode = ctx.addReferenceObj("coder", coder, coder.getClass().getName());
  ExprCode input = child.genCode(ctx);
  String javaType = CodeGenerator.javaType(dataType());

  List<String> parts = new ArrayList<>();
  List<Object> args = new ArrayList<>();
  /*
    CODE GENERATED
    final ${javaType} ${ev.value} = org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.EncodeUsingBeamCoder.encode(${input.isNull()}, ${input.value}, ${coder});
  */
  parts.add("final ");
  args.add(javaType);
  parts.add(" ");
  args.add(ev.value());
  parts.add(
      " = org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers.EncodeUsingBeamCoder.encode(");
  args.add(input.isNull());
  parts.add(", ");
  args.add(input.value());
  parts.add(", ");
  args.add(accessCode);
  parts.add(");");

  StringContext sc =
      new StringContext(JavaConversions.collectionAsScalaIterable(parts).toSeq());
  Block code =
      (new Block.BlockHelper(sc)).code(JavaConversions.collectionAsScalaIterable(args).toSeq());

  return ev.copy(input.code().$plus(code), input.isNull(), ev.value());
}
 
Example 22
Source Project: tinkerpop   Source File: SparkTest.java    License: Apache License 2.0
private static int getPersistedRDDSize() {
    int counter = 0;
    for (final RDD<?> rdd : JavaConversions.asJavaIterable(Spark.getContext().persistentRdds().values())) {
        if (null != rdd.name())
            counter++;
    }
    return counter;
}
 
Example 23
Source Project: kylin-on-parquet-v2   Source File: CubeMergeJob.java    License: Apache License 2.0
private void mergeSegments(String cubeId, String segmentId) throws IOException {
    CubeManager mgr = CubeManager.getInstance(config);
    CubeInstance cube = mgr.getCubeByUuid(cubeId);
    CubeSegment mergedSeg = cube.getSegmentById(segmentId);
    SegmentInfo mergedSegInfo = ManagerHub.getSegmentInfo(config, getParam(MetadataConstants.P_CUBE_ID), mergedSeg.getUuid());

    Map<Long, DFLayoutMergeAssist> mergeCuboidsAssist = generateMergeAssist(mergingSegInfos, ss);
    for (DFLayoutMergeAssist assist : mergeCuboidsAssist.values()) {
        SpanningTree spanningTree = new ForestSpanningTree(JavaConversions.asJavaCollection(mergedSegInfo.toBuildLayouts()));
        Dataset<Row> afterMerge = assist.merge(config, cube.getName());
        LayoutEntity layout = assist.getLayout();

        Dataset<Row> afterSort;
        if (layout.isTableIndex()) {
            afterSort = afterMerge.sortWithinPartitions(NSparkCubingUtil.getColumns(layout.getOrderedDimensions().keySet()));
        } else {
            Column[] dimsCols = NSparkCubingUtil.getColumns(layout.getOrderedDimensions().keySet());
            Dataset<Row> afterAgg = CuboidAggregator.agg(ss, afterMerge, layout.getOrderedDimensions().keySet(),
                    layout.getOrderedMeasures(), spanningTree, false);
            afterSort = afterAgg.sortWithinPartitions(dimsCols);
        }
        buildLayoutWithUpdate.submit(new BuildLayoutWithUpdate.JobEntity() {
            @Override
            public String getName() {
                return "merge-layout-" + layout.getId();
            }

            @Override
            public LayoutEntity build() throws IOException {
                return saveAndUpdateCuboid(afterSort, mergedSegInfo, layout, assist);
            }
        }, config);

        buildLayoutWithUpdate.updateLayout(mergedSegInfo, config);
    }
}
 
Example 24
Source Project: Explorer   Source File: SparkInterpreter.java    License: Apache License 2.0
private int[] getProgressFromStage_1_1x(JobProgressListener sparkListener, Stage stage) {
    int numTasks = stage.numTasks();
    int completedTasks = 0;

    try {
        Method stageIdToData = sparkListener.getClass().getMethod("stageIdToData");
        HashMap<Tuple2<Object, Object>, Object> stageIdData = (HashMap<Tuple2<Object, Object>, Object>) stageIdToData
                .invoke(sparkListener);
        Class<?> stageUIDataClass = this.getClass().forName("org.apache.spark.ui.jobs.UIData$StageUIData");

        Method numCompletedTasks = stageUIDataClass.getMethod("numCompleteTasks");

        Set<Tuple2<Object, Object>> keys = JavaConverters.asJavaSetConverter(stageIdData.keySet()).asJava();
        for (Tuple2<Object, Object> k : keys) {
            if (stage.id() == (int) k._1()) {
                Object uiData = stageIdData.get(k).get();
                completedTasks += (int) numCompletedTasks.invoke(uiData);
            }
        }
    } catch (Exception e) {
        logger.error("Error on getting progress information", e);
    }

    List<Stage> parents = JavaConversions.asJavaList(stage.parents());
    if (parents != null) {
        for (Stage s : parents) {
            int[] p = getProgressFromStage_1_1x(sparkListener, s);
            numTasks += p[0];
            completedTasks += p[1];
        }
    }
    return new int[] { numTasks, completedTasks };
}
 
Example 25
Source Project: ExecDashboard   Source File: PipelineCollector.java    License: Apache License 2.0
private void updateCollectorItemMetricDetail(CollectorItemMetricDetail collectorItemMetricDetail, Row itemRow) {
    Date timeWindowDt = itemRow.getAs("timeWindow");
    LOGGER.info("TimeWindow:" + timeWindowDt);
    LOGGER.info("itemRow :" + itemRow);
    Collection<Object> javaCollection = JavaConversions.asJavaCollection(((WrappedArray) itemRow.getAs("prodStageList")).toList());

    Optional.ofNullable(javaCollection)
            .orElseGet(Collections::emptyList)
            .stream()
            .map(m -> (GenericRowWithSchema) m)
            .forEach(genericRowWithSchema -> {
                Long pipelineTimeL = genericRowWithSchema.getAs("timestamp");
                Date dateObj = new Timestamp(new Date(pipelineTimeL).getTime());
                LOGGER.info("Date Object :" + dateObj);
                Long scmTimeL = genericRowWithSchema.getAs("scmCommitTimestamp");
                Long pipelineTimeAfterIgnore = pipelineTimeL / 1000;
                Long scmTimeAfterIgnore = scmTimeL / 1000;
                try {
                    Long diffTimestamp = Math.abs(pipelineTimeAfterIgnore - scmTimeAfterIgnore);
                    String strTimestampInsec = Long.toString(diffTimestamp);
                    double value = Double.parseDouble(strTimestampInsec);
                    MetricCount mc = getMetricCount("", value, "pipeline-lead-time");
                    if (mc != null) {
                        collectorItemMetricDetail.setStrategy(getCollectionStrategy());
                        collectorItemMetricDetail.addCollectorItemMetricCount(dateObj, mc);
                        collectorItemMetricDetail.setLastScanDate(dateObj);
                    }
                } catch (NumberFormatException e) {
                    LOGGER.info("Exception: Not a number, 'value' = " + scmTimeAfterIgnore, e);
                }
            });
}
 
Example 26
Source Project: micro-server   Source File: SwaggerInitializer.java    License: Apache License 2.0
public SwaggerInitializer(ServerData serverData) {
	this.resourceClasses = JavaConversions.asScalaBuffer(
			serverData.getResources()
			.stream()
			.map(resource -> resource.getClass())
			.collect(Collectors.<Class<?>> toList())).toList();
	this.baseUrlPattern = serverData.getBaseUrlPattern();
}
 
Example 27
/**
 * Writes the supplied ACL information to ZK, where it will be picked up by the broker's authorizer.
 *
 * @param username    the who.
 * @param permission  the allow|deny.
 * @param resource    the thing
 * @param ops         the what.
 */
public void addUserAcl(final String username,
                       final AclPermissionType permission,
                       final Resource resource,
                       final Set<AclOperation> ops) {

  final KafkaPrincipal principal = new KafkaPrincipal("User", username);
  final PermissionType scalaPermission = PermissionType$.MODULE$.fromJava(permission);

  final Set<Acl> javaAcls = ops.stream()
      .map(Operation$.MODULE$::fromJava)
      .map(op -> new Acl(principal, scalaPermission, "*", op))
      .collect(Collectors.toSet());

  final scala.collection.immutable.Set<Acl> scalaAcls =
      JavaConversions.asScalaSet(javaAcls).toSet();

  kafka.security.auth.ResourceType scalaResType =
      ResourceType$.MODULE$.fromJava(resource.resourceType());

  final kafka.security.auth.Resource scalaResource =
      new kafka.security.auth.Resource(scalaResType, resource.name());

  authorizer.addAcls(scalaAcls, scalaResource);

  addedAcls.add(scalaResource);
}
 
Example 28
@Test
public void testIdentity() {
    List<Integer> maxEndPartitionIndexes = ImmutableList.of(0, 1, 2);
    RangePartitionCoalescer coalescer = new RangePartitionCoalescer(maxEndPartitionIndexes);
    PartitionGroup[] groups = coalescer.coalesce(rdd.getNumPartitions(), rdd.rdd());
    assertEquals(groups.length, 3);
    assertEquals(groups[0].partitions(), JavaConversions.asScalaBuffer(ImmutableList.of(partitions[0])));
    assertEquals(groups[1].partitions(), JavaConversions.asScalaBuffer(ImmutableList.of(partitions[1])));
    assertEquals(groups[2].partitions(), JavaConversions.asScalaBuffer(ImmutableList.of(partitions[2])));
}
 
Example 29
Source Project: tinkerpop   Source File: WrappedArraySerializer.java    License: Apache License 2.0
@Override
public void write(final Kryo kryo, final Output output, final WrappedArray<T> iterable) {
    output.writeVarInt(iterable.size(), true);
    JavaConversions.asJavaCollection(iterable).forEach(t -> {
        kryo.writeClassAndObject(output, t);
    });
}
 
Example 30
Source Project: systemds   Source File: MLContextTest.java    License: Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testInputTupleSeqNoMetadataDML() {
	System.out.println("MLContextTest - Tuple sequence no metadata DML");

	List<String> list1 = new ArrayList<>();
	list1.add("1,2");
	list1.add("3,4");
	JavaRDD<String> javaRDD1 = sc.parallelize(list1);
	RDD<String> rdd1 = JavaRDD.toRDD(javaRDD1);

	List<String> list2 = new ArrayList<>();
	list2.add("5,6");
	list2.add("7,8");
	JavaRDD<String> javaRDD2 = sc.parallelize(list2);
	RDD<String> rdd2 = JavaRDD.toRDD(javaRDD2);

	Tuple2 tuple1 = new Tuple2("m1", rdd1);
	Tuple2 tuple2 = new Tuple2("m2", rdd2);
	List tupleList = new ArrayList();
	tupleList.add(tuple1);
	tupleList.add(tuple2);
	Seq seq = JavaConversions.asScalaBuffer(tupleList).toSeq();

	Script script = dml("print('sums: ' + sum(m1) + ' ' + sum(m2));").in(seq);
	setExpectedStdOut("sums: 10.0 26.0");
	ml.execute(script);
}