Java Code Examples for java.util.LinkedList#push()

The following examples show how to use java.util.LinkedList#push(). They are taken from open-source projects; the header above each example names the original source file, project, and license.
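
LinkedList implements java.util.Deque, so push() inserts the element at the head of the list (it is equivalent to addFirst()), and pop() and peek() operate on the head as well. The short, self-contained sketch below (the class name PushDemo is only for illustration) shows the last-in-first-out behaviour that the examples on this page rely on.

import java.util.LinkedList;

public class PushDemo {
    public static void main(String[] args) {
        LinkedList<String> stack = new LinkedList<>();
        stack.push("a");                  // equivalent to stack.addFirst("a")
        stack.push("b");
        stack.push("c");
        System.out.println(stack);        // [c, b, a] - the last pushed element sits at the head
        System.out.println(stack.peek()); // c - peek() reads the head without removing it
        System.out.println(stack.pop());  // c - pop() removes from the head as well
        System.out.println(stack);        // [b, a]
    }
}
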
Example 1
Source File: StackPopOrder.java    From coding-interviews with MIT License
public boolean IsPopOrder(int [] pushA,int [] popA) {
    if (pushA == null || popA == null || pushA.length == 0 || popA.length == 0) {
        return false;
    }
    // auxiliary stack
    LinkedList<Integer> stackAux = new LinkedList<>();
    int popIndex = 0;
    for (int i = 0; i < pushA.length; i++) {
        // push the elements onto the auxiliary stack in the given push order
        stackAux.push(pushA[i]);
        // after each push, compare against the pop sequence: while the top of the stack equals the
        // current pop element, pop it and advance the pop pointer; keep popping while the new top still matches
        while (popIndex < popA.length && !stackAux.isEmpty() && stackAux.peek() == popA[popIndex]) {
            stackAux.pop();
            popIndex++;
        }
    }
    // if the pop order is valid, the auxiliary stack must be empty after simulating all pushes and pops;
    // a non-empty stack means the sequence is invalid
    return stackAux.isEmpty();
}
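
For reference, a hypothetical invocation of the method above might look as follows (assuming IsPopOrder is an instance method of the StackPopOrder class named in the header); the expected results follow from the classic push/pop-order problem:

StackPopOrder checker = new StackPopOrder();
checker.IsPopOrder(new int[]{1, 2, 3, 4, 5}, new int[]{4, 5, 3, 2, 1}); // true
checker.IsPopOrder(new int[]{1, 2, 3, 4, 5}, new int[]{4, 3, 5, 1, 2}); // false: 2 would have to be popped before 1
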
 
Example 2
Source File: DataDstPb.java    From xresloader with MIT License
private void filterMissingFields(LinkedList<String> missingFields, HashMap<String, String> oneofField,
        Descriptors.FieldDescriptor fd, boolean isMissing) throws ConvException {
    if (missingFields == null && oneofField == null) {
        return;
    }

    Descriptors.OneofDescriptor oneof = fd.getContainingOneof();
    if (isMissing && oneof == null && missingFields != null) {
        missingFields.push(fd.getName());
    }

    if (!isMissing && oneof != null && oneofField.containsKey(oneof.getFullName())) {
        String old_field = oneofField.get(oneof.getFullName());
        if (old_field != null) {
            setLastErrorMessage(
                    "field \"%s\" in oneof descriptor \"%s\" already exists, can not add another field \"%s\" with the same oneof descriptor",
                    old_field, oneof.getFullName(), fd.getName());
            throw new ConvException(getLastErrorMessage());
        }
        oneofField.replace(oneof.getFullName(), fd.getName());
    }
}
 
Example 3
Source File: BinaryTreeTraversal.java    From algorithm-primer with MIT License
/**
 * Iterative (non-recursive) postorder traversal of a binary tree.
 * Data structure: stack
 * Idea:
 *      A node may only be visited after its left and right children, so for any node, push it onto the stack first.
 *      If the node has neither a left child nor a right child, it can be visited directly;
 *      if it does have children but both have already been visited, it can likewise be visited directly.
 *      Otherwise push the node's right child and then its left child, which guarantees that whenever the top of
 *      the stack is taken, the left child is visited before the right child, and both are visited before their parent.
 * @param root the root node of the binary tree
 */
public void iterativePostorder(Node root){
    if (root == null) return;
    Node currNode = null;
    Node pre = null;

    LinkedList<Node> stack = new LinkedList<>();
    stack.push(root);
    while (!stack.isEmpty()){
        currNode = stack.peek();
        if ((currNode.left == null && currNode.right == null) ||
                (pre != null && (pre == currNode.left || pre == currNode.right))){
            visit(currNode);
            stack.pop();
            pre = currNode;
        }
        else {
            if (currNode.right != null) stack.push(currNode.right);
            if (currNode.left != null) stack.push(currNode.left);
        }
    }
}
 
Example 4
Source File: FulltextSearchQueryGeneratorImpl.java    From openemm with GNU Affero General Public License v3.0
private String applyDatabaseSpecificOperators(LinkedList<String> operandStack, LinkedList<Operator> operatorStack) {
    List<String> operands = new ArrayList<>();
    while (operatorStack.size() != 0) {
        Operator operator = operatorStack.pop();
        while (operands.size() != operator.getOperandsCount()) {
            operands.add(operandStack.pop());
        }
        Collections.reverse(operands);
        operandStack.push(operator.process(operands));
        operands.clear();
    }
    return applyWordProcessors(operandStack.stream().collect(Collectors.joining()));
}
 
Example 5
Source File: FileUtil.java    From PowerFileExplorer with GNU General Public License v3.0
public static Collection<File> getFiles(File f, boolean includeFolder) {
	Log.d("getFiles f", f.getAbsolutePath());
	final List<File> fList = new LinkedList<File>();
	if (f != null) {
		final LinkedList<File> folderQueue = new LinkedList<File>();
		if (f.isDirectory()) {
			if (includeFolder) {
				fList.add(f);
			}
			folderQueue.push(f);
		} else {
			fList.add(f);
		}
		File fi = null;
		File[] fs;
		while (folderQueue.size() > 0) {
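			// push() adds at the head and removeFirst() takes from the head as well,
			// so despite its name this "queue" behaves as a stack (depth-first traversal order)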
			fi = folderQueue.removeFirst();
			fs = fi.listFiles();
			if (fs != null) {
				for (File f2 : fs) {
					if (f2.isDirectory()) {
						folderQueue.push(f2);
						if (includeFolder) {
							fList.add(f2);
						}
					} else {
						fList.add(f2);
					}
				}
			}
		}
	}
	return fList;
}
 
Example 6
Source File: Solution1.java    From code with Apache License 2.0
/**
 * Problem: https://leetcode-cn.com/problems/validate-stack-sequences/
 * -------------------------------------------------------------------
 * Notes:
 * -------------------------------------------------------------------
 * Approach: simulate the pushes on an auxiliary stack; whenever the top of the stack equals the next
 *           element of the pop sequence, pop it and advance
 * -------------------------------------------------------------------
 * Time complexity: O(n)
 * Space complexity: O(n)
 */
public boolean validateStackSequences(int[] pushed, int[] popped) {
    LinkedList<Integer> stack = new LinkedList<>();

    int index = 0;
    for (int item : pushed) {
        stack.push(item);
        while (index < popped.length && !stack.isEmpty() && stack.peek() == popped[index]) {
            stack.pop();
            index++;
        }
    }

    return index == popped.length;
}
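
The behaviour mirrors Example 1 above; a hypothetical call (assuming an instance of the Solution1 class named in the header) would be:

Solution1 solution = new Solution1();
solution.validateStackSequences(new int[]{1, 2, 3, 4, 5}, new int[]{4, 5, 3, 2, 1}); // true
solution.validateStackSequences(new int[]{1, 2, 3, 4, 5}, new int[]{4, 3, 5, 1, 2}); // false
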
 
Example 7
Source File: Solution1.java    From code with Apache License 2.0
/**
 * Problem: https://leetcode-cn.com/problems/binary-tree-zigzag-level-order-traversal/
 * -------------------------------------------------------------------
 * Notes:
 * -------------------------------------------------------------------
 * Approach: BFS
 * -------------------------------------------------------------------
 * Time complexity: O(n)
 * Space complexity: O(n)
 */
public List<List<Integer>> zigzagLevelOrder(TreeNode root) {
    List<List<Integer>> result = new ArrayList<>();
    if (root == null)
        return result;

    Queue<TreeNode> queue = new LinkedList<>();
    queue.add(root);
    while (!queue.isEmpty()) {
        // process the nodes of the current level
        int size = queue.size();
        LinkedList<Integer> list = new LinkedList<>();
        for (int i = 0; i < size; i++) {
            root = queue.poll();
            // result.size() = 0, 1, 2, ... gives the index of the current level
            // the level decides the order: odd levels are reversed (push adds to the front), even levels keep insertion order
            if ((result.size() & 1) == 1) {
                list.push(root.val);
            } else {
                list.add(root.val);
            }
            if (root.left != null) {
                queue.add(root.left);
            }
            if (root.right != null) {
                queue.add(root.right);
            }
        }
        result.add(list);
    }

    return result;
}
 
Example 8
Source File: Leetcode125.java    From awesome-algorithm with MIT License
public boolean isPalindrome(String s) {
    LinkedList<Character> stack1 = new LinkedList<Character>();
    LinkedList<Character> stack2 = new LinkedList<Character>();

    char[] chars = s.toCharArray();

    for (int i = 0; i < chars.length; i++) {
        if (isAlph(chars[i]) || isNum(chars[i])) {
            stack1.push(chars[i]);
        }
    }

    for (int i = chars.length - 1; i >= 0; i--) {
        if (isAlph(chars[i]) || isNum(chars[i])) {
            stack2.push(chars[i]);
        }
    }

    while (!stack1.isEmpty() && !stack2.isEmpty()) {
        char c1 = stack1.pop();
        char c2 = stack2.pop();

        if (c1 != c2) {
            if (isAlph(c1) && isAlph(c2)) {
                // upper- and lower-case forms of the same letter differ by exactly 32 in ASCII
                if (Math.abs(c1 - c2) != 32) {
                    return false;
                }
            } else {
                return false;
            }
        }
    }

    return true;
}
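
A hypothetical usage sketch (assuming an instance of the Leetcode125 class named in the header); the sample strings are the usual test cases for this problem:

Leetcode125 checker = new Leetcode125();
checker.isPalindrome("A man, a plan, a canal: Panama"); // true - case and punctuation are ignored
checker.isPalindrome("race a car");                     // false
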
 
Example 9
Source File: TreeFlatteningRebuilder.java    From datawave with Apache License 2.0
private List<JexlNode> getAndOrLeaves(JexlNode node) {
    LinkedList<JexlNode> children = new LinkedList<>();
    LinkedList<JexlNode> stack = new LinkedList<>();
    stack.push(node);
    
    while (!stack.isEmpty()) {
        JexlNode currNode = stack.pop();
        
        // only add children if
        // 1) this is the original node, or
        // 2) this node is the same type as the root node and
        // -- a) this is an OR node, or
        // -- b) this is an AND node (but not a bounded range)
        // @formatter:off
        if (currNode == node ||
            (node.getClass().isInstance(currNode) &&
                    (currNode instanceof ASTOrNode ||
                    (currNode instanceof ASTAndNode && !isBoundedRange((ASTAndNode) currNode))))) {
            // @formatter:on
            for (JexlNode child : children(currNode)) {
                stack.push(child);
            }
        } else {
            children.push(currNode);
        }
    }
    
    return children;
}
 
Example 10
Source File: FileUtil.java    From PowerFileExplorer with GNU General Public License v3.0
public static StringBuilder listFilesByName(File f, String lineSep) {
	Log.d("listFileByName f", "" + f);
	StringBuilder sb = new  StringBuilder();
	if (f != null) {
		final LinkedList<File> stk = new LinkedList<File>();
		if (f.isDirectory()) {
			stk.push(f);
		} else {
			sb.append(f.getAbsolutePath()).append(": ").append(f.length()).append(" bytes.").append(lineSep);
		}
		File fi = null;
		File[] fs;
		while (stk.size() > 0) {
			fi = stk.pop();
			fs = fi.listFiles();
			if (fs != null)
				for (File f2 : fs) {
					if (f2.isDirectory()) {
						stk.push(f2);
					} else {
						sb.append(f2.getAbsolutePath()).append(": ").append(f2.length()).append(" bytes.").append(lineSep);
					}
				}
		}

	}
	return sb;
}
 
Example 11
Source File: ReflectionUtils.java    From aggregate-persistence with Apache License 2.0
private static void addInterfaces(final Class<?> classToCheck, final LinkedList<Class> stack)
{
    for (Class interFace : classToCheck.getInterfaces())
    {
        stack.push(interFace);
    }
}
 
Example 12
Source File: VRPSearchState.java    From pathfinder-routing with MIT License
public Optional<VRPSearchState> swapBetweenRoutes() {
    List<Transport> candidates =
        routes.keySet().stream().filter(t -> routes.get(t).size() > 1).collect(toList());
    Transport firstTransport = candidates.get(random.nextInt(candidates.size()));
    List<CommodityAction> firstRoute = routes.get(firstTransport);
    List<CommodityAction> potentialMovers = firstRoute.stream()
        .filter(ca -> ca instanceof CommodityDropoff && ((CommodityDropoff) ca).start instanceof CommodityPickup)
        .collect(toList());
    if (potentialMovers.isEmpty()) {
        return Optional.empty();
    }
    CommodityDropoff mover =
        (CommodityDropoff) potentialMovers.get(random.nextInt(potentialMovers.size()));
    List<Transport> secondTransportCandidates = routes.keySet().stream()
        .filter(t -> !t.equals(firstTransport) && canFitInFront(t, mover))
        .collect(toList());
    if (secondTransportCandidates.isEmpty()) {
        return Optional.empty();
    }
    Transport secondTransport =
        secondTransportCandidates.get(random.nextInt(secondTransportCandidates.size()));
    List<CommodityAction> newFirstRoute = new ArrayList<>(firstRoute);
    newFirstRoute.remove(mover);
    newFirstRoute.remove(mover.start);
    LinkedList<CommodityAction> newSecondRoute = new LinkedList<>(routes.get(secondTransport));
    newSecondRoute.push(mover);
    newSecondRoute.push((CommodityPickup) mover.start);
    Map<Transport, List<CommodityAction>> newRoutes = new HashMap<>(routes);
    newRoutes.put(firstTransport, newFirstRoute);
    newRoutes.put(secondTransport, newSecondRoute);
    return Optional.of(new VRPSearchState(problem, newRoutes));
}
 
Example 13
Source File: BTreeDemo.java    From algorithm-tutorial with Creative Commons Attribution Share Alike 4.0 International
public static void depthOrderTraverse(TreeNode root) {
    if (root == null) {
        return;
    }
    LinkedList<TreeNode> stack = new LinkedList<>();
    stack.push(root);
    while (!stack.isEmpty()) {
        TreeNode node = stack.pop();
        System.out.print(node.val + "  ");
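        // the left child is pushed before the right one, so the right subtree is popped
        // (and therefore visited) first: the traversal order is root, right subtree, left subtree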
        if (node.left != null) stack.push(node.left);
        if (node.right != null) stack.push(node.right);
    }
}
 
Example 14
Source File: CuboidCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
public static int[] calculateAllLevelCount(CubeDesc cube) {
    int levels = cube.getInitialCuboidScheduler().getBuildLevel();
    int[] allLevelCounts = new int[levels + 1];

    CuboidScheduler scheduler = cube.getInitialCuboidScheduler();
    LinkedList<Long> nextQueue = new LinkedList<Long>();
    LinkedList<Long> currentQueue = new LinkedList<Long>();
    long baseCuboid = Cuboid.getBaseCuboidId(cube);
    currentQueue.push(baseCuboid);

    for (int i = 0; i <= levels; i++) {
        allLevelCounts[i] = currentQueue.size();
        while (!currentQueue.isEmpty()) {
            long cuboid = currentQueue.pop();
            Collection<Long> spanningCuboids = scheduler.getSpanningCuboid(cuboid);

            nextQueue.addAll(spanningCuboids);
        }
        currentQueue = nextQueue;
        nextQueue = new LinkedList<Long>();

        if (i == levels) {
            if (!currentQueue.isEmpty()) {
                throw new IllegalStateException();
            }
        }
    }

    return allLevelCounts;
}
 
Example 15
Source File: BreadthFirstPaths.java    From algorithm-primer with MIT License
/**
 * Returns the shortest path from source to vertex.
 * @param vertex the target vertex
 * @return the shortest path from source to vertex
 */
public Iterable<Integer> pathTo(int vertex){
    if (!hasPathTo(vertex)) return null;
    LinkedList<Integer> stackPath = new LinkedList<Integer>();
    for (int parent = vertex; parent != source; parent = edgeTo[parent])
        stackPath.push(parent);  // push onto the stack; elements come back off with pop()
    stackPath.push(source);
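    // push() inserts at the head, so iterating the returned list yields source first,
    // then the intermediate vertices, and finally vertex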
    return stackPath;
}
 
Example 16
Source File: MessageInterpolator.java    From super-csv-annotation with Apache License 2.0
/**
 * Returns the given string with its escape characters removed.
 * @param str the target string
 * @param escapeChar the escape character
 * @return the string without escape characters
 */
private String removeEscapeChar(final String str, final char escapeChar) {
    
    if(str == null || str.isEmpty()) {
        return str;
    }
    
    final String escapeStr = String.valueOf(escapeChar);
    StringBuilder sb = new StringBuilder();
    
    final LinkedList<String> stack = new LinkedList<>();
    
    final int length = str.length();
    for(int i=0; i < length; i++) {
        final char c = str.charAt(i);
        
        if(StackUtils.equalsTopElement(stack, escapeStr)) {
            // the element on top of the stack is the escape character
            StackUtils.popup(stack);
            sb.append(c);
            
        } else if(c == escapeChar) {
            // push it onto the stack
            stack.push(String.valueOf(c));
            
        } else {
            sb.append(c);
        }
        
    }
    
    if(!stack.isEmpty()) {
        sb.append(StackUtils.popupAndConcat(stack));
    }
    
    return sb.toString();
    
}
 
Example 17
Source File: SchemaGraph.java    From NLIDB with Apache License 2.0
/**
 * Return a list of String as join path in the form of:
 * <br> table1 table3 table2
 * <br> Shortest join path is found using BFS.
 * <br> The join keys can be found using {@link #getJoinKeys(String, String)} 
 * @param table1
 * @param table2
 * @return
 */
public List<String> getJoinPath(String table1, String table2) {
	if (!tables.containsKey(table1) || !tables.containsKey(table2)) {
		return new ArrayList<String>();
	}
	// Assume table1 and table2 are different.
	// Find shortest path using BFS.
	HashMap<String, Boolean> visited = new HashMap<>();
	for (String tableName : tables.keySet()) {
		visited.put(tableName, false);
	}
	HashMap<String, String> prev = new HashMap<>(); // the parent tableName
	LinkedList<String> queue = new LinkedList<>();
	queue.addLast(table1);
	visited.put(table1, true);
	boolean found = false;
	while (!queue.isEmpty() && !found) {
		String tableCurr = queue.removeFirst();
		for (String tableNext : connectivity.get(tableCurr)) {
			if (!visited.get(tableNext)) {
				visited.put(tableNext, true);
				queue.addLast(tableNext);
				prev.put(tableNext, tableCurr);
			}
			if (tableNext.equals(table2)) { found = true; }
		}
	}

	LinkedList<String> path = new LinkedList<>();
	if (visited.get(table2)) {
		String tableEnd = table2; 
		path.push(tableEnd);
		while (prev.containsKey(tableEnd)) {
			tableEnd = prev.get(tableEnd);
			path.push(tableEnd);
		}
	}
	return path;
}
 
Example 18
Source File: ExpParser.java    From jaamsim with Apache License 2.0
public static Assignment parseAssignment(ParseContext context, String input) throws ExpError {

		ArrayList<ExpTokenizer.Token> ts;
		ts = ExpTokenizer.tokenize(input);

		TokenList tokens = new TokenList(ts);

		Assignment ret = new Assignment(input);
		ExpNode lhsNode = parseExp(context, tokens, 0, ret);

		tokens.expect(ExpTokenizer.SYM_TYPE, "=", input);

		ExpNode rhsNode = parseExp(context, tokens, 0, ret);

		// Make sure we've parsed all the tokens
		ExpTokenizer.Token peeked = tokens.peek();
		if (peeked != null) {
			throw new ExpError(input, peeked.pos, "Unexpected additional values");
		}

		rhsNode = optimizeAndValidateExpression(input, rhsNode, ret);
		ret.valueExp = rhsNode;

		// Parsing is done, now we need to unwind the lhs expression to get the necessary components

		// Note we use a linked list here, as we will be adding nodes in reverse order
		LinkedList<ExpNode> indexExps = new LinkedList<>();

		// Check for an optional index at the end
		while(lhsNode instanceof IndexCollection) {
			// the lhs ended with an index, split that off
			IndexCollection ind = (IndexCollection)lhsNode;
			if (ind.indices.size() != 1)
				throw new ExpError(input, lhsNode.tokenPos, "Assignment to collections can only take a single index");

			ExpNode indexExp = ind.indices.get(0);
			indexExp = optimizeAndValidateExpression(input, indexExp, ret);

			indexExps.push(indexExp);
			lhsNode = ind.collection;

		}
		if (indexExps.size() > 0) {

			ret.attribIndices = indexExps.toArray(new ExpNode[indexExps.size()]);
		} else {
			ret.attribIndices = null;
		}

		// Now make sure the last node of the lhs ends with a output resolve
		if (!(lhsNode instanceof ResolveOutput)) {
			throw new ExpError(input, lhsNode.tokenPos, "Assignment left side must end with an output, followed by optional indices");
		}

		ResolveOutput lhsResolve = (ResolveOutput)lhsNode;
		ExpNode entNode = lhsResolve.entNode;

		entNode = optimizeAndValidateExpression(input, entNode, ret);
		ret.entExp = entNode;

		if (ret.entExp instanceof Constant) {
			ExpResult ent = ret.entExp.evaluate(null);
			ret.assigner = context.getConstAssigner(ent, lhsResolve.outputName);
		} else {
			ret.assigner = context.getAssigner(lhsResolve.outputName);
		}

		return ret;

	}
 
Example 19
Source File: ModelSetup.java    From iBioSim with Apache License 2.0
/**
 * Sets up the models for simulation.
 *
 * @param sim
 *          - the hierarchical simulator.
 * @param type
 *          - the simulation type.
 * @param wrapper
 *          - the vector wrapper for the state vector.
 * @throws XMLStreamException
 *           - if there is any issue with the SBML.
 * @throws IOException
 *           - if there is any issue with the input file.
 * @throws BioSimException
 *           - if there is an issue with a model.
 */
public static void setupModels(HierarchicalSimulation sim, ModelType type, VectorWrapper wrapper) throws XMLStreamException, IOException, BioSimException {

  Map<String, SBMLDocument> sourceMap = new HashMap<>();
  List<ModelContainer> listOfContainers = new ArrayList<>();
  AnalysisProperties properties = sim.getProperties();
  SBMLDocument document = SBMLReader.read(new File(properties.getFilename()));
  Model model = document.getModel();
  HierarchicalModel hierarchicalModel = new HierarchicalModel("topmodel", 0);
  sim.setTopmodel(hierarchicalModel);

  LinkedList<ModelContainer> unproc = new LinkedList<>();

  unproc.push(new ModelContainer(model, hierarchicalModel, null, type));
  int count = 0;

  while (!unproc.isEmpty()) {
    ModelContainer container = unproc.pop();
    listOfContainers.add(container);
    sim.addHierarchicalModel(container.getHierarchicalModel());

    if (container.getCompModel() != null) {
      for (Submodel submodel : container.getCompModel().getListOfSubmodels()) {
        model = null;
        CompSBMLDocumentPlugin compDoc = container.getCompDoc();
        if (compDoc != null) {
          if (compDoc.getListOfExternalModelDefinitions() != null && compDoc.getListOfExternalModelDefinitions().get(submodel.getModelRef()) != null) {
            ExternalModelDefinition ext = compDoc.getListOfExternalModelDefinitions().get(submodel.getModelRef());

            String source = ext.getSource();
            SBMLDocument extDoc = getDocument(source, properties, sourceMap);
            model = extDoc.getModel();
            compDoc = (CompSBMLDocumentPlugin) extDoc.getPlugin(CompConstants.namespaceURI);

            while (ext.isSetModelRef()) {
              if (compDoc.getExternalModelDefinition(ext.getModelRef()) != null) {
                ext = compDoc.getListOfExternalModelDefinitions().get(ext.getModelRef());
                source = ext.getSource().replace("file:", "");
                extDoc = getDocument(source, properties, sourceMap);
                model = extDoc.getModel();
                compDoc = (CompSBMLDocumentPlugin) extDoc.getPlugin(CompConstants.namespaceURI);
              } else if (compDoc.getModelDefinition(ext.getModelRef()) != null) {
                model = compDoc.getModelDefinition(ext.getModelRef());
                break;
              } else {
                break;
              }
            }
          } else if (compDoc.getListOfModelDefinitions() != null && compDoc.getListOfModelDefinitions().get(submodel.getModelRef()) != null) {
            model = compDoc.getModelDefinition(submodel.getModelRef());
          }

          if (model != null) {
            hierarchicalModel = new HierarchicalModel(submodel.getId(), ++count);
            unproc.push(new ModelContainer(model, hierarchicalModel, container, type));
          }
        }
      }
    }
  }
  MathInterpreter mathInterpreter = new MathInterpreter();
  CoreSetup.initializeCore(sim, listOfContainers, sim.getCurrentTime(), wrapper, mathInterpreter);
  ReplacementSetup.initializeComp(listOfContainers);
  ArraysSetup.initializeArrays(listOfContainers, sim.getAtomicType(), wrapper);
  setupOutputVariables(sim, listOfContainers);

  sim.computeRateOfChange(mathInterpreter.hasRateOf());

  if (sim instanceof HierarchicalMixedSimulator) {
    initializeHybridSimulation((HierarchicalMixedSimulator) sim, listOfContainers);
  }
}
 
Example 20
Source File: ScribeMasterRestartDistributedTest.java    From Scribengin with GNU Affero General Public License v3.0
@Test
public void ScribeMasterRestartDevTest() throws InterruptedException{
  
  ScribeConsumerConfig c = new ScribeConsumerConfig();
  c.hdfsPath = helper.getHadoopConnection();
  c.topic = helper.getTopic();
  c.cleanStart = true;
  List<HostPort> bList = new LinkedList<HostPort>();
  bList.add(new HostPort("127.0.0.1","9092"));
  c.brokerList = bList;
  
  LinkedList<String> topics = new LinkedList<String>();
  topics.push(helper.getTopic());
  
  sm = new ScribeMaster(topics, c);
  sm.setScribeConsumerManager(new ClusterScribeConsumerManager());
  sm.start();
  
  sm.checkOnConsumersThreaded(1000);
  
  
  Thread.sleep(2000);
  
  LOG.info("Creating kafka data");
  //Create kafka data
  helper.createKafkaData(0);
    
  //Wait for consumption
  Thread.sleep(5000);
  //Ensure messages 0-99 were consumed
  LOG.info("Asserting data is correct");
  helper.assertHDFSmatchesKafka(0,helper.getHadoopConnection());
  
  LOG.info("Killing consumers!");
  sm.killConsumersForceRestart();
  
  
  LOG.info("Creating kafka data");
  //Create kafka data
  helper.createKafkaData(100);
    
  //Wait for consumption
  Thread.sleep(5000);
  //Ensure messages 100-199 were consumed
  LOG.info("Asserting data is correct");
  helper.assertHDFSmatchesKafka(100,helper.getHadoopConnection());
  
  
}