Java Code Examples for org.apache.commons.csv.CSVRecord#size()
The following examples show how to use org.apache.commons.csv.CSVRecord#size(). Each example is taken from an open-source project; its source file and license are noted in the heading above the code.
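As a quick orientation before the project examples, here is a minimal sketch of the call itself: size() returns the number of values in one parsed record, which makes it the natural loop bound when a record is copied or indexed. The class name and the sample data below are invented for illustration; only the commons-csv calls are real API.

import java.io.Reader;
import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CsvRecordSizeDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical input: two rows with different numbers of columns.
        Reader in = new StringReader("a,b,c\n1,2\n");
        try (CSVParser parser = CSVFormat.DEFAULT.parse(in)) {
            for (CSVRecord record : parser) {
                // size() is the number of values in this record; use it to bound get(i).
                String[] values = new String[record.size()];
                for (int i = 0; i < record.size(); i++) {
                    values[i] = record.get(i);
                }
                System.out.println("record " + record.getRecordNumber() + " has "
                        + record.size() + " values: " + String.join("|", values));
            }
        }
    }
}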
Example 1
Source File: AwReportCsvReader.java From aw-reporting with Apache License 2.0
/**
 * Returns the next CSV line in the file. If the line starts with the AW last line total String,
 * then returns null.
 *
 * @return the next line in the CSV, or {@code null} in case the file has ended, or the total line
 *     was reached
 */
public String[] readNext() {
    String[] next = null;
    if (records.hasNext()) {
        CSVRecord record = records.next();
        List<String> values = Lists.newArrayListWithCapacity(record.size());
        for (int i = 0; i < record.size(); i++) {
            values.add(record.get(i));
        }
        if (!hasSummary || !AW_REPORT_CSV_TOTAL.equalsIgnoreCase(Iterables.getFirst(values, null))) {
            next = values.toArray(new String[0]);
        }
    }
    return next;
}
Example 2
Source File: HashmodCSVImportFileParser.java From constellation with Apache License 2.0
public List<String[]> parse(final HashmodInputSource input, final PluginParameters parameters) throws IOException {
    final ArrayList<String[]> results = new ArrayList<>();

    try (final CSVParser csvFileParser = CSVFormat.RFC4180.parse(new InputStreamReader(input.getInputStream(), StandardCharsets.UTF_8.name()))) {
        final List<CSVRecord> records = csvFileParser.getRecords();
        for (final CSVRecord record : records) {
            final String[] line = new String[record.size()];
            for (int i = 0; i < record.size(); i++) {
                line[i] = record.get(i);
            }
            results.add(line);
        }
    }

    return results;
}
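Example 2 (and several of the examples below) copies a record into a String[] by looping up to record.size(). As a side note, not taken from the constellation project: CSVRecord is Iterable<String> on all commons-csv versions, and recent releases (roughly 1.9 and later) also add a toList() convenience method. A hedged sketch of both alternatives; verify that toList() exists in the version you depend on before using the second form.

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.csv.CSVRecord;

final class RecordToArray {

    // Portable on all commons-csv versions: iterate the record and use size()
    // to pre-size the target collection.
    static String[] toArray(CSVRecord record) {
        List<String> values = new ArrayList<>(record.size());
        for (String value : record) {
            values.add(value);
        }
        return values.toArray(new String[0]);
    }

    // On recent releases, toList() performs the copy directly
    // (assumed available; older versions need the loop above).
    static String[] toArrayModern(CSVRecord record) {
        return record.toList().toArray(new String[0]);
    }
}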
Example 3
Source File: HashmodCSVImportFileParser.java From constellation with Apache License 2.0
public List<String[]> preview(final HashmodInputSource input, final PluginParameters parameters, final int limit) throws IOException {
    // Leave the header on, as the importer expects this as the first entry.
    final ArrayList<String[]> results = new ArrayList<>();

    try (final CSVParser csvFileParser = CSVFormat.RFC4180.parse(new InputStreamReader(input.getInputStream(), StandardCharsets.UTF_8.name()))) {
        int count = 0;
        final List<CSVRecord> records = csvFileParser.getRecords();
        for (final CSVRecord record : records) {
            final String[] line = new String[record.size()];
            for (int i = 0; i < record.size(); i++) {
                line[i] = record.get(i);
            }
            results.add(line);

            count++;
            if (count >= limit) {
                return results;
            }
        }
    }

    return results;
}
Example 4
Source File: HiscoreActivity.java From rs-api with ISC License
/**
 * Creates a {@link HiscoreActivity} from a {@link CSVRecord}.
 * @param record The record.
 * @return The {@link HiscoreActivity} or {@link Optional#empty()} if the record was invalid.
 */
public static Optional<HiscoreActivity> fromCsv(CSVRecord record) {
    if (record.size() < 2) {
        return Optional.empty();
    }

    Integer rank = Ints.tryParse(record.get(0));
    if (rank == null) {
        return Optional.empty();
    }

    Integer score = Ints.tryParse(record.get(1));
    if (score == null) {
        return Optional.empty();
    }

    return Optional.of(new HiscoreActivity(rank, score));
}
Example 5
Source File: ClanMate.java From rs-api with ISC License
/**
 * Creates a {@link ClanMate} from a {@link CSVRecord}.
 * @param record The record.
 * @return The {@link ClanMate} or {@link Optional#empty()} if the record was invalid.
 */
public static Optional<ClanMate> fromCsv(CSVRecord record) {
    if (record.size() < 4) {
        return Optional.empty();
    }

    String name = record.get(0);
    String rank = record.get(1);

    Long experience = Longs.tryParse(record.get(2));
    if (experience == null) {
        return Optional.empty();
    }

    Integer kills = Ints.tryParse(record.get(3));
    if (kills == null) {
        return Optional.empty();
    }

    return Optional.of(new ClanMate(name, rank, experience, kills));
}
Example 6
Source File: UserAgentFileParser.java From browscap-java with MIT License
private Rule getRule(final CSVRecord record) throws ParseException {
    if (record.size() <= 47) {
        return null;
    }

    // Normalize: lowercase and remove duplicate wildcards
    final String pattern = normalizePattern(record.get(0));
    try {
        final Map<BrowsCapField, String> values = getBrowsCapFields(record);
        final Capabilities capabilities = getCapabilities(values);
        final Rule rule = createRule(pattern, capabilities);

        // Check reconstructing the pattern
        if (!pattern.equals(rule.getPattern())) {
            throw new ParseException("Unable to parse " + pattern);
        }
        return rule;
    } catch (final IllegalStateException e) {
        throw new ParseException("Unable to parse " + pattern);
    }
}
Example 7
Source File: Skill.java From rs-api with ISC License
/**
 * Creates a {@link Skill} from a {@link CSVRecord}.
 * @param record The record.
 * @return The {@link Skill} or {@link Optional#empty()} if the record was invalid.
 */
public static Optional<Skill> fromCsv(CSVRecord record) {
    if (record.size() < 3) {
        return Optional.empty();
    }

    Integer rank = Ints.tryParse(record.get(0));
    if (rank == null) {
        return Optional.empty();
    }

    Integer level = Ints.tryParse(record.get(1));
    if (level == null) {
        return Optional.empty();
    }

    Long experience = Longs.tryParse(record.get(2));
    if (experience == null) {
        return Optional.empty();
    }

    return Optional.of(new Skill(rank, level, experience));
}
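Examples 4, 5, and 7 all guard record.size() before indexing and return Optional.empty() for malformed rows. Below is a hedged sketch of how such a factory might be driven over a whole parsed response; the SkillCsvDemo class and the collection step are invented for illustration, while Skill.fromCsv and the commons-csv calls come from the examples above.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

final class SkillCsvDemo {
    static List<Skill> parseSkills(String csv) throws IOException {
        List<Skill> skills = new ArrayList<>();
        try (CSVParser parser = CSVParser.parse(csv, CSVFormat.DEFAULT)) {
            for (CSVRecord record : parser) {
                // fromCsv() rejects rows with fewer than 3 values or non-numeric fields.
                Optional<Skill> skill = Skill.fromCsv(record);
                skill.ifPresent(skills::add);
            }
        }
        return skills;
    }
}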
Example 8
Source File: CsvParser.java From datacollector with Apache License 2.0
private String[] toArray(CSVRecord record) {
    String[] array = (record == null) ? null : new String[record.size()];
    if (array != null) {
        for (int i = 0; i < record.size(); i++) {
            array[i] = record.get(i);
        }
    }
    return array;
}
Example 9
Source File: CsvRecord.java From OpenEstate-IO with Apache License 2.0
/**
 * Loads data from {@link CsvParser} into the record.
 *
 * @param record the CSV record from
 * <a href="http://commons.apache.org/proper/commons-csv/">commons-csv</a>
 */
protected void parse(CSVRecord record) {
    this.values.clear();
    for (int i = 0; i < record.size(); i++) {
        this.values.put(i, this.parse(record.get(i)));
    }
}
Example 10
Source File: TestStep.java From Cognizant-Intelligent-Test-Scripter with Apache License 2.0
private void loadStep(CSVRecord record) {
    for (int i = 0; i < record.size(); i++) {
        stepDetails.add(record.get(i));
    }
    while (stepDetails.size() != HEADERS.values().length) {
        stepDetails.add("");
    }
}
Example 11
Source File: FrameworkUtils.java From data-polygamy with BSD 3-Clause "New" or "Revised" License
public static String[] splitStr(String val) throws IOException {
    CSVParser parser = new CSVParser(new StringReader(val), CSVFormat.DEFAULT);
    CSVRecord record = parser.getRecords().get(0);
    Iterator<String> valuesIt = record.iterator();
    String[] input = new String[record.size()];
    int i = 0;
    while (valuesIt.hasNext()) {
        input[i] = valuesIt.next();
        i++;
    }
    parser.close();
    return input;
}
Example 12
Source File: CsvUpsertExecutor.java From phoenix with Apache License 2.0
@Override
protected void execute(CSVRecord csvRecord) {
    try {
        if (csvRecord.size() < conversionFunctions.size()) {
            String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
                    csvRecord.size(), conversionFunctions.size());
            throw new IllegalArgumentException(message);
        }
        for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
            Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
            if (sqlValue != null) {
                preparedStatement.setObject(fieldIndex + 1, sqlValue);
            } else {
                preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType());
            }
        }
        preparedStatement.execute();
        upsertListener.upsertDone(++upsertCount);
    } catch (Exception e) {
        if (LOGGER.isDebugEnabled()) {
            // Even though this is an error we only log it with debug logging because we're notifying the
            // listener, and it can do its own logging if needed
            LOGGER.debug("Error on CSVRecord " + csvRecord, e);
        }
        upsertListener.errorOnRecord(csvRecord, e);
    }
}
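Example 12 validates csvRecord.size() against the expected column count before binding parameters. When the expected width comes from a header row, commons-csv can perform a similar check itself: CSVRecord#isConsistent() reports whether the record's size matches the parser's header map. A small sketch under that assumption; the WidthCheckDemo class, sample data, and error handling are invented, while isConsistent(), getHeaderMap(), and withFirstRecordAsHeader() are commons-csv API.

import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

final class WidthCheckDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical input: the second data row is missing a column.
        String csv = "id,name,score\n1,alice,10\n2,bob\n";
        try (CSVParser parser = CSVFormat.DEFAULT.withFirstRecordAsHeader().parse(new StringReader(csv))) {
            for (CSVRecord record : parser) {
                // isConsistent() compares record.size() with the header size.
                if (!record.isConsistent()) {
                    System.err.println("Record " + record.getRecordNumber()
                            + " has " + record.size() + " values, expected "
                            + parser.getHeaderMap().size());
                    continue;
                }
                // Safe to access all mapped columns here.
                System.out.println(record.get("id") + " -> " + record.get("name"));
            }
        }
    }
}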
Example 13
Source File: SimpleFileIOAvroRegistry.java From components with Apache License 2.0
/**
 * Infers an Avro schema for the given CSVRecord. This can be an expensive operation so the schema should be
 * cached where possible. This is always an {@link Schema.Type#RECORD}.
 *
 * @param in the CSVRecord to analyse.
 * @return the schema for data given from the object.
 */
private Schema inferCsvRecord(CSVRecord in) {
    List<Schema.Field> fields = new ArrayList<>();

    SchemaBuilder.FieldAssembler<Schema> fa = SchemaBuilder.record(RECORD_NAME).fields();
    for (int i = 0; i < in.size(); i++) {
        fa = fa.name(FIELD_PREFIX + i).type(Schema.create(Schema.Type.STRING)).noDefault();
    }
    return fa.endRecord();
}
Example 14
Source File: MyCSVUtils.java From spring-boot with Apache License 2.0
/**
 * Gets every cell value of a row.
 *
 * @param csvRecord the row data
 * @return an array whose order matches the cell order
 */
public static String[] getCSVArrayValues(CSVRecord csvRecord) {
    int size = csvRecord.size();
    String[] values = new String[size];
    int i = 0;
    while (i < size) {
        values[i] = csvRecord.get(i);
        i++;
    }
    return values;
}
Example 15
Source File: DataTable.java From bookish with MIT License
public void parseCSV(String csv) {
    try {
        Reader in = new StringReader(csv);
        CSVFormat format = CSVFormat.EXCEL.withHeader();
        CSVParser parser = format.parse(in);
        this.firstColIsIndex = false;
        for (CSVRecord record : parser) {
            if (!firstColIsIndex && Character.isAlphabetic(record.get(0).charAt(0))) {
                // latch if we see alpha not number
                firstColIsIndex = true;
            }
            List<String> row = new ArrayList<>();
            for (int i = 0; i < record.size(); i++) {
                String v = record.get(i);
                boolean isInt = false;
                try {
                    Integer.parseInt(v);
                    isInt = true;
                } catch (NumberFormatException nfe) {
                    isInt = false;
                }
                if (!isInt && !NumberUtils.isDigits(v) && NumberUtils.isCreatable(v)) {
                    v = String.format("%.4f", Precision.round(Double.valueOf(v), 4));
                } else {
                    v = abbrevString(v, 25);
                }
                row.add(v);
            }
            rows.add(row);
        }
        Set<String> colNames = parser.getHeaderMap().keySet();
        if (!firstColIsIndex) {
            colNames.remove(""); // remove index column name
        }
        this.colNames.addAll(colNames);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example 16
Source File: Utils.java From systemsgenetics with GNU General Public License v3.0
/**
 * Reads a tab-delimited file and returns it as a list of lists, with [x] = column and [x][y] = the
 * value in that column. Needed for reading the counts file, as there the rows are the samples, as
 * opposed to the expression and genotype files, where the columns are the samples. Needs to be read
 * into memory, so the minimal memory requirement is larger than the size of the counts file.
 *
 * @param filepath The path to a tab delimited file to read
 *
 * @return A 2D array with each array being one column from filepath except the first column,
 *         and a 1D array with the first column (without header)
 *
 * @throws IOException If the file at filepath can not be read
 */
public static Object[] readTabDelimitedColumns(String filepath) throws IOException {
    List<List<String>> allColumns = new ArrayList<List<String>>();
    // parses file on tabs
    CSVParser parser = new CSVParser(new FileReader(filepath), CSVFormat.newFormat('\t'));
    Boolean header = true;
    int rowNumber = 0;
    int columnIndexHeader = 0;
    List<String> firstColumn = new ArrayList<String>();
    for (CSVRecord row : parser) {
        rowNumber++;
        // starts at 1 because 1st element of column is the samplename, unless its the header row
        int columnStart = 1;
        if (header) {
            columnStart = 0;
        }
        for (int columnIndex = columnStart; columnIndex < row.size(); columnIndex++) {
            // header can start from 0 if it is R styled, so check if element 0 has a value
            // R style is e.g.
            // colNameA colNameB
            // rowNameA AAValue AAvalue
            // rownameB ABValue BAvalue
            // while csv style has a tab before colNameA
            if (header) {
                String columnValue = row.get(columnIndex);
                if (columnValue.length() == 0) {
                    continue;
                }
                allColumns = addSingleValueTo2DArray(allColumns, columnIndexHeader, columnValue);
                columnIndexHeader++;
                continue;
            } else {
                // This changes the allColumns list of list in place, e.g. for example loop -> [[]] -> [[1]] -> [[1,2]] -> [[1,2],[3]] -> etc
                allColumns = addSingleValueTo2DArray(allColumns, columnIndex - 1, row.get(columnIndex));
            }
        }
        if (!header) {
            firstColumn.add(row.get(0));
            if (row.size() - 1 != columnIndexHeader) {
                DeconvolutionLogger.log.info(String.format("Table %s does not have the same number of columns as there are in the header at row %d", filepath, rowNumber));
                DeconvolutionLogger.log.info(String.format("Number of header columns: %d", columnIndexHeader));
                DeconvolutionLogger.log.info(String.format("Number of columns at row %d: %d", rowNumber, row.size() - 1));
                DeconvolutionLogger.log.info(row.toString());
                parser.close();
                throw new RuntimeException(String.format("Cellcount percentage table does not have the same number of columns as there are celltypes at row %d", rowNumber));
            }
        }
        if (header) {
            header = false;
        }
    }
    parser.close();
    return new Object[] {firstColumn, allColumns};
}
Example 17
Source File: ExecutionStep.java From Cognizant-Intelligent-Test-Scripter with Apache License 2.0
private void loadStep(CSVRecord record) {
    for (int i = 0; i < record.size(); i++) {
        exeStepDetails.add(record.get(i));
    }
}
Example 18
Source File: CSVFormatFactory.java From incubator-batchee with Apache License 2.0
static CSVFormat newFormat(final String format, final String delimiter, final String quoteCharacter,
                           final String quoteMode, final String commentMarker, final String escapeCharacter,
                           final String ignoreSurroundingSpaces, final String ignoreEmptyLines,
                           final String recordSeparator, final String nullString, final String headerComments,
                           final String header, final String skipHeaderRecord, final String allowMissingColumnNames,
                           final String readHeaders) {
    //CHECKSTYLE:ON
    CSVFormat out = format == null ? CSVFormat.DEFAULT : CSVFormat.valueOf(format);
    if (delimiter != null) {
        out = out.withDelimiter(delimiter.charAt(0));
    }
    if (quoteCharacter != null) {
        out = out.withQuote(quoteCharacter.charAt(0));
    }
    if (quoteMode != null) {
        out = out.withQuoteMode(QuoteMode.valueOf(quoteMode));
    }
    if (commentMarker != null) {
        out = out.withCommentMarker(commentMarker.charAt(0));
    }
    if (escapeCharacter != null) {
        out = out.withEscape(escapeCharacter.charAt(0));
    }
    if (ignoreSurroundingSpaces != null) {
        out = out.withIgnoreSurroundingSpaces(Boolean.parseBoolean(ignoreSurroundingSpaces));
    }
    if (ignoreEmptyLines != null) {
        out = out.withIgnoreEmptyLines(Boolean.parseBoolean(ignoreEmptyLines));
    }
    if (recordSeparator != null) {
        if ("\\n".equals(recordSeparator)) {
            out = out.withRecordSeparator('\n');
        } else if ("\\r\\n".equals(recordSeparator)) {
            out = out.withRecordSeparator("\r\n");
        } else {
            out = out.withRecordSeparator(recordSeparator);
        }
    }
    if (nullString != null) {
        out = out.withNullString(nullString);
    }
    if (headerComments != null && !headerComments.trim().isEmpty()) {
        out = out.withHeaderComments(headerComments.split(" *, *"));
    }
    if (Boolean.parseBoolean(readHeaders)) {
        out = out.withHeader();
    }
    if (header != null && !header.trim().isEmpty()) {
        try {
            // headers can have CSV header names so parse it there
            final Iterator<CSVRecord> iterator = out.withHeader(new String[0]).parse(new StringReader(header + '\n' + header)).iterator();
            final CSVRecord record = iterator.next();
            final List<String> list = new ArrayList<String>(record.size());
            for (final String h : record) {
                list.add(h);
            }
            out = out.withHeader(list.toArray(new String[record.size()]));
        } catch (final IOException e) {
            // can't occur actually
            out = out.withHeader(header.split(" *, *"));
        }
    }
    if (skipHeaderRecord != null) {
        out = out.withSkipHeaderRecord(Boolean.parseBoolean(skipHeaderRecord));
    }
    if (allowMissingColumnNames != null) {
        out = out.withAllowMissingColumnNames(Boolean.parseBoolean(allowMissingColumnNames));
    }
    return out;
}
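The header handling in Example 18 relies on a small trick: the comma-separated header string is itself parsed as a one-line CSV so that quoted column names containing commas survive the split. A minimal sketch of the same idea in isolation; the HeaderSplitDemo class and the header string are invented, and only the commons-csv calls are real API.

import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

final class HeaderSplitDemo {
    public static void main(String[] args) throws Exception {
        // A column name that itself contains a comma, so a plain split(",") would break it.
        String header = "\"last, first\",age,city";
        try (CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader(header))) {
            CSVRecord record = parser.iterator().next();
            String[] columns = new String[record.size()];
            for (int i = 0; i < record.size(); i++) {
                columns[i] = record.get(i);
            }
            // columns: ["last, first", "age", "city"]
            System.out.println(String.join(" | ", columns));
        }
    }
}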
Example 19
Source File: HiscoreClient.java From runelite with BSD 2-Clause "Simplified" License
private HiscoreResultBuilder lookupUsername(String username, HttpUrl hiscoreUrl) throws IOException {
    HttpUrl url = hiscoreUrl.newBuilder()
        .addQueryParameter("player", username)
        .build();

    log.debug("Built URL {}", url);
    Request okrequest = new Request.Builder()
        .url(url)
        .build();

    String responseStr;

    try (Response okresponse = client.newCall(okrequest).execute()) {
        if (!okresponse.isSuccessful()) {
            switch (okresponse.code()) {
                case 404:
                    return null;
                default:
                    throw new IOException("Error retrieving data from Jagex Hiscores: " + okresponse);
            }
        }

        responseStr = okresponse.body().string();
    }

    CSVParser parser = CSVParser.parse(responseStr, CSVFormat.DEFAULT);

    HiscoreResultBuilder hiscoreBuilder = new HiscoreResultBuilder();
    hiscoreBuilder.setPlayer(username);

    int count = 0;

    for (CSVRecord record : parser.getRecords()) {
        if (count++ >= HiscoreSkill.values().length) {
            log.warn("Jagex Hiscore API returned unexpected data");
            break; // rest is other things?
        }

        // rank, level, experience
        int rank = Integer.parseInt(record.get(0));
        int level = Integer.parseInt(record.get(1));

        // items that are not skills do not have an experience parameter
        long experience = -1;
        if (record.size() == 3) {
            experience = Long.parseLong(record.get(2));
        }

        Skill skill = new Skill(rank, level, experience);
        hiscoreBuilder.setNextSkill(skill);
    }

    return hiscoreBuilder;
}
Example 20
Source File: CSVRecordReader.java From nifi with Apache License 2.0
@Override
public Record nextRecord(final boolean coerceTypes, final boolean dropUnknownFields) throws IOException, MalformedRecordException {
    try {
        final RecordSchema schema = getSchema();

        final List<RecordField> recordFields = getRecordFields();
        final int numFieldNames = recordFields.size();
        for (final CSVRecord csvRecord : csvParser) {
            final Map<String, Object> values = new LinkedHashMap<>(recordFields.size() * 2);
            for (int i = 0; i < csvRecord.size(); i++) {
                final String rawValue = csvRecord.get(i);

                final String rawFieldName;
                final DataType dataType;
                if (i >= numFieldNames) {
                    if (!dropUnknownFields) {
                        values.put("unknown_field_index_" + i, rawValue);
                    }
                    continue;
                } else {
                    final RecordField recordField = recordFields.get(i);
                    rawFieldName = recordField.getFieldName();
                    dataType = recordField.getDataType();
                }

                final Object value;
                if (coerceTypes) {
                    value = convert(rawValue, dataType, rawFieldName);
                } else {
                    // The CSV Reader is going to return all fields as Strings, because CSV doesn't have any way to
                    // dictate a field type. As a result, we will use the schema that we have to attempt to convert
                    // the value into the desired type if it's a simple type.
                    value = convertSimpleIfPossible(rawValue, dataType, rawFieldName);
                }

                values.put(rawFieldName, value);
            }

            return new MapRecord(schema, values, coerceTypes, dropUnknownFields);
        }
    } catch (Exception e) {
        throw new MalformedRecordException("Error while getting next record. Root cause: " + Throwables.getRootCause(e), e);
    }

    return null;
}