org.supercsv.io.ICsvBeanWriter Java Examples

The following examples show how to use org.supercsv.io.ICsvBeanWriter. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: MainController.java    From video-streaming-service with MIT License 6 votes vote down vote up
@RequestMapping("download-csv/{id}")
public void downloadCSV(HttpServletResponse response, @PathVariable("id") Long id) throws IOException {
    final Material material = materialRepository.findOne(id);
    if (material == null)
        throw new IllegalArgumentException("[" + id + "] data does not exist.");

    // Build the download file name from the last path segment of the video name
    // plus the creation timestamp, e.g. "clip.mp4_2020-01-31T09:15.csv".
    String videoName = material.getVideoName();
    int lastIndexOf = videoName.lastIndexOf("/");
    videoName = lastIndexOf >= 0 ? videoName.substring(lastIndexOf + 1) : videoName;
    final String csvFileName = videoName + "_"
            + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm").format(material.getCreatedAt()) + ".csv";

    response.setContentType("text/csv");
    response.setHeader("Content-Disposition", String.format("attachment; filename=\"%s\"", csvFileName));

    // try-with-resources guarantees the writer is flushed and closed even when a
    // write fails mid-stream (the original leaked the writer on exception).
    try (ICsvBeanWriter csvWriter = new CsvBeanWriter(response.getWriter(), CsvPreference.STANDARD_PREFERENCE)) {
        // Header names must match the MaterialData bean property names.
        final String[] header = {"timestamp", "key"};
        csvWriter.writeHeader(header);

        for (MaterialData data : material.getMaterialDataList())
            csvWriter.write(data, header);
    }
}
 
Example #2
Source File: CitationsFileWriter.java    From occurrence with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the dataset citation file from the search query response.
 * Each non-null entry of {@code datasetUsages} is written as one tab-separated
 * row; after a successful write the usages are persisted via the download
 * service. Does nothing when {@code datasetUsages} is null or empty.
 *
 * @param datasetUsages          record count per dataset
 * @param citationFileName       output file name
 * @param occDownloadService     occurrence download service
 * @param downloadKey            download key
 */
public static void createCitationFile(Map<UUID, Long> datasetUsages, String citationFileName,
                                      OccurrenceDownloadService occDownloadService, String downloadKey) {
  if (datasetUsages != null && !datasetUsages.isEmpty()) {
    // try-with-resources closes the writer even if a row write fails.
    try (ICsvBeanWriter beanWriter = new CsvBeanWriter(new FileWriterWithEncoding(citationFileName, Charsets.UTF_8),
                                                       CsvPreference.TAB_PREFERENCE)) {
      for (Entry<UUID, Long> entry : datasetUsages.entrySet()) {
        // Entries with a null dataset key are skipped rather than written.
        if (entry.getKey() != null) {
          beanWriter.write(new Facet.Count(entry.getKey().toString(), entry.getValue()), HEADER, PROCESSORS);
        }
      }
      beanWriter.flush();
      persistUsages(occDownloadService, downloadKey, datasetUsages);
    } catch (IOException e) {
      // Rethrow unchecked while preserving the original cause.
      LOG.error("Error creating citations file", e);
      throw Throwables.propagate(e);
    }
  }
}
 
Example #3
Source File: CSVExporter.java    From robe with GNU Lesser General Public License v3.0 5 votes vote down vote up
@Override
public void exportStream(OutputStream outputStream, Iterator<T> iterator) throws IOException, ClassNotFoundException, IllegalAccessException {
    // Message now matches the actual check: only null is rejected, and the
    // parameter is an Iterator, not a List (the old message claimed both).
    if (iterator == null)
        throw new NullPointerException("Iterator can not be null.");

    // Write UTF-8 CSV onto the caller's stream. We flush but deliberately do
    // not close: the caller owns the OutputStream's lifecycle.
    Writer writer = new OutputStreamWriter(outputStream, "UTF-8");
    ICsvBeanWriter beanWriter = new CsvBeanWriter(writer, preference);

    while (iterator.hasNext()) {
        beanWriter.write(iterator.next(), fieldNames, processors);
    }
    beanWriter.flush();
}
 
Example #4
Source File: ReadWriteCSV.java    From AIDR with GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Opens a CSV bean writer that appends to {@code fileToWrite} using Excel
 * preferences with the default CSV encoder.
 *
 * @param fileToWrite path of the CSV file to open in append mode
 * @return a ready-to-use writer, or {@code null} if the file could not be opened
 */
public ICsvBeanWriter getCSVBeanWriter(String fileToWrite) {
	try {
		// Append mode: repeated calls keep adding rows to the same file.
		return new CsvBeanWriter(new FileWriter(fileToWrite, true),
				new CsvPreference.Builder(CsvPreference.EXCEL_PREFERENCE)
						.useEncoder(new DefaultCsvEncoder())
						.build());
	} catch (IOException e) {
		// Single log entry with the throwable keeps message and stack trace together.
		logger.error("Error in creating CSV Bean writer!", e);
		return null;
	}
}
 
Example #5
Source File: ReadWriteCSV.java    From AIDR with GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Appends the tweet IDs from {@code tweetsList} to a CSV file under the
 * persister directory for {@code collectionDIR}. When {@code beanWriter} is
 * null a new writer is created and the header row is written; otherwise the
 * supplied writer is reused so the header appears only once per file.
 *
 * @param beanWriter    existing writer to reuse, or null to create one
 * @param tweetsList    tweets whose IDs are written (null IDs are skipped)
 * @param collectionDIR collection subdirectory, also used as a log prefix
 * @param fileName      target CSV file name
 * @return the writer in use (possibly newly created), or null if it could not be opened
 */
public ICsvBeanWriter writeCollectorTweetIDSCSV(ICsvBeanWriter beanWriter, List<Tweet> tweetsList, String collectionDIR, String fileName) {
	try {
		// Only the tweet ID column is persisted in this export; the header
		// element maps the bean property to the column (names must match).
		final String[] header = new String[]{"tweetID"};
		final CellProcessor[] processors = getProcessors4TweetIDSCCSV();

		String persisterDIR = PersisterConfigurator.getInstance().getProperty(PersisterConfigurationProperty.DEFAULT_PERSISTER_FILE_PATH);
		String fileToWrite = persisterDIR + collectionDIR + "/" + fileName;
		logger.info(collectionDIR + ": Writing CSV file : " + fileToWrite);

		// Lazily create the writer and emit the header exactly once per file.
		if (null == beanWriter) {
			beanWriter = getCSVBeanWriter(fileToWrite);
			if (beanWriter == null) {
				// Writer creation failed (already logged); avoid the NPE the
				// original would have thrown on writeHeader.
				return null;
			}
			beanWriter.writeHeader(header);
		}

		for (final Tweet tweet : tweetsList) {
			try {
				if (tweet.getTweetID() != null) {
					beanWriter.write(tweet, header, processors);
				}
			} catch (SuperCsvCellProcessorException e) {
				// Log the cause instead of discarding it; keep writing remaining tweets.
				logger.error(collectionDIR + ": SuperCSV error", e);
			}
		}

	} catch (IOException ex) {
		logger.error(collectionDIR + ": IO Exception occurred", ex);
	}
	return beanWriter;
}
 
Example #6
Source File: ReadWriteCSV.java    From AIDR with GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Appends full tweet rows from {@code tweetsList} to a CSV file under the
 * persister directory for {@code collectionDIR}. When {@code beanWriter} is
 * null a new writer is created and the header row is written; otherwise the
 * supplied writer is reused so the header appears only once per file.
 *
 * @param tweetsList    tweets to write
 * @param collectionDIR collection subdirectory, also used as a log prefix
 * @param fileName      target CSV file name
 * @param beanWriter    existing writer to reuse, or null to create one
 * @return the writer in use (possibly newly created), or null if it could not be opened
 */
public ICsvBeanWriter writeCollectorTweetsCSV(List<Tweet> tweetsList, String collectionDIR, String fileName, ICsvBeanWriter beanWriter) {

		try {
			// Header elements map bean properties to columns (names must match).
			final String[] header = new String[]{"tweetID", "message","userID", "userName", "userURL", "createdAt", "tweetURL"};
			final CellProcessor[] processors = getCollectorTweetsProcessors();

			// Lazily create the writer and emit the header exactly once per file.
			if (null == beanWriter) {
				String persisterDIR = PersisterConfigurator.getInstance().getProperty(PersisterConfigurationProperty.DEFAULT_PERSISTER_FILE_PATH);
				String fileToWrite = persisterDIR + collectionDIR + "/" + fileName;
				logger.info(collectionDIR + ": Writing CSV file : " + fileToWrite);
				beanWriter = getCSVBeanWriter(fileToWrite);
				if (beanWriter == null) {
					// Writer creation failed (already logged); avoid an NPE on writeHeader.
					return null;
				}
				beanWriter.writeHeader(header);
			}

			for (final Tweet tweet : tweetsList) {
				try {
					beanWriter.write(tweet, header, processors);
				} catch (SuperCsvCellProcessorException e) {
					// Log the cause instead of discarding it; keep writing remaining tweets.
					logger.error(collectionDIR + ": SuperCSV error", e);
				}
			}

		} catch (IOException ex) {
			logger.error(collectionDIR + ": IO Exception occurred", ex);
		}
		return beanWriter;
	}
 
Example #7
Source File: DownloadDwcaActor.java    From occurrence with Apache License 2.0 5 votes vote down vote up
/**
 * Writes one row per media object of the given occurrence into the
 * multimedia CSV writer; a null media list writes nothing.
 */
private static void writeMediaObjects(ICsvBeanWriter multimediaCsvWriter, Occurrence occurrence) throws IOException {
  List<MediaObject> media = occurrence.getMedia();
  if (media == null) {
    return;
  }
  for (MediaObject item : media) {
    multimediaCsvWriter.write(new InnerMediaObject(item, occurrence.getKey()),
                              MULTIMEDIA_COLUMNS,
                              MEDIA_CELL_PROCESSORS);
  }
}
 
Example #8
Source File: DownloadDwcaActor.java    From occurrence with Apache License 2.0 5 votes vote down vote up
/**
 * Executes the job query and creates a data file that will contain the records
 * from job.from to job.to positions.
 * Three sibling files are produced (interpreted, verbatim, multimedia) and the
 * work's lock is always released, even when the query or a write fails.
 */
public void doWork(DownloadFileWork work) throws IOException {

  DatasetUsagesCollector datasetUsagesCollector = new DatasetUsagesCollector();

  // One writer per output table; try-with-resources closes all three in
  // reverse order regardless of how the query processing ends.
  try (
    ICsvMapWriter intCsvWriter = new CsvMapWriter(new FileWriterWithEncoding(work.getJobDataFileName()
                                                                             + TableSuffixes.INTERPRETED_SUFFIX,
                                                                             Charsets.UTF_8),
                                                  CsvPreference.TAB_PREFERENCE);
    ICsvMapWriter verbCsvWriter = new CsvMapWriter(new FileWriterWithEncoding(work.getJobDataFileName()
                                                                              + TableSuffixes.VERBATIM_SUFFIX,
                                                                              Charsets.UTF_8),
                                                   CsvPreference.TAB_PREFERENCE);
    ICsvBeanWriter multimediaCsvWriter = new CsvBeanWriter(new FileWriterWithEncoding(work.getJobDataFileName()
                                                                                      + TableSuffixes.MULTIMEDIA_SUFFIX,
                                                                                      Charsets.UTF_8),
                                                           CsvPreference.TAB_PREFERENCE)) {
    SearchQueryProcessor.processQuery(work, occurrence -> {
        try {
          // Writes the occurrence record obtained from Elasticsearch as Map<String,Object>.

          if (occurrence != null) {
            datasetUsagesCollector.incrementDatasetUsage(occurrence.getDatasetKey().toString());
            intCsvWriter.write(OccurrenceMapReader.buildInterpretedOccurrenceMap(occurrence), INT_COLUMNS);
            verbCsvWriter.write(OccurrenceMapReader.buildVerbatimOccurrenceMap(occurrence), VERB_COLUMNS);
            writeMediaObjects(multimediaCsvWriter, occurrence);
          }
        } catch (Exception e) {
          // The callback cannot throw checked exceptions; rethrow unchecked
          // with the cause preserved so the outer try surfaces the failure.
          throw Throwables.propagate(e);
        }
      });
  } finally {
    // Unlock the assigned lock.
    work.getLock().unlock();
    LOG.info("Lock released, job detail: {} ", work);
  }
  // Report the collected per-dataset usage counts back to the sender actor.
  getSender().tell(new Result(work, datasetUsagesCollector.getDatasetUsages()), getSelf());
}