org.rocksdb.InfoLogLevel Java Examples

The following examples show how to use org.rocksdb.InfoLogLevel, drawn from open-source projects; the source file and originating project are named above each example.
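As a quick orientation before the project examples, the following minimal, self-contained sketch shows the two ways InfoLogLevel typically appears: as the verbosity setting on an Options object, and as the level handed to a custom org.rocksdb.Logger callback. The class name and database path are illustrative only, not taken from any of the projects below.

import org.rocksdb.InfoLogLevel;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class InfoLogLevelSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();

        // Set the verbosity of RocksDB's native info log via the options.
        try (Options options = new Options()
                .setCreateIfMissing(true)
                .setInfoLogLevel(InfoLogLevel.WARN_LEVEL)) {

            // Optionally route native log output through a custom logger;
            // each message arrives together with its InfoLogLevel.
            options.setLogger(new org.rocksdb.Logger(options) {
                @Override
                protected void log(InfoLogLevel infoLogLevel, String logMsg) {
                    System.out.println(infoLogLevel + ": " + logMsg);
                }
            });

            try (RocksDB db = RocksDB.open(options, "/tmp/infologlevel-sketch")) {
                db.put("key".getBytes(), "value".getBytes());
            }
        }
    }
}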
Example #1
Source File: RocksDbTtlCompactFiltersManager.java    From Flink-CEPplus with Apache License 2.0
private static org.rocksdb.Logger createRocksDbNativeLogger() {
	if (LOG.isDebugEnabled()) {
		// options are always needed for org.rocksdb.Logger construction (no other constructor)
		// the logger level gets configured from the options in native code
		try (DBOptions opts = new DBOptions().setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL)) {
			return new org.rocksdb.Logger(opts) {
				@Override
				protected void log(InfoLogLevel infoLogLevel, String logMsg) {
					LOG.debug("RocksDB filter native code log: " + logMsg);
				}
			};
		}
	} else {
		return null;
	}
}
 
Example #2
Source File: RocksToSLF4JLogger.java    From biomedicus with Apache License 2.0
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
  switch (infoLogLevel) {
    case DEBUG_LEVEL:
      slf4jLogger.debug(logMsg);
      break;
    case INFO_LEVEL:
      slf4jLogger.info(logMsg);
      break;
    case WARN_LEVEL:
      slf4jLogger.warn(logMsg);
      break;
    case ERROR_LEVEL:
    case FATAL_LEVEL:
    case HEADER_LEVEL:
    case NUM_INFO_LOG_LEVELS: // enum sentinel, not expected for real messages
      slf4jLogger.error(logMsg);
      break;
  }
}
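The override above is shown without its surrounding class. For context, a self-contained adapter along the same lines might look like the following; the class name, constructor, and field are assumptions for illustration rather than the actual biomedicus source.

import org.rocksdb.InfoLogLevel;
import org.rocksdb.Options;
import org.slf4j.Logger;

// Hypothetical adapter: forwards RocksDB's native log output to SLF4J.
public class RocksToSlf4jAdapter extends org.rocksdb.Logger {

    private final Logger slf4jLogger;

    public RocksToSlf4jAdapter(Options options, Logger slf4jLogger) {
        // org.rocksdb.Logger reads its initial log level from the options.
        super(options);
        this.slf4jLogger = slf4jLogger;
    }

    @Override
    protected void log(InfoLogLevel infoLogLevel, String logMsg) {
        switch (infoLogLevel) {
            case DEBUG_LEVEL:
                slf4jLogger.debug(logMsg);
                break;
            case INFO_LEVEL:
                slf4jLogger.info(logMsg);
                break;
            case WARN_LEVEL:
                slf4jLogger.warn(logMsg);
                break;
            default: // ERROR_LEVEL, FATAL_LEVEL, HEADER_LEVEL, sentinel
                slf4jLogger.error(logMsg);
                break;
        }
    }
}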
 
Example #3
Source File: RocksDBMetronome.java    From nifi with Apache License 2.0
@Override
protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {
    switch (infoLogLevel) {
        case ERROR_LEVEL:
        case FATAL_LEVEL:
            logger.error(logMsg);
            break;
        case WARN_LEVEL:
            logger.warn(logMsg);
            break;
        case DEBUG_LEVEL:
            logger.debug(logMsg);
            break;
        case INFO_LEVEL:
        case HEADER_LEVEL:
        default:
            logger.info(logMsg);
            break;
    }
}
 
Example #4
Source File: RocksDBNormalizerModel.java    From biomedicus with Apache License 2.0
RocksDBNormalizerModel(Path dbPath) {
  RocksDB.loadLibrary();

  try (Options options = new Options().setInfoLogLevel(InfoLogLevel.ERROR_LEVEL)) {
    db = RocksDB.openReadOnly(options, dbPath.toString());
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example #5
Source File: RocksDbIdentifiers.java    From biomedicus with Apache License 2.0
public RocksDbIdentifiers(Path identifiersPath) {
  RocksDB.loadLibrary();

  try (Options options = new Options().setInfoLogLevel(InfoLogLevel.ERROR_LEVEL)) {
    indices = RocksDB.openReadOnly(options, identifiersPath.toString());
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example #6
Source File: RocksDBMetronome.java    From nifi with Apache License 2.0
/**
 * @return A RocksDB logger capturing all logging output from RocksDB
 */
private org.rocksdb.Logger getRocksLogger() {
    try (Options options = new Options()
            // make RocksDB give us everything, and we'll decide what we want to log in our wrapper
            .setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL)) {
        return new LogWrapper(options);
    }
}
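Here the Options instance is opened in try-with-resources solely so its DEBUG_LEVEL setting can be read during LogWrapper construction; the wrapper itself decides what actually gets logged. LogWrapper is most likely the class whose log(...) override appears in Example #3 above, since both snippets come from the same RocksDBMetronome.java.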
 
Example #7
Source File: RocksDbCacheOperator.java    From jstorm with Apache License 2.0
public RocksDbCacheOperator(TopologyContext context, String cacheDir) {
    this.stormConf = context.getStormConf();

    this.maxFlushSize = ConfigExtension.getTransactionCacheBatchFlushSize(stormConf);

    Options rocksDbOpt = new Options();
    rocksDbOpt.setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
    long bufferSize =
            ConfigExtension.getTransactionCacheBlockSize(stormConf) != null ? ConfigExtension.getTransactionCacheBlockSize(stormConf) : (1 * SizeUnit.GB);
    rocksDbOpt.setWriteBufferSize(bufferSize);
    int maxBufferNum = ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) != null ? ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) : 3;
    rocksDbOpt.setMaxWriteBufferNumber(maxBufferNum);

    // Configure RocksDB's log files and verbosity
    rocksDbOpt.setMaxLogFileSize(1073741824); // 1G
    rocksDbOpt.setKeepLogFileNum(1);
    rocksDbOpt.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
    
    try {
        Map<Object, Object> conf = new HashMap<Object, Object>();
        conf.put(ROCKSDB_ROOT_DIR, cacheDir);
        conf.put(ROCKSDB_RESET, true);
        initDir(conf);
        initDb(null, rocksDbOpt);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    kryo = new Kryo();
    output = new Output(200, 2000000000);
    input = new Input(1);

    LOG.info("Finished rocksDb cache init: maxFlushSize={}, bufferSize={}, maxBufferNum={}", maxFlushSize, bufferSize, maxBufferNum);
}
 
Example #8
Source File: DBStoreBuilder.java    From hadoop-ozone with Apache License 2.0
private DBOptions getDbProfile() {
  if (rocksDBOption != null) {
    return rocksDBOption;
  }
  DBOptions option = null;
  if (StringUtil.isNotBlank(dbname)) {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new LinkedList<>();

    for (TableConfig tc : tables) {
      columnFamilyDescriptors.add(tc.getDescriptor());
    }

    if (columnFamilyDescriptors.size() > 0) {
      try {
        option = DBConfigFromFile.readFromFile(dbname,
            columnFamilyDescriptors);
        if (option != null) {
          LOG.info("Using Configs from {}.ini file", dbname);
        }
      } catch (IOException ex) {
        LOG.info("Unable to read RocksDB config from {}", dbname, ex);
      }
    }
  }

  if (option == null) {
    LOG.debug("Using default options: {}", dbProfile);
    option = dbProfile.getDBOptions();
  }

  if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
    org.rocksdb.Logger logger = new org.rocksdb.Logger(option) {
      @Override
      protected void log(InfoLogLevel infoLogLevel, String s) {
        ROCKS_DB_LOGGER.info(s);
      }
    };
    InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
        .getRocksdbLogLevel() + "_LEVEL");
    logger.setInfoLogLevel(level);
    option.setLogger(logger);
  }

  if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
    Statistics statistics = new Statistics();
    statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
    option = option.setStatistics(statistics);
  }
  return option;
}
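Note the string-to-enum mapping here: a configured level such as "INFO" is turned into InfoLogLevel.INFO_LEVEL by appending "_LEVEL" and calling InfoLogLevel.valueOf, so an unrecognized configuration value fails fast with an IllegalArgumentException.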
 
Example #9
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) {
    dbOptions.setInfoLogLevel(infoLogLevel);
    return this;
}
 
Example #10
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public InfoLogLevel infoLogLevel() {
    return dbOptions.infoLogLevel();
}
 
Example #11
Source File: RocksDBCache.java    From kcache with Apache License 2.0
private void openDB() {
    // initialize the default rocksdb options

    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
    userSpecifiedOptions.setComparator(new RocksDBKeySliceComparator<>(keySerde, comparator));

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);

    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);

    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also
    // the parallelism value needs to be at least two because of the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));

    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    dbDir = new File(new File(rootDir, parentDir), name);

    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new CacheInitializationException("Could not create directories", fatal);
    }

    openRocksDB(dbOptions, columnFamilyOptions);
    open = true;
}
 
Example #12
Source File: RocksDBDAO.java    From hudi with Apache License 2.0
/**
 * Initializes the RocksDB instance.
 */
private void init() {
  try {
    LOG.info("DELETING RocksDB persisted at " + rocksDBBasePath);
    FileIOUtils.deleteDirectory(new File(rocksDBBasePath));

    managedHandlesMap = new ConcurrentHashMap<>();
    managedDescriptorMap = new ConcurrentHashMap<>();

    // If already present, loads the existing column-family handles

    final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true)
        .setWalDir(rocksDBBasePath).setStatsDumpPeriodSec(300).setStatistics(new Statistics());
    dbOptions.setLogger(new org.rocksdb.Logger(dbOptions) {
      @Override
      protected void log(InfoLogLevel infoLogLevel, String logMsg) {
        LOG.info("From Rocks DB : " + logMsg);
      }
    });
    final List<ColumnFamilyDescriptor> managedColumnFamilies = loadManagedColumnFamilies(dbOptions);
    final List<ColumnFamilyHandle> managedHandles = new ArrayList<>();
    FileIOUtils.mkdir(new File(rocksDBBasePath));
    rocksDB = RocksDB.open(dbOptions, rocksDBBasePath, managedColumnFamilies, managedHandles);

    ValidationUtils.checkArgument(managedHandles.size() == managedColumnFamilies.size(),
        "Unexpected number of handles are returned");
    for (int index = 0; index < managedHandles.size(); index++) {
      ColumnFamilyHandle handle = managedHandles.get(index);
      ColumnFamilyDescriptor descriptor = managedColumnFamilies.get(index);
      String familyNameFromHandle = new String(handle.getName());
      String familyNameFromDescriptor = new String(descriptor.getName());

      ValidationUtils.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
          "Family Handles not in order with descriptors");
      managedHandlesMap.put(familyNameFromHandle, handle);
      managedDescriptorMap.put(familyNameFromDescriptor, descriptor);
    }
  } catch (RocksDBException | IOException re) {
    LOG.error("Got exception opening Rocks DB instance ", re);
    throw new HoodieException(re);
  }
}