com.aerospike.client.policy.ClientPolicy Java Examples

The following examples show how to use com.aerospike.client.policy.ClientPolicy. You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example #1
Source File: AerospikeAbstractSink.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the Aerospike client from the configured comma-separated
 * "host:port" seed list, attaching the supplied event loops and,
 * when present, the configured credentials.
 *
 * @param eventLoops NIO event loops shared with the async client
 * @throws RuntimeException if any seed entry is not a valid "host:port" pair
 */
private void createClient(NioEventLoops eventLoops) {
    String[] hosts = aerospikeSinkConfig.getSeedHosts().split(",");
    Host[] aeroSpikeHosts = new Host[hosts.length];
    for (int i = 0; i < hosts.length; ++i) {
        // Each entry must be "host:port". The original guard
        // (hosts.length <= 0) could never trigger — String.split always
        // yields at least one element — so malformed input previously
        // surfaced as an ArrayIndexOutOfBoundsException here instead.
        String[] hostPort = hosts[i].split(":");
        if (hostPort.length != 2) {
            throw new RuntimeException("Invalid Seed Hosts");
        }
        try {
            aeroSpikeHosts[i] = new Host(hostPort[0], Integer.parseInt(hostPort[1]));
        } catch (NumberFormatException nfe) {
            throw new RuntimeException("Invalid Seed Hosts", nfe);
        }
    }
    ClientPolicy policy = new ClientPolicy();
    // Credentials are optional; only apply them when both are non-empty.
    if (aerospikeSinkConfig.getUserName() != null && !aerospikeSinkConfig.getUserName().isEmpty()
        && aerospikeSinkConfig.getPassword() != null && !aerospikeSinkConfig.getPassword().isEmpty()) {
        policy.user = aerospikeSinkConfig.getUserName();
        policy.password = aerospikeSinkConfig.getPassword();
    }
    policy.eventLoops = eventLoops;
    client = new AerospikeClient(policy, aeroSpikeHosts);
}
 
Example #2
Source File: AerospikeBeanConfig.java    From datacollector with Apache License 2.0 6 votes vote down vote up
/**
 * initialize and validate configuration options
 *
 * @param context
 * @param issues
 */
/**
 * Initializes the Aerospike client and validates the configuration,
 * reporting connection problems through {@code issues}.
 *
 * @param context stage context used to create config issues
 * @param issues  accumulator for configuration problems found during init
 */
public void init(Target.Context context, List<Target.ConfigIssue> issues) {
  List<Host> hosts = getAerospikeHosts(issues, connectionString, Groups.AEROSPIKE.getLabel(), "aerospikeBeanConfig.connectionString", context);
  ClientPolicy cp = new ClientPolicy();
  try {
    client = new AerospikeClient(cp, hosts.toArray(new Host[hosts.size()]));
    int retries = 0;
    // Poll until connected or retries are exhausted. The original inner
    // "if (retries > maxRetries)" was unreachable (the loop condition
    // already guaranteed retries <= maxRetries), so running out of
    // retries silently reported no issue at all.
    while (!client.isConnected()) {
      if (retries >= maxRetries) {
        issues.add(context.createConfigIssue(Groups.AEROSPIKE.getLabel(), "aerospikeBeanConfig.connectionString", AerospikeErrors.AEROSPIKE_03, connectionString));
        return;
      }
      retries++;
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
        // Restore the interrupt flag and stop waiting instead of
        // swallowing the interruption.
        Thread.currentThread().interrupt();
        break;
      }
    }

  } catch (AerospikeException ex) {
    issues.add(context.createConfigIssue(Groups.AEROSPIKE.getLabel(), "aerospikeBeanConfig.connectionString", AerospikeErrors.AEROSPIKE_03, connectionString));
  }
}
 
Example #3
Source File: AerospikeRecordWriter.java    From aerospike-hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Reads the output connection and destination settings from the job
 * configuration and prepares the shared client and write policy.
 *
 * @throws IOException declared for subclass/caller compatibility
 */
protected void init() throws IOException {

        // Resolve where the records are written: cluster endpoint plus
        // namespace/set destination.
        String host = AerospikeConfigUtil.getOutputHost(cfg);
        int port = AerospikeConfigUtil.getOutputPort(cfg);
        namespace = AerospikeConfigUtil.getOutputNamespace(cfg);
        setName = AerospikeConfigUtil.getOutputSetName(cfg);

        log.info(String.format("init: %s %d %s %s", host, port, namespace, setName));

        // Anonymous credentials; fail eagerly if the cluster is unreachable
        // rather than discovering it on the first write.
        ClientPolicy clientPolicy = new ClientPolicy();
        clientPolicy.user = "";
        clientPolicy.password = "";
        clientPolicy.failIfNotConnected = true;

        client = AerospikeClientSingleton.getInstance(clientPolicy, host, port);
        writePolicy = new WritePolicy();
    }
 
Example #4
Source File: AerospikeRecordWriter.java    From deep-spark with Apache License 2.0 6 votes vote down vote up
/**
 * Pulls the output endpoint and destination (namespace/set) from the job
 * configuration, then initializes the shared client and the write policy.
 *
 * @throws IOException declared for subclass/caller compatibility
 */
protected void init() throws IOException {

        // Target cluster endpoint.
        String host = AerospikeConfigUtil.getOutputHost(cfg);
        int port = AerospikeConfigUtil.getOutputPort(cfg);

        // Destination for written records.
        namespace = AerospikeConfigUtil.getOutputNamespace(cfg);
        setName = AerospikeConfigUtil.getOutputSetName(cfg);

        log.info(String.format("init: %s %d %s %s", host, port, namespace, setName));

        // No credentials configured for this writer; abort startup if the
        // cluster cannot be reached.
        ClientPolicy connectPolicy = new ClientPolicy();
        connectPolicy.user = "";
        connectPolicy.password = "";
        connectPolicy.failIfNotConnected = true;

        client = AerospikeClientSingleton.getInstance(connectPolicy, host, port);
        writePolicy = new WritePolicy();
    }
 
Example #5
Source File: AerospikeRecordReader.java    From aerospike-hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Scans a single Aerospike node, feeding each record to the callback.
 * Progress is tracked through the isRunning/isFinished/isError flags
 * read by the enclosing reader.
 */
public void run() {
    try {
        AerospikeClient client =
            AerospikeClientSingleton.getInstance(new ClientPolicy(),
                                                 host, port);

        log.info(String.format("scanNode %s:%d:%s:%s",
                               host, port, namespace, setName));
        ScanPolicy scanPolicy = new ScanPolicy();
        scanPolicy.scanPercent = scanPercent;
        CallBack cb = new CallBack();
        log.info("scan starting with scan percent: " + scanPolicy.scanPercent + "%");
        isRunning = true;
        // Restrict the scan to the requested bins when a projection is set.
        if (binNames != null)
            client.scanNode(scanPolicy, node, namespace, setName,
                            cb, binNames);
        else
            client.scanNode(scanPolicy, node, namespace, setName,
                            cb);
        isFinished = true;
        log.info("scan finished");
    }
    catch (Exception ex) {
        // Pass the throwable to the logger so the stack trace is kept;
        // the old string concatenation ("..." + ex) dropped it.
        log.error("exception in ASSCanReader.run", ex);
        isError = true;
    }
}
 
Example #6
Source File: SampleData.java    From aerospike-hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Entry point for the sample-data loader. Expects
 * {@code args[0]} = "host:port:namespace:set:bin" and
 * {@code args[1]} = data type ("text-file" or "seq-int"); remaining
 * args are forwarded to the type-specific runner.
 *
 * @throws Exception propagated from the type-specific runner
 */
public static void run(String[] args) throws Exception {

        int argi = 0;
        String asspec = args[argi++];
        String dataType = args[argi++];

        log.info(String.format("saw %s %s", asspec, dataType));

        // Validate the connection spec up front; previously a malformed
        // spec surfaced as a raw ArrayIndexOutOfBoundsException below.
        String[] inparam = asspec.split(":");
        if (inparam.length != 5) {
            throw new RuntimeException(String.format(
                "bad connection spec \"%s\"; expected host:port:namespace:set:bin",
                asspec));
        }
        host = inparam[0];
        port = Integer.parseInt(inparam[1]);
        namespace = inparam[2];
        setName = inparam[3];
        binName = inparam[4];

        // Anonymous connection; fail fast if the cluster is unreachable.
        ClientPolicy policy = new ClientPolicy();
        policy.user = "";
        policy.password = "";
        policy.failIfNotConnected = true;

        client = new AerospikeClient(policy, host, port);

        writePolicy = new WritePolicy();

        switch (dataType) {
        case "text-file":
            runTextFile(args, argi);
            break;
        case "seq-int":
            runSeqInt(args, argi);
            break;
        default:
            throw new RuntimeException(String.format("unknown dataType \"%s\"",
                                                     dataType));
        }
    }
 
Example #7
Source File: AerospikeRecordReader.java    From deep-spark with Apache License 2.0 5 votes vote down vote up
/**
 * Scans a single Aerospike node, delivering each record to the callback.
 * Progress is exposed through the isRunning/isFinished/isError flags
 * read by the enclosing reader.
 */
public void run() {
    try {
        AerospikeClient client =
            AerospikeClientSingleton.getInstance(new ClientPolicy(),
                                                 host, port);

        log.info(String.format("scanNode %s:%d:%s:%s",
                               host, port, namespace, setName));
        ScanPolicy scanPolicy = new ScanPolicy();
        CallBack cb = new CallBack();
        log.info("scan starting");
        isRunning = true;
        // Restrict the scan to the requested bins when a projection is set.
        if (binNames != null)
            client.scanNode(scanPolicy, node, namespace, setName,
                            cb, binNames);
        else
            client.scanNode(scanPolicy, node, namespace, setName,
                            cb);
        isFinished = true;
        log.info("scan finished");
    }
    catch (Exception ex) {
        // Hand the throwable to the logger so the stack trace survives;
        // the old string concatenation ("..." + ex) discarded it.
        log.error("exception in ASSCanReader.run", ex);
        isError = true;
    }
}
 
Example #8
Source File: AerospikeRecordReader.java    From aerospike-hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Runs a numeric-range query against a single node and pushes each
 * key/record pair onto the shared queue for the reader to consume.
 * Progress is exposed through the isRunning/isFinished/isError flags.
 */
public void run() {
    try {
        AerospikeClient client =
            AerospikeClientSingleton.getInstance(new ClientPolicy(),
                                                 host, port);
        log.info(String.format("queryNode %s:%d %s:%s:%s[%d:%d]",
                               host, port, namespace, setName,
                               numrangeBin, numrangeBegin,
                               numrangeEnd));
        Statement stmt = new Statement();
        stmt.setNamespace(namespace);
        stmt.setSetName(setName);
        stmt.setFilters(Filter.range(numrangeBin,
                                     numrangeBegin,
                                     numrangeEnd));
        // Project only the requested bins when a projection is configured.
        if (binNames != null)
            stmt.setBinNames(binNames);
        QueryPolicy queryPolicy = new QueryPolicy();
        RecordSet rs = client.queryNode(queryPolicy,
                                        stmt,
                                        client.getNode(node));
        isRunning = true;
        try {
            log.info("query starting");
            while (rs.next()) {
                Key key = rs.getKey();
                Record record = rs.getRecord();
                queue.put(new KeyRecPair(new AerospikeKey(key),
                                         new AerospikeRecord(record)));
            }
        }
        finally {
            rs.close();
            isFinished = true;
            log.info("query finished");
        }
    }
    catch (Exception ex) {
        // Previously the exception was swallowed with no trace at all;
        // log it (like the scan variant does) before flagging the error.
        log.error("exception in query run", ex);
        isError = true;
    }
}
 
Example #9
Source File: AerospikeInputFormat.java    From aerospike-hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Builds one input split per Aerospike cluster node so each hadoop task
 * reads directly from a single node.
 *
 * @param job       job configuration carrying the aerospike.* input settings
 * @param numSplits hint from the framework; ignored — split count equals node count
 * @return one AerospikeSplit per cluster node
 * @throws IOException if no nodes are found or any setup step fails
 */
public org.apache.hadoop.mapred.InputSplit[]
    getSplits(JobConf job, int numSplits) throws IOException {
    try {

        // Read the input-side configuration from the job.
        String oper = AerospikeConfigUtil.getInputOperation(job);
        String host = AerospikeConfigUtil.getInputHost(job);
        int port = AerospikeConfigUtil.getInputPort(job);
        String namespace = AerospikeConfigUtil.getInputNamespace(job);
        String setName = AerospikeConfigUtil.getInputSetName(job);
        String[] binNames = AerospikeConfigUtil.getInputBinNames(job);

        // Operation-specific parameters keep their defaults unless the
        // matching operation is selected.
        String numrangeBin = "";
        long numrangeBegin = 0;
        long numrangeEnd = 0;
        int scanPercent = 100;
        if (oper.equals("numrange")) {
            numrangeBin = AerospikeConfigUtil.getInputNumRangeBin(job);
            numrangeBegin = AerospikeConfigUtil.getInputNumRangeBegin(job);
            numrangeEnd = AerospikeConfigUtil.getInputNumRangeEnd(job);
        } else if (oper.equals("scan")) {
            scanPercent = AerospikeConfigUtil.getInputScanPercent(job);
        }

        log.info(String.format("using: %s %d %s %s",
                               host, port, namespace, setName));

        AerospikeClient client =
            AerospikeClientSingleton.getInstance(new ClientPolicy(),
                                                 host, port);
        Node[] nodes = client.getNodes();
        if (nodes.length == 0) {
            throw new IOException("no Aerospike nodes found");
        }
        log.info(String.format("found %d nodes", nodes.length));

        AerospikeSplit[] splits = new AerospikeSplit[nodes.length];
        for (int idx = 0; idx < nodes.length; idx++) {
            Node node = nodes[idx];

            // Prefer any alias other than 127.0.0.1: the split is shipped
            // to a different hadoop node, where loopback is meaningless.
            List<Host> aliases = getAliases(node.getHost());
            Host chosen = aliases.get(0);
            for (Host candidate : aliases) {
                if (!candidate.name.equals("127.0.0.1")) {
                    chosen = candidate;
                    break;
                }
            }

            splits[idx] = new AerospikeSplit(oper, node.getName(),
                                             chosen.name, chosen.port,
                                             namespace, setName, binNames,
                                             numrangeBin, numrangeBegin,
                                             numrangeEnd, scanPercent);
            log.info("split: " + splits[idx]);
        }
        return splits;
    }
    catch (Exception ex) {
        throw new IOException("exception in getSplits", ex);
    }
}
 
Example #10
Source File: AerospikeRecordReader.java    From deep-spark with Apache License 2.0 4 votes vote down vote up
public void run() {
    try {
        AerospikeClient client =
            AerospikeClientSingleton.getInstance(new ClientPolicy(),
                                                 host, port);
        log.info(String.format("queryNode %s:%d %s:%s:%s[%d:%d]",
                               host, port, namespace, setName,
                               numrangeBin, numrangeBegin,
                               numrangeEnd));
        Statement stmt = new Statement();
        stmt.setNamespace(namespace);
        stmt.setSetName(setName);
        stmt.setFilters(Filter.range(numrangeBin,
                                     numrangeBegin,
                                     numrangeEnd));
        if (binNames != null)
            stmt.setBinNames(binNames);
        QueryPolicy queryPolicy = new QueryPolicy();
        RecordSet rs = client.queryNode(queryPolicy,
                                        stmt,
                                        client.getNode(node));
        isRunning = true;
        try {
            log.info("query starting");
            while (rs.next()) {
                Key key = rs.getKey();
                Record record = rs.getRecord();
                queue.put(new KeyRecPair(new AerospikeKey(key),
                                         new AerospikeRecord(record)));
            }
        }
        finally {
            rs.close();
            isFinished = true;
            log.info("query finished");
        }
    }
    catch (Exception ex) {
        isError = true;
        return;
    }
}
 
Example #11
Source File: AerospikeInputFormat.java    From deep-spark with Apache License 2.0 4 votes vote down vote up
/**
 * Creates one input split per Aerospike cluster node so each hadoop task
 * can read from exactly one node.
 *
 * @param job       job configuration carrying the aerospike.* input settings
 * @param numSplits hint from the framework; ignored — split count equals node count
 * @return one AerospikeSplit per cluster node
 * @throws IOException if no nodes are found or any setup step fails
 */
public org.apache.hadoop.mapred.InputSplit[]
    getSplits(JobConf job, int numSplits) throws IOException {
    try {

        // Load the input-side configuration from the job.
        String oper = AerospikeConfigUtil.getInputOperation(job);
        String host = AerospikeConfigUtil.getInputHost(job);
        int port = AerospikeConfigUtil.getInputPort(job);
        String namespace = AerospikeConfigUtil.getInputNamespace(job);
        String setName = AerospikeConfigUtil.getInputSetName(job);
        String[] binNames = AerospikeConfigUtil.getInputBinNames(job);

        // Range parameters only apply to the "numrange" operation.
        String numrangeBin = "";
        long numrangeBegin = 0;
        long numrangeEnd = 0;
        if (oper.equals("numrange")) {
            numrangeBin = AerospikeConfigUtil.getInputNumRangeBin(job);
            numrangeBegin = AerospikeConfigUtil.getInputNumRangeBegin(job);
            numrangeEnd = AerospikeConfigUtil.getInputNumRangeEnd(job);
        }

        log.info(String.format("using: %s %d %s %s",
                               host, port, namespace, setName));

        AerospikeClient client =
            AerospikeClientSingleton.getInstance(new ClientPolicy(),
                                                 host, port);
        Node[] nodes = client.getNodes();
        if (nodes.length == 0) {
            throw new IOException("no Aerospike nodes found");
        }
        log.info(String.format("found %d nodes", nodes.length));

        AerospikeSplit[] splits = new AerospikeSplit[nodes.length];
        for (int idx = 0; idx < nodes.length; idx++) {
            Node node = nodes[idx];

            // Avoid 127.0.0.1 as the split host: the split travels to a
            // different hadoop node, where loopback would be meaningless.
            Host[] aliases = node.getAliases();
            Host chosen = aliases[0];
            for (Host candidate : aliases) {
                if (!candidate.name.equals("127.0.0.1")) {
                    chosen = candidate;
                    break;
                }
            }

            splits[idx] = new AerospikeSplit(oper, node.getName(),
                                             chosen.name, chosen.port,
                                             namespace, setName, binNames,
                                             numrangeBin, numrangeBegin,
                                             numrangeEnd);
            log.info("split: " + splits[idx]);
        }
        return splits;
    }
    catch (Exception ex) {
        throw new IOException("exception in getSplits", ex);
    }
}