Java Code Examples for me.prettyprint.hector.api.mutation.Mutator#execute()

The following examples show how to use me.prettyprint.hector.api.mutation.Mutator#execute(). They are drawn from open-source projects; follow the link above each example to the original project and source file.
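Hector's Mutator buffers insertions and deletions on the client side: addInsertion() and addDeletion() only queue mutations, and nothing reaches Cassandra until execute() is called, which sends everything queued so far as a single batch and returns a MutationResult. The following minimal sketch shows the lifecycle; the cluster, keyspace, and column family names are hypothetical:

import me.prettyprint.cassandra.serializers.StringSerializer;
import me.prettyprint.hector.api.Cluster;
import me.prettyprint.hector.api.Keyspace;
import me.prettyprint.hector.api.factory.HFactory;
import me.prettyprint.hector.api.mutation.MutationResult;
import me.prettyprint.hector.api.mutation.Mutator;

public class MutatorExecuteSketch {

    public static void main(String[] args) {
        // Hypothetical connection details and schema names.
        Cluster cluster = HFactory.getOrCreateCluster("test-cluster", "localhost:9160");
        Keyspace keyspace = HFactory.createKeyspace("MyKeyspace", cluster);

        Mutator<String> mutator = HFactory.createMutator(keyspace, StringSerializer.get());

        // addInsertion() only queues the mutation client-side...
        mutator.addInsertion("row-key", "MyColumnFamily",
                HFactory.createStringColumn("name", "value"));

        // ...execute() sends all queued mutations in one batch.
        MutationResult result = mutator.execute();
        System.out.println("executed on " + result.getHostUsed()
                + " in " + result.getExecutionTimeMicro() + " us");
    }
}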
Example 1
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0
protected void recordLicenses( String projectVersionMetadataKey, List<License> licenses )
{

    if ( licenses == null || licenses.isEmpty() )
    {
        return;
    }
    Mutator<String> licenseMutator = this.licenseTemplate.createMutator();

    for ( License license : licenses )
    {
        // the row key itself is arbitrary; the association with the projectVersionMetadata is made via the column below
        String keyLicense = UUID.randomUUID().toString();
        String cfLicense = cassandraArchivaManager.getLicenseFamilyName();

        addInsertion( licenseMutator, keyLicense, cfLicense, "projectVersionMetadataModel.key",
                      projectVersionMetadataKey );

        addInsertion( licenseMutator, keyLicense, cfLicense, NAME.toString(), license.getName() );

        addInsertion( licenseMutator, keyLicense, cfLicense, URL.toString(), license.getUrl() );

    }
    licenseMutator.execute();
}
 
Example 2
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0
protected void recordChecksums( String repositoryId, String artifactMetadataKey, Map<String, String> checksums)
{
    if ( checksums == null || checksums.isEmpty() )
    {
        return;
    }
    Mutator<String> checksumMutator = this.checksumTemplate.createMutator();
    for ( Map.Entry<String, String> entry : checksums.entrySet())
    {
        // the row key itself is arbitrary; the association with the artifactMetadata is made via the column below
        String keyChecksums = UUID.randomUUID().toString();
        String cfChecksums = cassandraArchivaManager.getChecksumFamilyName();

        addInsertion( checksumMutator, keyChecksums, cfChecksums, ARTIFACT_METADATA_MODEL_KEY,
                artifactMetadataKey );
        addInsertion( checksumMutator, keyChecksums, cfChecksums, CHECKSUM_ALG.toString(), entry.getKey());
        addInsertion( checksumMutator, keyChecksums, cfChecksums, CHECKSUM_VALUE.toString(),
                entry.getValue() );
        addInsertion(checksumMutator, keyChecksums, cfChecksums, REPOSITORY_NAME.toString(), repositoryId);

    }
    checksumMutator.execute();
}
 
Example 3
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0
/**
 * Maps the user to a list of roles. Adds both the (userName, tenantId) -> role
 * and (role, tenantId) -> userName entries.
 *
 * @param userName The username of the user the roles need to be added to.
 * @param roleList The list of roles that need to be mapped against the user.
 */
private void addUserToRoleList(String userName, String[] roleList) {

    Mutator<Composite> mutator = HFactory.createMutator(keyspace, CompositeSerializer.get());

    if (roleList != null) {
        for (String role : roleList) {
            Composite key = new Composite();
            key.addComponent(userName, stringSerializer);
            key.addComponent(tenantIdString, stringSerializer);

            mutator.addInsertion(key, CFConstants.UM_USER_ROLE, HFactory.createColumn(role, role));

            Composite keyRole = new Composite();
            keyRole.addComponent(role, stringSerializer);
            keyRole.addComponent(tenantIdString, stringSerializer);

            mutator.addInsertion(keyRole, CFConstants.UM_ROLE_USER_INDEX, HFactory.createColumn(userName, userName));

        }
        mutator.execute();
    }
}
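Note the dual write: the same membership is stored under a (userName, tenantId) key in UM_USER_ROLE and under a (role, tenantId) key in UM_ROLE_USER_INDEX, so it can be looked up from either direction without secondary indexes. Both insertions sit in the same mutator and are applied together by the single execute() call.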
 
Example 4
Source File: AbstractSearch.java    From usergrid with Apache License 2.0
/**
 * Write the updated client pointer
 *
 * @param lastReturnedId Null-safe; if null, nothing is written, since it means no
 * messages were read
 */
protected void writeClientPointer( UUID queueId, UUID consumerId, UUID lastReturnedId ) {
    // nothing to do
    if ( lastReturnedId == null ) {
        return;
    }

    // We want to set the timestamp to the value from the time uuid. If this is
    // not the max time uuid ever written for this consumer, the write should be
    // discarded, to avoid internode race conditions caused by clock drift.
    long colTimestamp = UUIDUtils.getTimestampInMicros( lastReturnedId );

    Mutator<UUID> mutator = CountingMutator.createFlushingMutator( ko, ue );

    if ( logger.isDebugEnabled() ) {
        logger.debug( "Writing last client id pointer of '{}' for queue '{}' and consumer '{}' with timestamp '{}",
                        lastReturnedId, queueId, consumerId, colTimestamp
                );
    }

    mutator.addInsertion( consumerId, CONSUMERS.getColumnFamily(),
            createColumn( queueId, lastReturnedId, colTimestamp, ue, ue ) );

    mutator.execute();
}
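The createColumn() used above is likely a static import of HFactory.createColumn, whose five-argument overload takes an explicit clock value; that is what makes the time-uuid timestamp trick possible. A sketch of the core call, reusing the names from the example (ue is assumed to be a UUIDSerializer, and "Consumers" stands in for the real column family name):

// micros taken from the time uuid rather than the local system clock
long clock = UUIDUtils.getTimestampInMicros( lastReturnedId );
HColumn<UUID, UUID> column = HFactory.createColumn( queueId, lastReturnedId, clock, ue, ue );
mutator.addInsertion( consumerId, "Consumers", column );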
 
Example 5
Source File: HectorPolicyManagerImpl.java    From ck with Apache License 2.0
@Override
public void persist(PolicyDAO policy) {
    PolicyDAOImpl impl = getImpl(policy, PolicyDAOImpl.class);
    UUID policyID = impl.getPolicyID().getUUID();

    Mutator<UUID> m = Schema.POLICIES.createMutator(_keyspace);

    Schema.POLICIES.SHORT_NAME.addInsertion(m, policyID, policy.getShortName());
    Schema.POLICIES.DESCRIPTION.addInsertion(m, policyID, policy.getDescription());

    // We're saving changes, so update the edit time
    Schema.POLICIES.LAST_EDITED.addInsertion(m, policyID, new Date());

    // TODO: error handling? Throws HectorException.
    m.execute();
}
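execute() throws HectorException (an unchecked exception) on failure, which this method simply propagates, as the TODO notes. A caller that wants to handle failures explicitly might wrap the call like this (a sketch; the logger is assumed):

try {
    m.execute();
} catch (HectorException e) {
    // Timeouts, unavailable replicas, and transport errors all surface here.
    log.error("Failed to persist policy " + policyID, e);
    throw e; // or translate into a domain-specific exception
}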
 
Example 6
Source File: Cassandra12xMapDAO.java    From cumulusrdf with Apache License 2.0
@Override
public void setAll(final Map<K, V> pairs) {
	final Mutator<K> mutator = createMutator(_keyspace, _serializer_k);

	for (final K key : pairs.keySet()) {
		mutator.addInsertion(key, _cf_name, createColumn(COLUMN_NAME, pairs.get(key), BYTE_SERIALIZER, _serializer_v));
	}

	try {
		mutator.execute();
	} catch (final Exception exception) {
		_log.error(MessageCatalog._00057_ADD_FAILURE, exception);
	}
}
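In contrast to the previous example, this DAO catches the exception from execute() and only logs it, so a failed batch is invisible to callers of setAll(). Whether to swallow or propagate HectorException is the main point on which these examples differ.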
 
Example 7
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0
protected void recordMailingList( String projectVersionMetadataKey, List<MailingList> mailingLists )
{
    if ( mailingLists == null || mailingLists.isEmpty() )
    {
        return;
    }
    Mutator<String> mailingMutator = this.mailingListTemplate.createMutator();
    for ( MailingList mailingList : mailingLists )
    {
        // the row key itself is arbitrary; the association with the projectVersionMetadata is made via the column below
        String keyMailingList = UUID.randomUUID().toString();
        String cfMailingList = cassandraArchivaManager.getMailingListFamilyName();

        addInsertion( mailingMutator, keyMailingList, cfMailingList, "projectVersionMetadataModel.key",
                      projectVersionMetadataKey );
        addInsertion( mailingMutator, keyMailingList, cfMailingList, NAME.toString(), mailingList.getName() );
        addInsertion( mailingMutator, keyMailingList, cfMailingList, "mainArchiveUrl",
                      mailingList.getMainArchiveUrl() );
        addInsertion( mailingMutator, keyMailingList, cfMailingList, "postAddress", mailingList.getPostAddress() );
        addInsertion( mailingMutator, keyMailingList, cfMailingList, "subscribeAddress",
                      mailingList.getSubscribeAddress() );
        addInsertion( mailingMutator, keyMailingList, cfMailingList, "unsubscribeAddress",
                      mailingList.getUnsubscribeAddress() );
        int idx = 0;
        for ( String otherArchive : mailingList.getOtherArchives() )
        {
            addInsertion( mailingMutator, keyMailingList, cfMailingList, "otherArchive." + idx, otherArchive );
            idx++;
        }

    }
    mailingMutator.execute();
}
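The otherArchive.0, otherArchive.1, ... columns illustrate the usual way to store a list in a column family: the element index is encoded into the column name. All mailing list rows are queued on the one mutator and written by the single execute() at the end.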
 
Example 8
Source File: ConsumerTransaction.java    From usergrid with Apache License 2.0
/**
 * Write the transaction timeouts
 *
 * @param messages The messages to load
 * @param futureTimeout The time these messages should expire
 * @param queueId The queue UUId
 * @param consumerId The consumer Id
 */
protected void writeTransactions( List<Message> messages, final long futureTimeout, UUID queueId, UUID consumerId )
{

    Mutator<ByteBuffer> mutator = CountingMutator.createFlushingMutator( ko, be );

    ByteBuffer key = getQueueClientTransactionKey( queueId, consumerId );

    int counter = 0;

    long time = cass.createTimestamp();

    for ( Message message : messages )
    {
        // Note we're not incrementing futureSnapshot on purpose. The uuid
        // generation should give us a sequenced, unique ID for each response,
        // even if the time is the same, since we increment the counter. If we
        // read more than 10k messages in a single transaction, our millisecond
        // will roll to the next, due to 10k being the max amount of 1/10
        // microsecond headroom. Not possible to avoid this given the way time
        // uuids are encoded.
        UUID expirationId = UUIDUtils.newTimeUUID( futureTimeout, counter );
        UUID messageId = message.getUuid();

        if (logger.isTraceEnabled()) {
            logger.trace("Writing new timeout at '{}' for message '{}'", expirationId, messageId);
        }

        mutator.addInsertion( key, CONSUMER_QUEUE_TIMEOUTS.getColumnFamily(),
                createColumn( expirationId, messageId, time, ue, ue ) );

        // add the transactionid to the message
        message.setTransaction( expirationId );
        counter++;
    }

    mutator.execute();
}
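CountingMutator is a Usergrid wrapper around Hector's mutator; judging by the factory name, a flushing mutator appears to execute itself automatically once its pending mutation count crosses a threshold, so the explicit execute() at the end only sends whatever remains in the final partial batch.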
 
Example 9
Source File: ConsumerTransaction.java    From usergrid with Apache License 2.0
/**
 * Delete all re-read transaction pointers
 *
 * @param pointers The list of transaction pointers
 * @param maxIndex The index to stop at (exclusive)
 * @param queueId The queue id
 * @param consumerId The consumer id
 */
protected void deleteTransactionPointers( List<TransactionPointer> pointers, int maxIndex, UUID queueId,
                                          UUID consumerId )
{

    if ( maxIndex == 0 || pointers.size() == 0 )
    {
        return;
    }

    Mutator<ByteBuffer> mutator = CountingMutator.createFlushingMutator( ko, be );
    ByteBuffer key = getQueueClientTransactionKey( queueId, consumerId );

    for ( int i = 0; i < maxIndex && i < pointers.size(); i++ )
    {
        UUID pointer = pointers.get( i ).expiration;

        if ( logger.isTraceEnabled() )
        {
            logger.trace( "Removing transaction pointer '{}' for queue '{}' and consumer '{}'",
                    pointer, queueId, consumerId
            );
        }

        mutator.addDeletion( key, CONSUMER_QUEUE_TIMEOUTS.getColumnFamily(), pointer, ue, cass.createTimestamp() );
    }

    mutator.execute();
}
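addDeletion() is given an explicit timestamp from cass.createTimestamp(). In Cassandra a deletion is itself a write (a tombstone) and only suppresses columns with older timestamps, so a consistent clock matters just as much for deletes queued on a mutator as for inserts.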
 
Example 10
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0
/**
 * Deletes a user by userName.
 */
@Override
public void doDeleteUser(String userName) throws UserStoreException {

    Mutator<Composite> mutator = HFactory.createMutator(keyspace, CompositeSerializer.get());
    String[] roles = doGetExternalRoleListOfUser(userName, "");
    for (String role : roles) {
        Composite key = new Composite();
        key.addComponent(role, stringSerializer);
        key.addComponent(tenantIdString, stringSerializer);
        ColumnFamilyTemplate<Composite, String> userCFTemplate = new ThriftColumnFamilyTemplate<Composite, String>(
                keyspace, CFConstants.UM_ROLE_USER_INDEX, CompositeSerializer.get(), StringSerializer.get());
        try {
            userCFTemplate.deleteColumn(key, userName);
        } catch (HectorException e) {
            log.error("Error during deletion ", e);
        }
    }

    Composite userKey = new Composite();
    userKey.addComponent(userName, stringSerializer);
    userKey.addComponent(tenantIdString, stringSerializer);
    mutator.addDeletion(userKey, CFConstants.UM_USER_ROLE, null, CompositeSerializer.get());
    mutator.addDeletion(userKey, CFConstants.UM_USER, null, CompositeSerializer.get());
    mutator.execute();

    if (log.isDebugEnabled()) {
        log.debug("Deleted user " + userName + " successfully");
    }
}
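Passing null as the column name to addDeletion(), as done here for both UM_USER_ROLE and UM_USER, deletes the entire row rather than a single column, so the one execute() call removes the user's row from both column families in a single batch.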
 
Example 11
Source File: PerfDataAccessor.java    From oneops with Apache License 2.0
public void purgeMetrics(long time, String bucket) {

    int rowCount = 0;
    int totalColCount = 0;
    int totalColDeleted = 0;

    Long start = 1L;
    Long end = time;

    // safeguard not to delete anything in the past week
    long now = System.currentTimeMillis() / 1000;
    logger.info("       now: " + now);
    logger.info("startEpoch: " + time);

    if (time + (60 * 60 * 24 * 7) > now) {
        logger.error("input time too soon - cannot be within past week");
        return;
    }

    int maxColumns = (int) (end - start);
    int pageSize = 1000;
    String lastKey = null;

    Mutator<byte[]> mutator = HFactory.createMutator(keyspace, bytesSerializer);

    RangeSlicesQuery<String, Long, Double> query = HFactory
            .createRangeSlicesQuery(keyspace, stringSerializer, longSerializer, doubleSerializer)
            .setColumnFamily(DATA_CF).setReturnKeysOnly()
            .setRowCount(pageSize);

    while (true) {
        query.setKeys(lastKey, null);

        QueryResult<OrderedRows<String, Long, Double>> result = query.execute();
        OrderedRows<String, Long, Double> orderedRows = result.get();
        Iterator<Row<String, Long, Double>> rowsIterator = orderedRows.iterator();

        // skip the first row: it is the same as the last row of the previous page
        if (lastKey != null && rowsIterator != null)
            rowsIterator.next();

        while (rowsIterator.hasNext()) {
            Row<String, Long, Double> row = rowsIterator.next();

            if (!row.getKey().endsWith("-" + bucket)) {
                continue;
            }

            rowCount++;
            lastKey = row.getKey();

            List<byte[]> keys = new ArrayList<byte[]>();
            keys.add(row.getKey().getBytes());

            MultigetSliceQuery<byte[], Long, Double> multigetSliceQuery = HFactory
                    .createMultigetSliceQuery(keyspace, bytesSerializer, longSerializer, doubleSerializer)
                    .setColumnFamily(DATA_CF).setKeys(keys)
                    .setRange(start, end, false, maxColumns);

            QueryResult<Rows<byte[], Long, Double>> colResult = multigetSliceQuery.execute();
            Rows<byte[], Long, Double> rows = colResult.get();

            int sampleCount = 0;
            int deletedCount = 0;
            for (Row<byte[], Long, Double> rowResult : rows) {

                List<HColumn<Long, Double>> cols = rowResult.getColumnSlice().getColumns();
                Iterator<HColumn<Long, Double>> listIter = cols.listIterator();

                while (listIter.hasNext()) {
                    HColumn<Long, Double> c = listIter.next();

                    if (c.getName() < time) {
                        mutator.addDeletion(row.getKey().getBytes(), DATA_CF, c.getName(), longSerializer);
                        deletedCount++;
                    }
                    sampleCount++;
                }

                totalColDeleted += deletedCount;
                totalColCount += sampleCount;

                mutator.execute();
            }

            logger.info(row.getKey() + ": " + sampleCount + " deleted: " + deletedCount);
            if (rows.getCount() < pageSize)
                break;

        }
        logger.info("rows: " + rowCount + " cols: " + totalColCount
                + " deleted: " + totalColDeleted);

        if (orderedRows.getCount() < pageSize)
            break;

    }
}
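Here execute() runs once per row inside the paging loop, which keeps each batch bounded by the width of a single row. For very wide rows the deletions could instead be flushed every N mutations; a sketch of that variant, reusing the names above (the threshold of 1000 is an arbitrary assumption):

int pending = 0;
while (listIter.hasNext()) {
    HColumn<Long, Double> c = listIter.next();
    if (c.getName() < time) {
        mutator.addDeletion(row.getKey().getBytes(), DATA_CF, c.getName(), longSerializer);
        if (++pending % 1000 == 0) {
            mutator.execute(); // flush a bounded batch
        }
    }
}
mutator.execute(); // flush the remainder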
 
Example 12
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0
/**
 * Adds the user to the user store.
 */
@Override
public void doAddUser(String userName, Object credential, String[] roleList, Map<String, String> claims,
                      String profileName, boolean requirePasswordChange) throws UserStoreException {

    String userId = UUID.randomUUID().toString();
    String saltValue = null;

    if (TRUE.equalsIgnoreCase(realmConfig.getUserStoreProperties().get(JDBCRealmConstants.STORE_SALTED_PASSWORDS))) {
        saltValue = Util.getSaltValue();
    }

    String password = Util.preparePassword((String) credential, saltValue);

    if (doCheckExistingUser(userName)) {

        String message = "User with credentials " + userName + "exists";

        UserStoreException userStoreException = new UserStoreException(message);
        log.error(message, userStoreException);
        throw userStoreException;
    } else {

        Mutator<Composite> mutator = HFactory.createMutator(keyspace, CompositeSerializer.get());

        Composite key = new Composite();
        key.addComponent(userName, stringSerializer);
        key.addComponent(tenantIdString, stringSerializer);

        // add user ID
        mutator.addInsertion(key, CFConstants.UM_USER,
                HFactory.createColumn(CFConstants.UM_USER_ID, userId, stringSerializer, stringSerializer));
        mutator.addInsertion(key, CFConstants.UM_USER,
                HFactory.createColumn(CFConstants.UM_USER_NAME, userName, stringSerializer, stringSerializer));
        mutator.addInsertion(key, CFConstants.UM_USER,
                HFactory.createColumn(CFConstants.UM_SECRET, password, stringSerializer, stringSerializer));
        mutator.addInsertion(key, CFConstants.UM_USER,
                HFactory.createColumn(CFConstants.UM_SALT_VALUE, saltValue, stringSerializer, stringSerializer));
        mutator.addInsertion(key, CFConstants.UM_USER, HFactory.createColumn(CFConstants.UM_REQUIRE_CHANGE_BOOLEAN,
                "false", stringSerializer, stringSerializer));
        mutator.addInsertion(key, CFConstants.UM_USER,
                HFactory.createColumn(CFConstants.UM_TENANT_ID, tenantIdString, stringSerializer, stringSerializer));
        mutator = addUserToRoleList(userName, roleList, mutator);

        if (claims != null) {
            mutator = addClaimsForUser(userId, claims, mutator);
        }

        try {
            mutator.execute();
            if (log.isDebugEnabled()) {
                log.debug("Added user " + userName + " successfully");
            }
        } catch (HectorException e) {
            // TODO- research and check how to identify cassandra failure
            // and handle it efficiently.
            throw new UserStoreException("Adding user failed.", e);
        }
    }
}
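Everything involved in creating the user — the core columns, the role mappings added by addUserToRoleList(), and the claims — accumulates in the one mutator that the helper methods accept and return, so the single execute() applies the whole operation in one batch_mutate round trip.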
 
Example 13
Source File: PerfHeaderDao.java    From oneops with Apache License 2.0
@SuppressWarnings("unchecked")
public String createSingleHeader(String key, String step, String metricsJson)
        throws IOException {

    logger.info("createHeader: " + key + " step: " + step + " metrics: " + metricsJson);

    PerfHeader header = new PerfHeader();
    header.setStep(60);
    PerfArchive.setDefaultArchives(header);

    // write the info header
    List<HColumn<String, String>> columns = Arrays.asList(
            HFactory.createStringColumn(UPDATED, "0"),
            HFactory.createStringColumn(STEP, "60"),
            HFactory.createStringColumn(IP, "")
    );

    Mutator<byte[]> mutator = createMutator(keyspace, bytesSerializer);

    mutator.insert(key.getBytes(), HEADER, HFactory.createSuperColumn(INFO,
            columns, stringSerializer, stringSerializer, stringSerializer));

    logger.debug("write INFO :: UPDATED:" + Long.toString(header.getUpdated()) +
            " STEP:" + Integer.toString(header.getStep()) +
            " IP:" + header.getIp());

    String keysPending = "info";

    // example input:
    // "metrics":"{\"WriteOperations\":{\"display\":true,\"unit\":\"Per Second\",\"dstype\":\"DERIVE\",\"description\":\"Write Operations\"}}"
    // "metrics":"{\"ReadOperations\":{\"display\":true,\"unit\":\"per second\",\"dstype\":\"DERIVE\",\"description\":\"Read Operations\"}}"
    HashMap<String, HashMap<String, String>> metricMap =
            mapper.readValue(metricsJson, new TypeReference<Map<String, Map<String, String>>>() { });

    for (String metricKey : metricMap.keySet()) {

        HashMap<String, String> attrs = metricMap.get(metricKey);
        String dsType = PerfDatasource.GAUGE;
        if (attrs.containsKey(DS_TYPE)) {
            dsType = attrs.get(DS_TYPE);
        }

        columns = Arrays.asList(
                HFactory.createStringColumn(TYPE, dsType),
                HFactory.createStringColumn(HEARTBEAT, "300"),
                HFactory.createStringColumn(MIN, NAN),
                HFactory.createStringColumn(MAX, NAN),
                HFactory.createStringColumn(LAST, NAN),
                HFactory.createStringColumn(INPUT, NAN),
                HFactory.createStringColumn(PDP, NAN)
        );

        mutator.insert(key.getBytes(), HEADER, HFactory
                .createSuperColumn(metricKey, columns, stringSerializer, stringSerializer, stringSerializer));
        keysPending += ", " + metricKey;
    }

    Map<String, PerfArchive> rraMap = header.getRraMap();

    for (String rraKey : rraMap.keySet()) {
        PerfArchive rra = rraMap.get(rraKey);

        columns = Arrays.asList(
                HFactory.createStringColumn(CF, rra.getConsolidationFunction()),
                HFactory.createStringColumn(XFF, Double.toString(rra.getXff())),
                HFactory.createStringColumn(STEPS, Integer.toString(rra.getSteps())),
                HFactory.createStringColumn(ROWS, Integer.toString(rra.getRows()))
        );

        mutator.insert(key.getBytes(), HEADER, HFactory.createSuperColumn(
                rraKey, columns, stringSerializer, stringSerializer,
                stringSerializer));
        keysPending += ", " + rraKey;
    }

    logger.debug("write keys:" + keysPending);

    // perform the insert
    mutator.execute();

    return "{\"result_code\":200}\n";
}
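Note that this method uses Mutator#insert(), not addInsertion(): insert() is a convenience that sends the mutation immediately rather than queueing it, so each super column is written as soon as it is created and the final execute() merely flushes anything still pending.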
 
Example 14
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0
/**
 * Update the user list mapped to a role.
 */
@Override
public void doUpdateUserListOfRole(String roleName, String[] deletedUsers, String[] newUsers)
        throws UserStoreException {

    Mutator<Composite> mutator = HFactory.createMutator(keyspace, CompositeSerializer.get());
    RoleContext ctx = createRoleContext(roleName);
    roleName = ctx.getRoleName();
    boolean isShared = ctx.isShared();
    if (!isShared) {
        //TODO TO BE Implemented
    }
    if (deletedUsers != null && deletedUsers.length > 0) {
        if (isShared) {
            //TODO TO BE Implemented
        } else {
            if (deletedUsers.length > 0) {
                Composite key = new Composite();
                key.addComponent(roleName, stringSerializer);
                key.addComponent(tenantIdString, stringSerializer);

                for (String user : deletedUsers) {

                    Composite userKey = new Composite();
                    userKey.addComponent(user, stringSerializer);
                    userKey.addComponent(tenantIdString, stringSerializer);

                    ColumnFamilyTemplate<Composite, String> userCFTemplate = new ThriftColumnFamilyTemplate<Composite, String>(
                            keyspace, CFConstants.UM_USER_ROLE, CompositeSerializer.get(), StringSerializer.get());
                    ColumnFamilyTemplate<Composite, String> roleCFTemplate = new ThriftColumnFamilyTemplate<Composite, String>(
                            keyspace, CFConstants.UM_ROLE_USER_INDEX, CompositeSerializer.get(),
                            StringSerializer.get());
                    try {
                        roleCFTemplate.deleteColumn(mutator, key, user);
                        userCFTemplate.deleteColumn(mutator, userKey, roleName);
                    } catch (HectorException e) {
                        log.error(e.getMessage(), e);
                        throw new UserStoreException("Error during the updating of a user's role list");
                    }
                }
            }

        }
    }
    // need to clear user roles cache upon roles update
    clearUserRolesCacheByTenant(this.tenantId);

    if (newUsers != null && newUsers.length > 0) {
        if (isShared) {
            //TODO TO BE Implemented
        } else {
            addRoleToUsersList(newUsers, roleName, mutator);
        }
    }
    mutator.execute();

}
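The deleteColumn() overload that accepts a mutator only queues the deletion into it, so the removals from both UM_USER_ROLE and UM_ROLE_USER_INDEX, plus the additions made by addRoleToUsersList(), are all applied together by the single execute() at the end of the method.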
 
Example 15
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0
private void updateFacets( final FacetedMetadata facetedMetadata,
                           final ArtifactMetadataModel artifactMetadataModel )
{

    String cf = cassandraArchivaManager.getMetadataFacetFamilyName();

    for ( final String facetId : getSupportedFacets() )
    {
        MetadataFacet metadataFacet = facetedMetadata.getFacet( facetId );
        if ( metadataFacet == null )
        {
            continue;
        }
        // clean first

        QueryResult<OrderedRows<String, String, String>> result =
            HFactory.createRangeSlicesQuery( keyspace, ss, ss, ss ) //
                .setColumnFamily( cf ) //
                .setColumnNames( REPOSITORY_NAME.toString() ) //
                .addEqualsExpression( REPOSITORY_NAME.toString(), artifactMetadataModel.getRepositoryId() ) //
                .addEqualsExpression( NAMESPACE_ID.toString(), artifactMetadataModel.getNamespace() ) //
                .addEqualsExpression( PROJECT_ID.toString(), artifactMetadataModel.getProject() ) //
                .addEqualsExpression( PROJECT_VERSION.toString(), artifactMetadataModel.getProjectVersion() ) //
                .addEqualsExpression( FACET_ID.toString(), facetId ) //
                .execute();

        for ( Row<String, String, String> row : result.get().getList() )
        {
            this.metadataFacetTemplate.deleteRow( row.getKey() );
        }

        Map<String, String> properties = metadataFacet.toProperties();

        for ( Map.Entry<String, String> entry : properties.entrySet() )
        {
            String key = new MetadataFacetModel.KeyBuilder().withKey( entry.getKey() ).withArtifactMetadataModel(
                artifactMetadataModel ).withFacetId( facetId ).withName( metadataFacet.getName() ).build();
            Mutator<String> mutator = metadataFacetTemplate.createMutator() //
                .addInsertion( key, cf, column( REPOSITORY_NAME.toString(), artifactMetadataModel.getRepositoryId() ) ) //
                .addInsertion( key, cf, column( NAMESPACE_ID.toString(), artifactMetadataModel.getNamespace() ) ) //
                .addInsertion( key, cf, column( PROJECT_ID.toString(), artifactMetadataModel.getProject() ) ) //
                .addInsertion( key, cf, column( PROJECT_VERSION.toString(), artifactMetadataModel.getProjectVersion() ) ) //
                .addInsertion( key, cf, column( FACET_ID.toString(), facetId ) ) //
                .addInsertion( key, cf, column( KEY.toString(), entry.getKey() ) ) //
                .addInsertion( key, cf, column( VALUE.toString(), entry.getValue() ) );

            if ( metadataFacet.getName() != null )
            {
                mutator.addInsertion( key, cf, column( NAME.toString(), metadataFacet.getName() ) );
            }

            mutator.execute();
        }
    }
}
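addInsertion() returns the mutator itself, which is what allows the fluent chain above. A fresh mutator is created and executed for each facet property, so every property's handful of columns forms its own small batch rather than one large one.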
 
Example 16
Source File: PerfDataAccessor.java    From oneops with Apache License 2.0
public void writeSampleToHeaderAndBuckets(String key, long endTime, PerfHeader header, Map<String, Double> data, HashMap<String, PerfEvent> perfEventMap) throws IOException {

    StringBuilder pendingKeys = new StringBuilder("");
    Mutator<byte[]> mutator = createMutator(keyspace, bytesSerializer);

    phd.putHeader(key, header, mutator);
    // write the buckets / archives
    for (String dsRraTime : data.keySet()) {
        // only supporting avg due to volume
        if (!dsRraTime.contains("rra-average"))
            continue;

        String[] dsRraTimeParts = dsRraTime.split("::");
        String dsRra = dsRraTimeParts[0];
        String bucketKey = key + ":" + dsRra;
        long bucketEndTime = Long.parseLong(dsRraTimeParts[1]);

        Double cdpValue = Math.round(data.get(dsRraTime).doubleValue() * 1000.0) / 1000.0;
        if (dsRra.endsWith(LOGBUCKET)) {
            logger.info("write " + bucketKey + " : " + cdpValue);
        }
        String shard = dsRra.substring(dsRra.length() - 3).replace("-", "");
        int ttl = getTTL(shard);
        HColumn<Long, Double> column = createDataColumn(bucketEndTime, cdpValue.doubleValue());
        column.setTtl(ttl);

        String dataCF = DATA_CF + "_" + shard;
        if (isTestMode)
            dataCF += "_test";

        mutator.addInsertion(bucketKey.getBytes(), dataCF, column);
        pendingKeys.append(" ," + bucketKey);

        // send the consolidated perf event to the sensor
        PerfEvent pe = null;
        String[] rraParts = dsRra.split("-");
        String eventBucket = rraParts[rraParts.length - 1];
        if (perfEventMap.containsKey(eventBucket)) {
            pe = perfEventMap.get(eventBucket);
        } else {
            pe = setEventBucket(perfEventMap, eventBucket);
        }

        String ds = rraParts[0].replace(":rra", "");
        String rraType = rraParts[1];

        if (rraType.equalsIgnoreCase(AVERAGE)) {
            pe.getMetrics().addAvg(ds, cdpValue);
        } else if (rraType.equalsIgnoreCase(COUNT)) {
            pe.getMetrics().addCount(ds, cdpValue);
        } else if (rraType.equalsIgnoreCase(MAX)) {
            pe.getMetrics().addMax(ds, cdpValue);
        } else if (rraType.equalsIgnoreCase(MIN)) {
            pe.getMetrics().addMin(ds, cdpValue);
        } else if (rraType.equalsIgnoreCase(SUM)) {
            pe.getMetrics().addSum(ds, cdpValue);
        }

    }

    logger.debug("write keys:" + pendingKeys);

    // perform the inserts/updates
    mutator.execute();
}
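Each data column is given a TTL before being queued, so Cassandra expires old samples server-side and no explicit purge is needed for these rows. The relevant calls in isolation (the value, retention period, and column family name are made up for illustration):

HColumn<Long, Double> column = HFactory.createColumn(
        endTime, 0.42d, LongSerializer.get(), DoubleSerializer.get());
column.setTtl(60 * 60 * 24 * 30); // seconds; expire after roughly 30 days
mutator.addInsertion(bucketKey.getBytes(), "data_cf_001", column);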
 
Example 17
Source File: PerfDataAccessor.java    From oneops with Apache License 2.0
public void execute(Mutator<String> mutator) {
    mutator.execute();
}
 
Example 18
Source File: HectorPolicyManagerImpl.java    From ck with Apache License 2.0
@Override
public void deletePolicy(PolicyDAO policy) {
    PolicyDAOImpl impl = getImpl(policy, PolicyDAOImpl.class);
    UUID policyID = impl.getPolicyID().getUUID();

    Mutator<UUID> m = Schema.POLICIES.createMutator(_keyspace);

    Schema.POLICIES.addRowDeletion(m, policyID);

    m.execute();

    // TODO: this will need to delete from other ColumnFamilies too and trigger recalcs
}
 
Example 19
Source File: PerfHeaderDao.java    From oneops with Apache License 2.0
public String removeHeader(String key) {

    Mutator<byte[]> mutator = createMutator(keyspace, bytesSerializer);

    mutator.delete(key.getBytes(), HEADER, INFO, stringSerializer);
    // flush the deletion
    mutator.execute();

    return "{\"result_code\":200}\n";
}
 
Example 20
Source File: CassandraPersistenceUtils.java    From usergrid with Apache License 2.0
public static MutationResult batchExecute( Mutator<?> m, int retries ) {
    return m.execute();

}
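The retries parameter is unused here; the helper simply delegates to execute(), which either succeeds or throws HectorException. A retrying variant might look like the sketch below. Note the caveat: depending on the Hector version, a failed execute() may already have discarded the pending batch, in which case the mutations must be re-queued before the next attempt.

public static MutationResult batchExecuteWithRetries( Mutator<?> m, int retries ) {
    HectorException last = null;

    for ( int attempt = 0; attempt <= retries; attempt++ ) {
        try {
            return m.execute();
        }
        catch ( HectorException he ) {
            last = he; // possibly transient; see the caveat about re-queueing above
        }
    }

    throw last;
}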