Java Code Examples for org.apache.solr.common.SolrException.ErrorCode#SERVER_ERROR

The following examples show how to use org.apache.solr.common.SolrException.ErrorCode#SERVER_ERROR. Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
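Before the examples, a minimal sketch of the shared pattern (illustrative only, not taken from any of the projects below): a failure is wrapped in a SolrException carrying ErrorCode.SERVER_ERROR, which Solr reports to clients as an HTTP 500.

import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

public class ServerErrorSketch {

  // Hypothetical helper: wrap any unexpected failure as an internal server error.
  static String loadOrFail(String resourceName) {
    try {
      return load(resourceName); // hypothetical loader that may fail
    } catch (Exception e) {
      // ErrorCode.SERVER_ERROR maps to HTTP status 500 in Solr responses.
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "Unable to load resource: " + resourceName, e);
    }
  }

  private static String load(String resourceName) throws Exception {
    throw new Exception("simulated failure"); // placeholder so the sketch runs
  }

  public static void main(String[] args) {
    try {
      loadOrFail("example-resource");
    } catch (SolrException e) {
      System.out.println(e.code() + ": " + e.getMessage()); // prints 500: Unable to load resource: ...
    }
  }
}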
Example 1
Source File: LocalDatasetGraph.java    From SolRDF with Apache License 2.0
@Override
protected boolean _containsGraph(final Node graphNode) {
    final SolrIndexSearcher.QueryCommand cmd = new SolrIndexSearcher.QueryCommand();
    cmd.setQuery(new MatchAllDocsQuery());
    cmd.setLen(0);
    cmd.setFilterList(new TermQuery(new Term(Field.C, asNtURI(graphNode))));

    final SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();
    try {
        request.getSearcher().search(result, cmd);
        return result.getDocListAndSet().docList.matches() > 0;
    } catch (final Exception exception) {
        LOGGER.error(MessageCatalog._00113_NWS_FAILURE, exception);
        throw new SolrException(ErrorCode.SERVER_ERROR, exception);
    }
}
 
Example 2
Source File: HttpCacheHeaderUtil.java    From lucene-solr with Apache License 2.0
/**
 * Calculate the appropriate last-modified time for Solr relative the current request.
 * 
 * @return the timestamp to use as a last modified time.
 */
public static long calcLastModified(final SolrQueryRequest solrReq) {
  final SolrCore core = solrReq.getCore();
  final SolrIndexSearcher searcher = solrReq.getSearcher();
  
  final LastModFrom lastModFrom
    = core.getSolrConfig().getHttpCachingConfig().getLastModFrom();

  long lastMod;
  try {
    // assume default, change if needed (getOpenTime() should be fast)
    lastMod =
      LastModFrom.DIRLASTMOD == lastModFrom
      ? IndexDeletionPolicyWrapper.getCommitTimestamp(searcher.getIndexReader().getIndexCommit())
      : searcher.getOpenTimeStamp().getTime();
  } catch (IOException e) {
    // we're pretty freaking screwed if this happens
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
  // Get the time when the searcher was opened
  // We get rid of the milliseconds because the HTTP header has only
  // second granularity
  return lastMod - (lastMod % 1000L);
}
 
Example 3
Source File: TolerantUpdateProcessorFactory.java    From lucene-solr with Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
public void init( NamedList args ) {

  Object maxErrorsObj = args.get(MAX_ERRORS_PARAM); 
  if (maxErrorsObj != null) {
    try {
      defaultMaxErrors = Integer.parseInt(maxErrorsObj.toString());
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Unnable to parse maxErrors parameter: " + maxErrorsObj, e);
    }
    if (defaultMaxErrors < -1) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Config option '"+MAX_ERRORS_PARAM + "' must either be non-negative, or -1 to indicate 'unlimiited': " + maxErrorsObj.toString());
    }
  }
}
 
Example 4
Source File: OverseerSolrResponseSerializer.java    From lucene-solr with Apache License 2.0
@SuppressWarnings("deprecation")
public static OverseerSolrResponse deserialize(byte[] responseBytes) {
  Objects.requireNonNull(responseBytes);
  try {
    @SuppressWarnings("unchecked")
    NamedList<Object> response = (NamedList<Object>) Utils.fromJavabin(responseBytes);
    return new OverseerSolrResponse(response);
  } catch (IOException|RuntimeException e) {
    if (useUnsafeDeserialization()) {
      return (OverseerSolrResponse) SolrResponse.deserialize(responseBytes);
    }
    throw new SolrException(ErrorCode.SERVER_ERROR, "Exception deserializing response from Javabin", e);
  }
}
 
Example 5
Source File: SolrRequestParsers.java    From lucene-solr with Apache License 2.0
public static SolrException getParameterIncompatibilityException() {
  return new SolrException(ErrorCode.SERVER_ERROR,
    "Solr requires that request parameters sent using application/x-www-form-urlencoded " +
    "content-type can be read through the request input stream. Unfortunately, the " +
    "stream was empty / not available. This may be caused by another servlet filter calling " +
    "ServletRequest.getParameter*() before SolrDispatchFilter, please remove it."
  );
}
 
Example 6
Source File: AbstractFullDistribZkTestBase.java    From lucene-solr with Apache License 2.0
protected void waitForActiveReplicaCount(CloudSolrClient client, String collection, int expectedNumReplicas) throws TimeoutException, NotInClusterStateException {
  log.info("Waiting to see {} active replicas in collection: {}", expectedNumReplicas, collection);
  AtomicInteger nReplicas = new AtomicInteger();
  try {
    client.getZkStateReader().waitForState(collection, 30, TimeUnit.SECONDS, (liveNodes, collectionState) -> {
        if (collectionState == null) {
          return false;
        }
        int activeReplicas = 0;
        for (Slice slice : collectionState) {
          for (Replica replica : slice) {
            if (replica.isActive(liveNodes)) {
              activeReplicas++;
            }
          }
        }
        nReplicas.set(activeReplicas);
        return (activeReplicas == expectedNumReplicas);
      });
  } catch (TimeoutException | InterruptedException e) {
    try {
      printLayout();
    } catch (Exception e1) {
      throw new RuntimeException(e1);
    }
    throw new NotInClusterStateException(ErrorCode.SERVER_ERROR,
        "Number of replicas in the state does not match what we set:" + nReplicas + " vs " + expectedNumReplicas);
  }
}
 
Example 7
Source File: OverseerCollectionMessageHandler.java    From lucene-solr with Apache License 2.0
String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
  int retryCount = 320;
  while (retryCount-- > 0) {
    final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
    if (docCollection != null && docCollection.getSlicesMap() != null) {
      Map<String,Slice> slicesMap = docCollection.getSlicesMap();
      for (Slice slice : slicesMap.values()) {
        for (Replica replica : slice.getReplicas()) {
          // TODO: for really large clusters, we could 'index' on this

          String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
          String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);

          if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
            return replica.getName();
          }
        }
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
}
 
Example 8
Source File: CoreContainer.java    From lucene-solr with Apache License 2.0
private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) {
  Map<String, Path> addedCores = Maps.newHashMap();
  for (CoreDescriptor cd : cds) {
    final String name = cd.getName();
    if (addedCores.containsKey(name))
      throw new SolrException(ErrorCode.SERVER_ERROR,
          String.format(Locale.ROOT, "Found multiple cores with the name [%s], with instancedirs [%s] and [%s]",
              name, addedCores.get(name), cd.getInstanceDir()));
    addedCores.put(name, cd.getInstanceDir());
  }
}
 
Example 9
Source File: TestInjection.java    From lucene-solr with Apache License 2.0
private static boolean injectSplitFailure(String probability, String label) {
  if (probability != null)  {
    Random rand = random();
    if (null == rand) return true;

    Pair<Boolean,Integer> pair = parseValue(probability);
    boolean enabled = pair.first();
    int chanceIn100 = pair.second();
    if (enabled && rand.nextInt(100) >= (100 - chanceIn100)) {
      log.info("Injecting failure: {}", label);
      throw new SolrException(ErrorCode.SERVER_ERROR, "Error: " + label);
    }
  }
  return true;
}
 
Example 10
Source File: OverseerCollectionMessageHandler.java    From lucene-solr with Apache License 2.0
ClusterState waitForNewShard(String collectionName, String sliceName) throws KeeperException, InterruptedException {
  log.debug("Waiting for slice {} of collection {} to be available", sliceName, collectionName);
  RTimer timer = new RTimer();
  int retryCount = 320;
  while (retryCount-- > 0) {
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection = clusterState.getCollection(collectionName);

    if (collection == null) {
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "Unable to find collection: " + collectionName + " in clusterstate");
    }
    Slice slice = collection.getSlice(sliceName);
    if (slice != null) {
      if (log.isDebugEnabled()) {
        log.debug("Waited for {}ms for slice {} of collection {} to be available",
            timer.getTime(), sliceName, collectionName);
      }
      return clusterState;
    }
    Thread.sleep(1000);
  }
  throw new SolrException(ErrorCode.SERVER_ERROR,
      "Could not find new slice " + sliceName + " in collection " + collectionName
          + " even after waiting for " + timer.getTime() + "ms"
  );
}
 
Example 11
Source File: ZkController.java    From lucene-solr with Apache License 2.0
public void rejoinShardLeaderElection(SolrParams params) {

    String collectionName = params.get(COLLECTION_PROP);
    String shardId = params.get(SHARD_ID_PROP);
    String coreNodeName = params.get(CORE_NODE_NAME_PROP);
    String coreName = params.get(CORE_NAME_PROP);
    String electionNode = params.get(ELECTION_NODE_PROP);
    String baseUrl = params.get(BASE_URL_PROP);

    try {
      MDCLoggingContext.setCoreDescriptor(cc, cc.getCoreDescriptor(coreName));

      log.info("Rejoin the shard leader election.");

      ContextKey contextKey = new ContextKey(collectionName, coreNodeName);

      ElectionContext prevContext = electionContexts.get(contextKey);
      if (prevContext != null) prevContext.cancelElection();

      ZkNodeProps zkProps = new ZkNodeProps(BASE_URL_PROP, baseUrl, CORE_NAME_PROP, coreName, NODE_NAME_PROP, getNodeName(), CORE_NODE_NAME_PROP, coreNodeName);

      LeaderElector elect = ((ShardLeaderElectionContextBase) prevContext).getLeaderElector();
      ShardLeaderElectionContext context = new ShardLeaderElectionContext(elect, shardId, collectionName,
          coreNodeName, zkProps, this, getCoreContainer());

      context.leaderSeqPath = context.electionPath + LeaderElector.ELECTION_NODE + "/" + electionNode;
      elect.setup(context);
      electionContexts.put(contextKey, context);

      elect.retryElection(context, params.getBool(REJOIN_AT_HEAD_PROP, false));
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to rejoin election", e);
    } finally {
      MDCLoggingContext.clear();
    }
}
 
Example 12
Source File: HdfsDirectoryFactory.java    From lucene-solr with Apache License 2.0
@Override
public String getDataHome(CoreDescriptor cd) throws IOException {
  if (hdfsDataDir == null) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "You must set the "
        + this.getClass().getSimpleName() + " param " + HDFS_HOME
        + " for relative dataDir paths to work");
  }
  
  // by default, we go off the instance directory
  String path;
  if (cd.getCloudDescriptor() != null) {
    path = URLEncoder.encode(cd.getCloudDescriptor().getCollectionName(),
        "UTF-8")
        + "/"
        + URLEncoder.encode(cd.getCloudDescriptor().getCoreNodeName(),
            "UTF-8");
  } else {
    path = cd.getName();
  }
  
  return normalize(SolrPaths.normalizeDir(ZkController
      .trimLeadingAndTrailingSlashes(hdfsDataDir)
      + "/"
      + path
      + "/"
      + cd.getDataDir()));
}
 
Example 13
Source File: CoreContainer.java    From lucene-solr with Apache License 2.0
@SuppressWarnings({"unchecked"})
private void initializeAuditloggerPlugin(Map<String, Object> auditConf) {
  auditConf = Utils.getDeepCopy(auditConf, 4);
  int newVersion = readVersion(auditConf);
  //Initialize the Auditlog module
  SecurityPluginHolder<AuditLoggerPlugin> old = auditloggerPlugin;
  SecurityPluginHolder<AuditLoggerPlugin> newAuditloggerPlugin = null;
  if (auditConf != null) {
    String klas = (String) auditConf.get("class");
    if (klas == null) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for auditlogger plugin");
    }
    if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
      log.debug("Auditlogger config not modified");
      return;
    }
    log.info("Initializing auditlogger plugin: {}", klas);
    newAuditloggerPlugin = new SecurityPluginHolder<>(newVersion,
        getResourceLoader().newInstance(klas, AuditLoggerPlugin.class));

    newAuditloggerPlugin.plugin.init(auditConf);
    newAuditloggerPlugin.plugin.initializeMetrics(solrMetricsContext, "/auditlogging");
  } else {
    log.debug("Security conf doesn't exist. Skipping setup for audit logging module.");
  }
  this.auditloggerPlugin = newAuditloggerPlugin;
  if (old != null) {
    try {
      old.plugin.close();
    } catch (Exception e) {
      log.error("Exception while attempting to close old auditlogger plugin", e);
    }
  }
}
 
Example 14
Source File: SolrResponse.java    From lucene-solr with Apache License 2.0
public Exception getException() {
  @SuppressWarnings({"rawtypes"})
  NamedList exp = (NamedList) getResponse().get("exception");
  if (exp == null) {
    return null;
  }
  Integer rspCode = (Integer) exp.get("rspCode");
  ErrorCode errorCode = rspCode != null && rspCode != -1 ? ErrorCode.getErrorCode(rspCode) : ErrorCode.SERVER_ERROR;
  return new SolrException(errorCode, (String)exp.get("msg"));
}
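The shape of the NamedList consumed above is implied by the two reads: a nested entry under "exception" carrying "msg" and "rspCode". A minimal sketch of the producing side follows; the class and method names are hypothetical, not part of SolrResponse.

import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.NamedList;

public class ExceptionEntrySketch {

  // Hypothetical helper: attach an "exception" entry in the shape getException() reads back.
  static void addExceptionEntry(NamedList<Object> response, String msg) {
    NamedList<Object> exp = new NamedList<>();
    exp.add("msg", msg);                              // surfaced as the SolrException message
    exp.add("rspCode", ErrorCode.SERVER_ERROR.code);  // 500; ErrorCode.getErrorCode(500) maps back to SERVER_ERROR
    response.add("exception", exp);
  }
}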
 
Example 15
Source File: ManagedResourceStorage.java    From lucene-solr with Apache License 2.0
/**
 * Creates a new StorageIO instance for a Solr core, taking into account
 * whether the core is running in cloud mode as well as initArgs. 
 */
public static StorageIO newStorageIO(String collection, SolrResourceLoader resourceLoader, NamedList<String> initArgs) {
  StorageIO storageIO;

  SolrZkClient zkClient = null;
  String zkConfigName = null;
  if (resourceLoader instanceof ZkSolrResourceLoader) {
    zkClient = ((ZkSolrResourceLoader)resourceLoader).getZkController().getZkClient();
    try {
      zkConfigName = ((ZkSolrResourceLoader)resourceLoader).getZkController().
          getZkStateReader().readConfigName(collection);
    } catch (Exception e) {
      log.error("Failed to get config name due to", e);
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "Failed to load config name for collection:" + collection  + " due to: ", e);
    }
    if (zkConfigName == null) {
      throw new SolrException(ErrorCode.SERVER_ERROR, 
          "Could not find config name for collection:" + collection);
    }
  }
  
  if (initArgs.get(STORAGE_IO_CLASS_INIT_ARG) != null) {
    storageIO = resourceLoader.newInstance(initArgs.get(STORAGE_IO_CLASS_INIT_ARG), StorageIO.class); 
  } else {
    if (zkClient != null) {
      String znodeBase = "/configs/"+zkConfigName;
      log.debug("Setting up ZooKeeper-based storage for the RestManager with znodeBase: {}", znodeBase);
      storageIO = new ManagedResourceStorage.ZooKeeperStorageIO(zkClient, znodeBase);
    } else {
      storageIO = new FileStorageIO();        
    }
  }
  
  if (storageIO instanceof FileStorageIO) {
    // using local fs, if storageDir is not set in the solrconfig.xml, assume the configDir for the core
    if (initArgs.get(STORAGE_DIR_INIT_ARG) == null) {
      File configDir = new File(resourceLoader.getConfigDir());
      boolean hasAccess = false;
      try {
        hasAccess = configDir.isDirectory() && configDir.canWrite();
      } catch (java.security.AccessControlException ace) {
        // access denied by the security manager; leave hasAccess false and fall back to in-memory storage below
      }
      
      if (hasAccess) {
        initArgs.add(STORAGE_DIR_INIT_ARG, configDir.getAbsolutePath());
      } else {
        // most likely this is because of a unit test 
        // that doesn't have write-access to the config dir
        // while this failover approach is not ideal, it's better
        // than causing the core to fail esp. if managed resources aren't being used
        log.warn("Cannot write to config directory {} ; switching to use InMemory storage instead.", configDir.getAbsolutePath());
        storageIO = new ManagedResourceStorage.InMemoryStorageIO();
      }
    }       
  }
  
  storageIO.configure(resourceLoader, initArgs);     
  
  return storageIO;
}
 
Example 16
Source File: ClusteringComponent.java    From lucene-solr with Apache License 2.0
private void checkAvailable(String name, ClusteringEngine engine) {
  if (!engine.isAvailable()) {
    throw new SolrException(ErrorCode.SERVER_ERROR, 
        "Clustering engine declared, but not available, check the logs: " + name);
  }
}
 
Example 17
Source File: ShardLeaderElectionContext.java    From lucene-solr with Apache License 2.0
private boolean waitForReplicasToComeUp(int timeoutms) throws InterruptedException {
  long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutms, TimeUnit.MILLISECONDS);
  final String shardsElectZkPath = electionPath + LeaderElector.ELECTION_NODE;

  DocCollection docCollection = zkController.getClusterState().getCollectionOrNull(collection);
  Slice slices = (docCollection == null) ? null : docCollection.getSlice(shardId);
  int cnt = 0;
  while (!isClosed && !cc.isShutDown()) {
    // wait for everyone to be up
    if (slices != null) {
      int found = 0;
      try {
        found = zkClient.getChildren(shardsElectZkPath, null, true).size();
      } catch (KeeperException e) {
        if (e instanceof KeeperException.SessionExpiredException) {
          // if the session has expired, then another election will be launched, so
          // quit here
          throw new SolrException(ErrorCode.SERVER_ERROR,
              "ZK session expired - cancelling election for " + collection + " " + shardId);
        }
        SolrException.log(log,
            "Error checking for the number of election participants", e);
      }

      // on startup and after connection timeout, wait for all known shards
      if (found >= slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size()) {
        log.info("Enough replicas found to continue.");
        return true;
      } else {
        if (cnt % 40 == 0) {
          if (log.isInfoEnabled()) {
            log.info("Waiting until we see more replicas up for shard {}: total={} found={} timeoute in={}ms"
                , shardId, slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size(), found,
                TimeUnit.MILLISECONDS.convert(timeoutAt - System.nanoTime(), TimeUnit.NANOSECONDS));
          }
        }
      }

      if (System.nanoTime() > timeoutAt) {
        log.info("Was waiting for replicas to come up, but they are taking too long - assuming they won't come back till later");
        return false;
      }
    } else {
      log.warn("Shard not found: {} for collection {}", shardId, collection);

      return false;

    }

    Thread.sleep(500);
    docCollection = zkController.getClusterState().getCollectionOrNull(collection);
    slices = (docCollection == null) ? null : docCollection.getSlice(shardId);
    cnt++;
  }
  return false;
}
 
Example 18
Source File: CursorMark.java    From lucene-solr with Apache License 2.0
/**
 * Generates an empty CursorMark bound for use with the 
 * specified schema and {@link SortSpec}.
 *
 * @param schema used for basic validation
 * @param sortSpec bound to this totem for (un)marshalling serialized values
 */
public CursorMark(IndexSchema schema, SortSpec sortSpec) {

  final SchemaField uniqueKey = schema.getUniqueKeyField();
  if (null == uniqueKey) {
    throw new SolrException(ErrorCode.BAD_REQUEST,
                            "Cursor functionality is not available unless the IndexSchema defines a uniqueKey field");
  }

  final Sort sort = sortSpec.getSort();
  if (null == sort) {
    // pure score, by definition we don't include the mandatory uniqueKey tie breaker
    throw new SolrException(ErrorCode.BAD_REQUEST,
                            "Cursor functionality requires a sort containing a uniqueKey field tie breaker");
  }
  
  if (!sortSpec.getSchemaFields().contains(uniqueKey)) {
    throw new SolrException(ErrorCode.BAD_REQUEST,
                            "Cursor functionality requires a sort containing a uniqueKey field tie breaker");
  }

  if (0 != sortSpec.getOffset()) {
    throw new SolrException(ErrorCode.BAD_REQUEST,
                            "Cursor functionality requires start=0");
  }

  for (SortField sf : sort.getSort()) {
    if (sf.getType().equals(SortField.Type.DOC)) {
      throw new SolrException(ErrorCode.BAD_REQUEST,
                              "Cursor functionality can not be used with internal doc ordering sort: _docid_");
    }
  }

  if (sort.getSort().length != sortSpec.getSchemaFields().size()) {
      throw new SolrException(ErrorCode.SERVER_ERROR,
                              "Cursor SortSpec failure: sort length != SchemaFields: " 
                              + sort.getSort().length + " != " + 
                              sortSpec.getSchemaFields().size());
  }

  this.sortSpec = sortSpec;
  this.values = null;
}
 
Example 19
Source File: IndexSchema.java    From lucene-solr with Apache License 2.0
/**
 * Copies this schema, deletes the named dynamic field from the copy, creates a new dynamic
 * field with the same field name pattern using the given args, then rebinds any referring
 * dynamic copy fields to the replacement dynamic field.
 *
 * <p>
 * The schema will not be persisted.
 * <p>
 * Requires synchronizing on the object returned by {@link #getSchemaUpdateLock()}.
 *
 * @param fieldNamePattern The glob for the dynamic field to be replaced
 * @param replacementFieldType  The field type of the replacement dynamic field                                   
 * @param replacementArgs Initialization params for the replacement dynamic field
 * @return a new IndexSchema based on this schema with the named dynamic field replaced
 */
public ManagedIndexSchema replaceDynamicField
    (String fieldNamePattern, FieldType replacementFieldType, Map<String,?> replacementArgs) {
  String msg = "This IndexSchema is not mutable.";
  log.error(msg);
  throw new SolrException(ErrorCode.SERVER_ERROR, msg);
}
 
Example 20
Source File: FieldType.java    From lucene-solr with Apache License 2.0
/**
 * Checks {@link org.apache.solr.schema.SchemaField} instances constructed 
 * using this field type to ensure that they are valid.
 *
 * <p>
 * This method is called by the <code>SchemaField</code> constructor to 
 * check that its initialization does not violate any fundamental
 * requirements of the <code>FieldType</code>.
 * Subclasses may choose to throw a {@link SolrException}
 * if invariants are violated by the <code>SchemaField</code>.
 * </p>
 */
public void checkSchemaField(final SchemaField field) {
  if (field.hasDocValues()) {
    checkSupportsDocValues();
  }
  if (field.isLarge() && field.multiValued()) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Field type " + this + " is 'large'; can't support multiValued");
  }
  if (field.isLarge() && getNumberType() != null) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Field type " + this + " is 'large'; can't support numerics");
  }
}
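Subclasses hook into this check as the javadoc describes; below is a hedged sketch of a custom field type enforcing an extra invariant. The class is hypothetical and not part of lucene-solr.

import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.StrField;

// Hypothetical field type that rejects multiValued fields at schema load time.
public class SingleValuedStrField extends StrField {

  @Override
  public void checkSchemaField(final SchemaField field) {
    super.checkSchemaField(field); // keep the base docValues/'large' checks shown above
    if (field.multiValued()) {
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "Field type " + this + " does not support multiValued fields");
    }
  }
}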