/*
 * Copyright 2006-2007 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.openspaces.persistency.hibernate;

import com.gigaspaces.datasource.*;
import com.j_spaces.core.client.SQLQuery;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hibernate.Criteria;
import org.hibernate.Query;
import org.hibernate.StatelessSession;
import org.hibernate.Transaction;
import org.hibernate.criterion.Projections;
import org.hibernate.criterion.Restrictions;
import org.hibernate.metadata.ClassMetadata;
import org.openspaces.persistency.hibernate.iterator.*;

import java.util.List;
import java.util.Map;

/**
 * An external data source implementation based on Hibernate {@link org.hibernate.StatelessSession}.
 *
 * <p>Note, stateless session is much faster than regular Hibernate session, but at the expense of not having
 * a first level cache, as well as not performing any cascading operations (both in read operations as well as
 * dirty operations).
 *
 * @author kimchy
 * @deprecated since 9.5 - use {@link StatelessHibernateSpaceDataSource} or {@link StatelessHibernateSpaceSynchronizationEndpoint} instead.
 */
public class StatelessHibernateExternalDataSource extends AbstractHibernateExternalDataSource implements BulkDataPersister, SQLDataProvider {

    // Logger named after Hibernate's JDBC batcher so that fallback-to-exists-checking
    // messages show up alongside Hibernate's own batch logging category.
    private Log batchingLogger = LogFactory.getLog("org.hibernate.jdbc.BatchingBatcher");

    /**
     * Perform the given bulk changes using Hibernate {@link org.hibernate.StatelessSession}. First, tries to perform
     * "optimistic" operations without checking in advance for existence of certain entity. If this fails, will
     * try and perform the same operations again, simply with checking if the entry exists or not.
     *
     * @param bulkItems the bulk operations (write / update / remove / partial update) to apply
     * @throws DataSourceException if the second pass (with existence checks) fails as well
     */
    public void executeBulk(List<BulkItem> bulkItems) throws DataSourceException {
        StatelessSession session = getSessionFactory().openStatelessSession();
        Transaction tr = session.beginTransaction();
        Exception batchModeException = null;
        try {
            for (BulkItem bulkItem : bulkItems) {
                // skip entries that this data source is not configured to manage
                if (!isManaged(bulkItem))
                    continue;

                switch (bulkItem.getOperation()) {
                    case BulkItem.REMOVE:
                        executeRemove(session, bulkItem);
                        break;
                    case BulkItem.WRITE:
                        executeWrite(session, bulkItem);
                        break;
                    case BulkItem.UPDATE:
                        executeUpdate(session, bulkItem);
                        break;
                    case BulkItem.PARTIAL_UPDATE:
                        executePartialUpdate(session, bulkItem);
                        break;
                    default:
                        break;
                }
            }
            tr.commit();
        } catch (Exception e) {
            rollbackTx(tr);
            // remember the failure (with its cause); we retry below with per-entry existence checks
            batchModeException = new DataSourceException("Failed to execute bulk operation in batch mode", e);
        } finally {
            closeSession(session);
        }
        if (batchModeException == null) {
            // all is well, return
            return;
        } else {
            // log the wrapped exception as well so the original cause is not silently dropped
            batchingLogger.error("Ignoring Hibernate StaleStateException, trying with exists batching", batchModeException);
        }

        // if something went wrong, do it with exists checks

        Object latest = null;
        session = getSessionFactory().openStatelessSession();
        tr = session.beginTransaction();
        try {
            for (BulkItem bulkItem : bulkItems) {
                if (!isManaged(bulkItem))
                    continue;

                // track the last attempted item so a failure message can identify it
                latest = bulkItem;
                switch (bulkItem.getOperation()) {
                    case BulkItem.REMOVE:
                        executeRemoveIfExists(session, bulkItem);
                        break;
                    case BulkItem.WRITE:
                        executeWriteIfExists(session, bulkItem);
                        break;
                    case BulkItem.UPDATE:
                        executeUpdateIfExists(session, bulkItem);
                        break;
                    case BulkItem.PARTIAL_UPDATE:
                        executePartialUpdateIfExists(session, bulkItem);
                        // break was missing here, causing a (currently harmless) fall-through to default
                        break;
                    default:
                        break;
                }
            }
            tr.commit();
        } catch (Exception e) {
            rollbackTx(tr);
            throw new DataSourceException("Failed to execute bulk operation, latest object [" + latest + "]", e);
        } finally {
            closeSession(session);
        }
    }

    private void executePartialUpdateIfExists(StatelessSession session, BulkItem bul