/*
 * Copyright (c) 2011-2018, Meituan Dianping. All Rights Reserved.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.dianping.zebra.shard.jdbc;

import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.Date;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;

import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr;
import com.alibaba.druid.sql.ast.statement.SQLSelect;
import com.alibaba.druid.sql.ast.statement.SQLSelectQuery;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock;
import com.dianping.zebra.filter.DefaultJdbcFilterChain;
import com.dianping.zebra.filter.JdbcFilter;
import com.dianping.zebra.group.jdbc.param.ArrayParamContext;
import com.dianping.zebra.group.jdbc.param.AsciiParamContext;
import com.dianping.zebra.group.jdbc.param.BigDecimalParamContext;
import com.dianping.zebra.group.jdbc.param.BinaryStreamParamContext;
import com.dianping.zebra.group.jdbc.param.BlobParamContext;
import com.dianping.zebra.group.jdbc.param.BooleanParamContext;
import com.dianping.zebra.group.jdbc.param.ByteArrayParamContext;
import com.dianping.zebra.group.jdbc.param.ByteParamContext;
import com.dianping.zebra.group.jdbc.param.CharacterStreamParamContext;
import com.dianping.zebra.group.jdbc.param.ClobParamContext;
import com.dianping.zebra.group.jdbc.param.DateParamContext;
import com.dianping.zebra.group.jdbc.param.DoubleParamContext;
import com.dianping.zebra.group.jdbc.param.FloatParamContext;
import com.dianping.zebra.group.jdbc.param.IntParamContext;
import com.dianping.zebra.group.jdbc.param.LongParamContext;
import com.dianping.zebra.group.jdbc.param.NCharacterStreamParamContext;
import com.dianping.zebra.group.jdbc.param.NClobParamContext;
import com.dianping.zebra.group.jdbc.param.NStringParamContext;
import com.dianping.zebra.group.jdbc.param.NullParamContext;
import com.dianping.zebra.group.jdbc.param.ObjectParamContext;
import com.dianping.zebra.group.jdbc.param.ParamContext;
import com.dianping.zebra.group.jdbc.param.RefParamContext;
import com.dianping.zebra.group.jdbc.param.RowIdParamContext;
import com.dianping.zebra.group.jdbc.param.SQLXMLParamContext;
import com.dianping.zebra.group.jdbc.param.ShortParamContext;
import com.dianping.zebra.group.jdbc.param.StringParamContext;
import com.dianping.zebra.group.jdbc.param.TimeParamContext;
import com.dianping.zebra.group.jdbc.param.TimestampParamContext;
import com.dianping.zebra.group.jdbc.param.URLParamContext;
import com.dianping.zebra.group.jdbc.param.UnicodeStreamParamContext;
import com.dianping.zebra.group.router.LocalContextReadWriteStrategy;
import com.dianping.zebra.group.util.DaoContextHolder;
import com.dianping.zebra.shard.exception.ShardBatchInsertException;
import com.dianping.zebra.shard.jdbc.parallel.*;
import com.dianping.zebra.shard.jdbc.unsupport.UnsupportedShardPreparedStatement;
import com.dianping.zebra.shard.merge.MergeContext;
import com.dianping.zebra.shard.merge.RowData;
import com.dianping.zebra.shard.merge.ShardResultSetMerger;
import com.dianping.zebra.shard.merge.orderby.OrderByDataMerger;
import com.dianping.zebra.shard.parser.*;
import com.dianping.zebra.shard.router.RouterResult;
import com.dianping.zebra.shard.router.RouterResult.RouterTarget;
import com.dianping.zebra.util.SqlType;
import com.dianping.zebra.util.SqlUtils;

/**
 * @author Leo Liang
 * @author hao.zhu
 */
public class ShardPreparedStatement extends UnsupportedShardPreparedStatement implements PreparedStatement {

	// The original (logical) SQL this prepared statement was created with.
	protected String sql;

	// Statement.RETURN_GENERATED_KEYS / NO_GENERATED_KEYS as passed at creation; -1 means "not specified".
	private int autoGeneratedKeys = -1;

	// Generated-key column indexes requested at creation time, or null if none were given.
	private int[] columnIndexes;

	// Generated-key column names requested at creation time, or null if none were given.
	private String[] columnNames;

	// Bound parameters keyed by 1-based JDBC parameter index; TreeMap keeps them in index order
	// so getParams()/rewriteAndMergeParms() see values positionally.
	private Map<Integer, ParamContext> params = new TreeMap<Integer, ParamContext>();

	private int concurrencyLevel = 1; // concurrency level within a single physical database

	/**
	 * Creates a shard-aware prepared statement wired with the given JDBC filter chain.
	 *
	 * @param filters filters invoked around query/update execution; may be null or empty
	 */
	protected ShardPreparedStatement(List<JdbcFilter> filters) {
		super(filters);
	}

	/**
	 * Creates a real {@link PreparedStatement} on the given physical connection for the
	 * rewritten SQL, propagating whichever creation options (result-set type/concurrency/
	 * holdability, generated-key hints) were supplied when this shard statement was built.
	 * When nothing was specified, generated keys are requested by default so that
	 * getGeneratedKeys() works after inserts.
	 */
	protected PreparedStatement createPrepareStatement(Connection connection, String targetSql) throws SQLException {
		if (getResultSetType() != -1 && getResultSetConcurrency() != -1 && getResultSetHoldability() != -1) {
			return connection.prepareStatement(targetSql, getResultSetType(), getResultSetConcurrency(),
			      getResultSetHoldability());
		}
		if (getResultSetType() != -1 && getResultSetConcurrency() != -1) {
			return connection.prepareStatement(targetSql, getResultSetType(), getResultSetConcurrency());
		}
		if (autoGeneratedKeys != -1) {
			return connection.prepareStatement(targetSql, autoGeneratedKeys);
		}
		if (columnIndexes != null) {
			return connection.prepareStatement(targetSql, columnIndexes);
		}
		if (columnNames != null) {
			return connection.prepareStatement(targetSql, columnNames);
		}
		// No creation options recorded: default to returning generated keys.
		return connection.prepareStatement(targetSql, Statement.RETURN_GENERATED_KEYS);
	}

	/**
	 * Discards all parameter values bound so far on this statement.
	 */
	@Override
	public void clearParameters() throws SQLException {
		this.params.clear();
	}

	/**
	 * Dispatches to {@link #executeQuery()} or {@link #executeUpdate()} based on the SQL verb.
	 *
	 * @return true when a result set was produced (SELECT / SELECT ... FOR UPDATE),
	 *         false for DML (INSERT / UPDATE / DELETE / REPLACE)
	 * @throws SQLException for any other statement type
	 */
	@Override
	public boolean execute() throws SQLException {
		SqlType type = getSqlType(sql);

		boolean isQuery = type == SqlType.SELECT || type == SqlType.SELECT_FOR_UPDATE;
		boolean isDml = type == SqlType.INSERT || type == SqlType.UPDATE || type == SqlType.DELETE
		      || type == SqlType.REPLACE; // REPLACE is handled like any other write

		if (isQuery) {
			executeQuery();
			return true;
		}
		if (isDml) {
			executeUpdate();
			return false;
		}
		throw new SQLException("only select, insert, update, delete, replace sql is supported");
	}

	/**
	 * Runs the sharded SELECT. When JDBC filters are configured the call is threaded
	 * through the filter chain first; the terminal link of the chain invokes the real
	 * query logic in {@link #executeQueryWithFilter()}.
	 */
	@Override
	public ResultSet executeQuery() throws SQLException {
		checkClosed();

		if (filters != null && filters.size() > 0) {
			JdbcFilter chain = new DefaultJdbcFilterChain(filters) {
				@Override
				public ResultSet executeShardQuery(ShardStatement source, String sql, JdbcFilter chain) throws SQLException {
					if (index < filters.size()) {
						// Hand off to the next filter in the chain.
						return filters.get(index++).executeShardQuery(source, sql, chain);
					} else {
						// End of chain: perform the actual sharded query.
						return ((ShardPreparedStatement) source).executeQueryWithFilter();
					}
				}
			};
			return chain.executeShardQuery(this, sql, chain);
		} else {
			return executeQueryWithFilter();
		}
	}

	/**
	 * Core SELECT path: routes the SQL, fans out to the physical shards and exposes the
	 * partial results through a {@link ShardResultSet}.
	 */
	private ResultSet executeQueryWithFilter() throws SQLException {
		// Special statements recognized by beforeQuery() short-circuit routing entirely.
		ResultSet specRS = beforeQuery(sql);
		if (specRS != null) {
			this.results = specRS;
			this.updateCount = -1;
			attachedResultSets.add(specRS);

			return this.results;
		}

		RouterResult routerTarget = routingAndCheck(sql, getParams());
		// The router may rewrite parameter values; merge them back into our ParamContexts.
		rewriteAndMergeParms(routerTarget.getParams());
		ShardResultSet rs = new ShardResultSet();
		rs.setStatement(this);
		rs.setRouterTarget(routerTarget);
		attachedResultSets.add(rs);
		this.results = rs;
		this.updateCount = -1;

		MergeContext context = routerTarget.getMergeContext();

		// A single query with ORDER BY and LIMIT is fetched by splitting it into multiple
		// sub-queries; everything else takes the plain fan-out path.
		if (context.isOrderBySplitSql()) {
			executeOrderyByLimitQuery(rs, sql, routerTarget);
		} else {
			normalSelectExecute(rs, sql, routerTarget);
		}

		return this.results;
	}

	/**
	 * Plain fan-out SELECT: executes the routed SQLs, attaches each physical result set
	 * to the shard result set, gives filters a chance to observe the merge, then
	 * initializes the merged view.
	 */
	private ResultSet normalSelectExecute(ShardResultSet rs, String sql, RouterResult routerTarget) throws SQLException {
		List<ResultSet> originalResults = executeQueryByOriginal(routerTarget);
		for (ResultSet result : originalResults) {
			rs.addResultSet(result);
		}

		if (this.filters != null && this.filters.size() > 0) {
			JdbcFilter chain = new DefaultJdbcFilterChain(this.filters) {
				@Override
				public void shardMerge(ShardResultSet rs, JdbcFilter chain) throws SQLException {
					if (index < filters.size()) {
						filters.get(index++).shardMerge(rs, chain);
					}
				}
			};
			chain.shardMerge(rs, chain);
		}

		rs.init();

		return rs;
	}

	/**
	 * When a parsed MySQL select carries "LIMIT ?, ?" and the offset placeholder's
	 * parameter index is greater than the row-count placeholder's, swaps the two
	 * indexes so the values bind in the order the rewritten SQL expects.
	 * A no-op for every other statement shape.
	 */
	private void replaceLimitParams(SQLParsedResult parseResult) {
		if (parseResult == null) {
			return;
		}
		SQLStatement stmt = parseResult.getStmt();
		if (!(stmt instanceof SQLSelectStatement)) {
			return;
		}
		SQLSelect select = ((SQLSelectStatement) stmt).getSelect();
		if (select == null) {
			return;
		}
		SQLSelectQuery query = select.getQuery();
		if (!(query instanceof MySqlSelectQueryBlock)) {
			return;
		}
		MySqlSelectQueryBlock.Limit limit = ((MySqlSelectQueryBlock) query).getLimit();
		if (limit == null) {
			return;
		}
		if (!(limit.getOffset() instanceof SQLVariantRefExpr) || !(limit.getRowCount() instanceof SQLVariantRefExpr)) {
			return;
		}

		SQLVariantRefExpr offsetRef = (SQLVariantRefExpr) limit.getOffset();
		SQLVariantRefExpr countRef = (SQLVariantRefExpr) limit.getRowCount();
		int offsetIndex = offsetRef.getIndex();
		int countIndex = countRef.getIndex();

		// Swap only when both indexes are real (!= -1) and out of order.
		if (offsetIndex > countIndex && offsetIndex != -1 && countIndex != -1) {
			offsetRef.setIndex(countIndex);
			countRef.setIndex(offsetIndex);
		}
	}

	/**
	 * Executes a sharded SELECT with ORDER BY + LIMIT using a two-phase strategy:
	 * <ol>
	 * <li>rewrite the LIMIT so every shard fetches a proportional slice and run that query;</li>
	 * <li>order the first/last rows of the non-empty slices, use the extremes as range
	 *     conditions and run a second query to fetch the exact global window;</li>
	 * <li>for shards whose first-phase slice came back empty, run a COUNT query so the
	 *     merger can still compute correct offsets.</li>
	 * </ol>
	 * NOTE(review): this method temporarily reassigns the {@code params} field while
	 * issuing the rewritten queries — it is not safe for concurrent use (consistent with
	 * JDBC statements in general).
	 */
	private ResultSet executeOrderyByLimitQuery(ShardResultSet srs, String sql, RouterResult routerTarget)
	      throws SQLException {
		if (isSingleTarget(routerTarget)) {
			// Single-target query: set the offset to 0 and execute the original sql directly.
			routerTarget.getMergeContext().setOffset(MergeContext.NO_OFFSET);
			return normalSelectExecute(srs, sql, routerTarget);
		}

		ShardResultSetMerger shardResultSetMerger = new ShardResultSetMerger();
		OrderByDataMerger orderByDataMerger = new OrderByDataMerger();
		List<List<RowData>> firstResult = new ArrayList<List<RowData>>();
		List<List<RowData>> secondResult = new ArrayList<List<RowData>>();
		List<RowData> startOffsetDataList = new ArrayList<RowData>();
		List<RowData> endOffsetDataList = new ArrayList<RowData>();
		List<Object> newParams = new ArrayList<Object>(getParams());
		List<Long> countResult = null;
		ArrayList<RouterTarget> emptyFirstResultRouter = new ArrayList<RouterTarget>();
		RowData startData, endData;
		int splitNum = 0;

		SQLParsedResult parseResult = SQLParser.parseWithoutCache(sql);
		replaceLimitParams(parseResult);
		for (RouterTarget target : routerTarget.getSqls()) {
			splitNum += target.getSqls().size();
		}

		// Phase 1: split the offset across the routed statements and execute.
		String splitLimitSql = new ShardLimitSqlSplitRewrite().rewrite(parseResult, splitNum, newParams);
		params = covertToParamContext(newParams);
		RouterResult router = routingAndCheck(splitLimitSql, newParams);
		List<ResultSet> firstResultSets = executeQueryByOriginal(router);

		// Collect each route's rows in route order (needed for comparison against the
		// second-phase results); remember routes that returned nothing so they can be
		// COUNTed later.
		int i = 0;
		for (ResultSet rs : firstResultSets) {
			List<RowData> rowDatas = shardResultSetMerger.popResultSet(rs, router.getMergeContext());
			firstResult.add(rowDatas);
			if (rowDatas.size() > 0) {
				startOffsetDataList.add(rowDatas.get(0));
				endOffsetDataList.add(rowDatas.get(rowDatas.size() - 1));
			} else {
				emptyFirstResultRouter.add(router.getTargetByOffset(i));
			}
			i++;
		}

		// If the first-phase query produced no rows at all, the overall result is empty.
		boolean isEmpty = true;
		for (List<RowData> data : firstResult) {
			if (data.size() != 0) {
				isEmpty = false;
				break;
			}
		}
		if (isEmpty) {
			srs.setResultSets(Arrays.asList(firstResultSets.get(0)));
			return srs;
		}
		if (!emptyFirstResultRouter.isEmpty()) {
			// Phase 1b: turn each empty route's sql into a COUNT query and execute them,
			// restoring the original params afterwards.
			List<ParamContext> countParams = new ArrayList<ParamContext>(params.values());
			Map<Integer, ParamContext> tmpParams = params;
			for (RouterTarget emptyRouter : emptyFirstResultRouter) {
				emptyRouter.getSqls()
				      .add(new SqlToCountSqlRewrite().rewrite(((ArrayList<String>) emptyRouter.getSqls()).remove(0),
				            countParams));
			}
			countResult = new ArrayList<Long>(emptyFirstResultRouter.size());
			RouterResult emptyRouterResult = new RouterResult();
			emptyRouterResult.setSqls(emptyFirstResultRouter);
			emptyRouterResult.setParams(newParams);
			List<ResultSet> countResultSets = executeQueryByOriginal(emptyRouterResult);
			for (ResultSet countResultSet : countResultSets) {
				countResultSet.next();
				countResult.add(countResultSet.getLong(SqlToCountSqlRewrite.countAlias));
			}
			params = tmpParams;
		}

		// Order the first/last rows of each shard's slice and take the global extremes.
		startOffsetDataList = orderByDataMerger.process(startOffsetDataList, router.getMergeContext());
		endOffsetDataList = orderByDataMerger.process(endOffsetDataList, router.getMergeContext());
		startData = startOffsetDataList.get(0);
		endData = endOffsetDataList.get(endOffsetDataList.size() - 1);

		// Phase 2: rewrite the SQL with the extremes as range conditions and query again.
		String limitSql2 = new ShardLimitSqlWithConditionRewrite().rewrite(sql, startData, endData,
		      router.getMergeContext(), newParams);
		RouterResult router2 = routingAndCheck(limitSql2, newParams);
		params = covertToParamContext(newParams);
		List<ResultSet> secondResultSets = executeQueryByOriginal(router2);

		for (ResultSet rs : secondResultSets) {
			secondResult.add(shardResultSetMerger.popResultSet(rs, router2.getMergeContext()));
		}

		if (this.filters != null && this.filters.size() > 0) {
			JdbcFilter chain = new DefaultJdbcFilterChain(this.filters) {
				@Override
				public void shardMerge(ShardResultSet rs, JdbcFilter chain) throws SQLException {
					if (index < filters.size()) {
						filters.get(index++).shardMerge(rs, chain);
					}
				}
			};
			chain.shardMerge(srs, chain);
		}

		srs.init(firstResult, secondResult, router.getMergeContext(), routerTarget.getMergeContext().getOffset(), router
		      .getMergeContext().getOffset(), splitNum, routerTarget.getMergeContext().getLimit(), countResult);

		return srs;
	}

	/**
	 * Executes the routed SQLs and returns the raw per-shard result sets, one entry per
	 * executed statement. A single target is executed serially on the calling thread;
	 * multiple targets are executed in parallel on the shared SQL thread pool, with
	 * statements grouped onto connections according to the effective concurrency level.
	 */
	private List<ResultSet> executeQueryByOriginal(RouterResult routerTarget) throws SQLException {
		ArrayList<ResultSet> resultList = new ArrayList<ResultSet>();

		if (isSingleTarget(routerTarget)) {
			// if has only one sql,then serial execute it
			for (RouterTarget targetedSql : routerTarget.getSqls()) {
				int index = 0;
				for (String executableSql : targetedSql.getSqls()) {
					if (LOGGER.isDebugEnabled()) {
						LOGGER.debug("db:" + targetedSql.getDatabaseName());
						LOGGER.debug("sql:" + executableSql);
					}
					Connection conn = connection.getRealConnection(targetedSql.getDatabaseName(), autoCommit);
					PreparedStatement stmt = createPrepareStatement(conn, executableSql);
					actualStatements.add(stmt);
					// When the router inlined the shard key, skip those parameter slots.
					if (routerTarget.isOptimizeShardKeyInSql()) {
						setParams(stmt, targetedSql.getSkInIgnoreParams(index), false);
					} else {
						setParams(stmt);
					}
					resultList.add(stmt.executeQuery());
					index++;
				}
			}
		} else {
			// if has multiple sqls,then parallel execute them
			List<Callable<List<ResultSet>>> tasks = new ArrayList<Callable<List<ResultSet>>>();

			// Router-level concurrency overrides the statement default when positive.
			int concurrencyLevel = this.concurrencyLevel;
			if (routerTarget.getConcurrencyLevel() > 0) {
				concurrencyLevel = routerTarget.getConcurrencyLevel();
			}

			for (RouterTarget targetedSql : routerTarget.getSqls()) {
				if (concurrencyLevel <= 1 || !autoCommit) {
					// One task per target: all of its statements share a single connection.
					// (Inside a transaction we must not spread work over extra connections.)
					List<PreparedStatement> stmtList = new ArrayList<PreparedStatement>();
					int index = 0;
					for (String executableSql : targetedSql.getSqls()) {
						Connection conn = connection.getRealConnection(targetedSql.getDatabaseName(), autoCommit);
						PreparedStatement stmt = createPrepareStatement(conn, executableSql);
						actualStatements.add(stmt);
						if (routerTarget.isOptimizeShardKeyInSql()) {
							setParams(stmt, targetedSql.getSkInIgnoreParams(index), false);
						} else {
							setParams(stmt);
						}
						stmtList.add(stmt);
						index++;
					}
					tasks.add(new PreparedStatementExecuteQueryCallable(stmtList, DaoContextHolder.getSqlName(),
					      LocalContextReadWriteStrategy.getReadFromMaster()));
				} else {
					// Round-robin the statements over `concurrencyLevel` connections,
					// producing one task per connection group.
					List<List<PreparedStatement>> stmtLists = new ArrayList<List<PreparedStatement>>(concurrencyLevel);
					Connection[] connections = new Connection[concurrencyLevel];
					connection.resetConcurrentConnectionIndexes();

					int index = 0;
					for (String executableSql : targetedSql.getSqls()) {
						int groupIndex = index % concurrencyLevel;
						Connection conn = connections[groupIndex];
						if (conn == null) {
							// Lazily open one physical connection per group.
							conn = connection.getRealConcurrentConnection(targetedSql.getDatabaseName(), autoCommit);
							connections[groupIndex] = conn;
						}
						PreparedStatement stmt = createPrepareStatement(conn, executableSql);
						actualStatements.add(stmt);
						if (routerTarget.isOptimizeShardKeyInSql()) {
							setParams(stmt, targetedSql.getSkInIgnoreParams(index), false);
						} else {
							setParams(stmt);
						}
						List<PreparedStatement> stmtList = null;
						if (groupIndex < stmtLists.size()) {
							stmtList = stmtLists.get(groupIndex);
						} else {
							stmtList = new ArrayList<PreparedStatement>();
							stmtLists.add(stmtList);
						}
						stmtList.add(stmt);
						index++;
					}
					for (List<PreparedStatement> stmtList : stmtLists) {
						if (stmtList != null && !stmtList.isEmpty()) {
							tasks.add(new PreparedStatementExecuteQueryCallable(stmtList, DaoContextHolder.getSqlName(),
							      LocalContextReadWriteStrategy.getReadFromMaster()));
						}
					}
				}
			}

			List<Future<List<ResultSet>>> futures = SQLThreadPoolExecutor.getInstance(true).invokeSQLs(tasks);
			for (Future<List<ResultSet>> f : futures) {
				try {
					resultList.addAll(f.get());
				} catch (Exception e) {
					// normally can't be here!
					throw new SQLException(e);
				}
			}
		}

		return resultList;
	}

	/**
	 * Runs the sharded DML. When JDBC filters are configured the call is threaded
	 * through the filter chain first; the terminal link invokes the real update logic
	 * in {@link #executeUpdateWithFilter()}.
	 *
	 * @return the total number of affected rows across all shards
	 */
	@Override
	public int executeUpdate() throws SQLException {
		checkClosed();

		if (filters != null && filters.size() > 0) {
			JdbcFilter chain = new DefaultJdbcFilterChain(filters) {
				@Override
				public int executeShardUpdate(ShardStatement source, String sql, int autoGeneratedKeys,
				      int[] columnIndexes, String[] columnNames, JdbcFilter chain) throws SQLException {
					if (index < filters.size()) {
						// Hand off to the next filter in the chain.
						return filters.get(index++).executeShardUpdate(source, sql, -1, null, null, chain);
					} else {
						// End of chain: perform the actual sharded update.
						return ((ShardPreparedStatement) source).executeUpdateWithFilter();
					}
				}
			};
			return chain.executeShardUpdate(this, sql, -1, null, null, chain);
		} else {
			return executeUpdateWithFilter();
		}
	}

	/**
	 * Core DML path: routes the SQL, fans the rewritten statements out to the shards
	 * (serially for a single target, otherwise in parallel on the SQL thread pool) and
	 * sums the affected-row counts. Batch inserts additionally track which physical
	 * tables/SQLs each task touched so a partial failure can be reported via
	 * {@link ShardBatchInsertException}.
	 *
	 * @return total affected rows across all shards
	 */
	private int executeUpdateWithFilter() throws SQLException {
		RouterResult routerTarget = routingAndCheck(sql, getParams());

		// The router may rewrite parameter values; merge them back into our ParamContexts.
		rewriteAndMergeParms(routerTarget.getParams());
		boolean isBatchInsert = routerTarget.isBatchInsert();

		int affectedRows = 0;
		int updateCount = 0;

		if (isSingleTarget(routerTarget)) {
			// if has only one sql,then serial execute it
			for (RouterTarget targetedSql : routerTarget.getSqls()) {
				Map<Integer, List<Integer>> paramIndexMapping = targetedSql.getParamIndexMapping();

				int index = 0;
				for (String executableSql : targetedSql.getSqls()) {
					Connection conn = connection.getRealConnection(targetedSql.getDatabaseName(), autoCommit);
					PreparedStatement stmt = createPrepareStatement(conn, executableSql);
					actualStatements.add(stmt);
					// Parameter binding varies by rewrite mode: batch inserts remap indexes,
					// shard-key optimization skips inlined slots, multi-queries duplicate refs.
					if (isBatchInsert) {
						setParams(stmt, paramIndexMapping.get(index));
					} else if (routerTarget.isOptimizeShardKeyInSql()) {
						setParams(stmt, targetedSql.getSkInIgnoreParams(index), false);
					} else if (routerTarget.isMultiQueries()) {
						setParams(stmt, targetedSql.getVariantRefIndexes(index), true);
					} else {
						setParams(stmt);
					}
					index++;
					affectedRows += stmt.executeUpdate();
					updateCount = stmt.getUpdateCount() == -1 ? -1 : affectedRows;

					// Only expose generated keys when the caller actually asked for them.
					SqlType sqlType = SqlUtils.getSqlType(sql);
					if ((SqlType.INSERT == sqlType || SqlType.REPLACE == sqlType)
					      && (autoGeneratedKeys != -1 || columnNames != null || columnIndexes != null)) {
						this.generatedKey = stmt.getGeneratedKeys();
					}
				}
			}
		} else {
			// if has multiple sqls,then parallel execute them
			List<Callable<UpdateResult>> tasks = new ArrayList<Callable<UpdateResult>>();
			List<TaskExecuteResult> taskExecuteResults = new ArrayList<TaskExecuteResult>();
			// Router-level concurrency overrides the statement default when positive.
			int concurrencyLevel = this.concurrencyLevel;
			if (routerTarget.getConcurrencyLevel() > 0) {
				concurrencyLevel = routerTarget.getConcurrencyLevel();
			}

			for (RouterTarget targetedSql : routerTarget.getSqls()) {

				Map<Integer, List<Integer>> paramIndexMapping = targetedSql.getParamIndexMapping();
				if (concurrencyLevel <= 1 || !autoCommit) {
					// One task per target: all of its statements share a single connection.
					// (Inside a transaction we must not spread work over extra connections.)
					List<PreparedStatement> stmtList = new ArrayList<PreparedStatement>();
					int index = 0;
					for (String executableSql : targetedSql.getSqls()) {
						Connection conn = connection.getRealConnection(targetedSql.getDatabaseName(), autoCommit);
						PreparedStatement stmt = createPrepareStatement(conn, executableSql);
						actualStatements.add(stmt);
						if (isBatchInsert) {
							setParams(stmt, paramIndexMapping.get(index));
						} else if (routerTarget.isOptimizeShardKeyInSql()) {
							setParams(stmt, targetedSql.getSkInIgnoreParams(index), false);
						} else if (routerTarget.isMultiQueries()) {
							setParams(stmt, targetedSql.getVariantRefIndexes(index), true);
						} else {
							setParams(stmt);
						}
						stmtList.add(stmt);
						index++;
					}

					PreparedStatementExecuteUpdateCallable task = new PreparedStatementExecuteUpdateCallable(stmtList,
					      DaoContextHolder.getSqlName());
					if (isBatchInsert) {
						// Remember what this task will write so failures can be reported precisely.
						TaskExecuteResult ter = new TaskExecuteResult(targetedSql.getDatabaseName(),
						      targetedSql.getPhysicalTables(), targetedSql.getSqls());
						task.setTaskExecuteResult(ter);
						taskExecuteResults.add(ter);
					}
					tasks.add(task);
				} else {
					// Round-robin the statements over `concurrencyLevel` connections,
					// producing one task per connection group.
					int index = 0;
					List<List<PreparedStatement>> stmtLists = new ArrayList<List<PreparedStatement>>(concurrencyLevel);
					Connection[] connections = new Connection[concurrencyLevel];
					connection.resetConcurrentConnectionIndexes();
					TaskExecuteResult[] terArray = new TaskExecuteResult[concurrencyLevel];
					List<String> physicalTables = targetedSql.getPhysicalTables();

					for (String executableSql : targetedSql.getSqls()) {
						int groupIndex = index % concurrencyLevel;
						Connection conn = connections[groupIndex];
						if (conn == null) {
							// Lazily open one physical connection per group.
							conn = connection.getRealConcurrentConnection(targetedSql.getDatabaseName(), autoCommit);
							connections[groupIndex] = conn;
						}
						PreparedStatement stmt = createPrepareStatement(conn, executableSql);
						actualStatements.add(stmt);
						if (isBatchInsert) {
							setParams(stmt, paramIndexMapping.get(index));
						} else if (routerTarget.isOptimizeShardKeyInSql()) {
							setParams(stmt, targetedSql.getSkInIgnoreParams(index), false);
						} else if (routerTarget.isMultiQueries()) {
							setParams(stmt, targetedSql.getVariantRefIndexes(index), true);
						} else {
							setParams(stmt);
						}
						List<PreparedStatement> stmtList = null;
						if (groupIndex < stmtLists.size()) {
							stmtList = stmtLists.get(groupIndex);
						} else {
							stmtList = new ArrayList<PreparedStatement>();
							stmtLists.add(stmtList);
						}
						stmtList.add(stmt);

						// record physical table and sql
						if (isBatchInsert) {
							TaskExecuteResult ter = terArray[groupIndex];
							if (ter == null) {
								ter = new TaskExecuteResult(targetedSql.getDatabaseName());
								terArray[groupIndex] = ter;
							}
							if (physicalTables != null && index < physicalTables.size()) {
								ter.addTableAndSql(physicalTables.get(index), executableSql);
							}
						}

						index++;
					}
					for (int i = 0; i < stmtLists.size(); ++i) {
						List<PreparedStatement> stmtList = stmtLists.get(i);
						if (stmtList != null && !stmtList.isEmpty()) {
							PreparedStatementExecuteUpdateCallable task = new PreparedStatementExecuteUpdateCallable(stmtList,
							      DaoContextHolder.getSqlName());
							if (isBatchInsert) {
								task.setTaskExecuteResult(terArray[i]);
								taskExecuteResults.add(terArray[i]);
							}
							tasks.add(task);
						}
					}
				}
			}

			List<Future<UpdateResult>> futures = null;
			try {
				futures = SQLThreadPoolExecutor.getInstance(false).invokeSQLs(tasks);
			} catch (SQLException e) {
				// For batch inserts, wrap the failure with the per-task execution records.
				throw (isBatchInsert ? new SQLException(new ShardBatchInsertException(e.getMessage(), e.getCause(),
				      taskExecuteResults)) : e);
			}
			for (Future<UpdateResult> f : futures) {
				try {
					UpdateResult updateResult = f.get();
					affectedRows += updateResult.getAffectedRows();
					updateCount = affectedRows;
				} catch (Exception e) {
					// normally can't be here
					throw new SQLException(isBatchInsert ? new ShardBatchInsertException(e, taskExecuteResults) : e);
				}
			}
		}

		this.results = null;
		this.updateCount = updateCount;

		return affectedRows;
	}

	// Returns the generated-key flag supplied at creation time, or -1 if none.
	public int getAutoGeneratedKeys() {
		return autoGeneratedKeys;
	}

	// NOTE(review): returns the internal array directly (no defensive copy) — callers
	// could mutate it; confirm whether any caller relies on identity before changing.
	public int[] getColumnIndexes() {
		return columnIndexes;
	}

	// NOTE(review): returns the internal array directly (no defensive copy).
	public String[] getColumnNames() {
		return columnNames;
	}

	/**
	 * Snapshots the currently-bound parameter values in parameter-index order
	 * (the backing TreeMap iterates keys ascending).
	 */
	protected List<Object> getParams() {
		List<Object> values = new ArrayList<Object>(params.size());
		for (Map.Entry<Integer, ParamContext> entry : params.entrySet()) {
			values.add(entry.getValue().getValues()[0]);
		}

		return values;
	}

	// Returns the original (logical) SQL this statement was prepared with.
	public String getSql() {
		return sql;
	}

	/**
	 * Writes the (possibly rewritten) parameter values produced by the router back into
	 * the existing ParamContexts, positionally; trailing contexts beyond the new list's
	 * length are left untouched. A null list is a no-op.
	 */
	protected void rewriteAndMergeParms(List<Object> newParams) {
		if (newParams == null) {
			return;
		}
		List<ParamContext> contexts = new ArrayList<ParamContext>(params.values());
		for (int i = 0; i < newParams.size(); i++) {
			contexts.get(i).getValues()[0] = newParams.get(i);
		}
	}

	// ------------------------------------------------------------------
	// JDBC parameter binding. Each setter only records a ParamContext in
	// the ordered `params` map; the values are replayed onto the real
	// per-shard PreparedStatements later via setParams(...).
	// ------------------------------------------------------------------

	@Override
	public void setArray(int parameterIndex, Array x) throws SQLException {
		params.put(parameterIndex, new ArrayParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
		params.put(parameterIndex, new AsciiParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
		params.put(parameterIndex, new AsciiParamContext(parameterIndex, new Object[] { x, length }));
	}

	@Override
	public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
		params.put(parameterIndex, new AsciiParamContext(parameterIndex, new Object[] { x, length }));
	}

	// Records the generated-key flag to propagate to physical statements.
	public void setAutoGeneratedKeys(int autoGeneratedKeys) {
		this.autoGeneratedKeys = autoGeneratedKeys;
	}

	// Parameter binding setters (continued): values are only captured here,
	// not sent to any database until the routed statements are executed.

	@Override
	public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
		params.put(parameterIndex, new BigDecimalParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
		params.put(parameterIndex, new BinaryStreamParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
		params.put(parameterIndex, new BinaryStreamParamContext(parameterIndex, new Object[] { x, length }));
	}

	@Override
	public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
		params.put(parameterIndex, new BinaryStreamParamContext(parameterIndex, new Object[] { x, length }));
	}

	@Override
	public void setBlob(int parameterIndex, Blob x) throws SQLException {
		params.put(parameterIndex, new BlobParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
		params.put(parameterIndex, new BlobParamContext(parameterIndex, new Object[] { inputStream }));
	}

	@Override
	public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
		params.put(parameterIndex, new BlobParamContext(parameterIndex, new Object[] { inputStream, length }));
	}

	@Override
	public void setBoolean(int parameterIndex, boolean x) throws SQLException {
		params.put(parameterIndex, new BooleanParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setByte(int parameterIndex, byte x) throws SQLException {
		params.put(parameterIndex, new ByteParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setBytes(int parameterIndex, byte[] x) throws SQLException {
		params.put(parameterIndex, new ByteArrayParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
		params.put(parameterIndex, new CharacterStreamParamContext(parameterIndex, new Object[] { reader }));
	}

	@Override
	public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
		params.put(parameterIndex, new CharacterStreamParamContext(parameterIndex, new Object[] { reader, length }));
	}

	@Override
	public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
		params.put(parameterIndex, new CharacterStreamParamContext(parameterIndex, new Object[] { reader, length }));
	}

	@Override
	public void setClob(int parameterIndex, Clob x) throws SQLException {
		params.put(parameterIndex, new ClobParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setClob(int parameterIndex, Reader reader) throws SQLException {
		params.put(parameterIndex, new ClobParamContext(parameterIndex, new Object[] { reader }));
	}

	@Override
	public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
		params.put(parameterIndex, new ClobParamContext(parameterIndex, new Object[] { reader, length }));
	}

	/**
	 * Records a defensive copy of the generated-key column indexes requested at creation
	 * time. Null or empty input is ignored, leaving the previous value (initially null)
	 * in place. Uses clone() instead of a manual element-by-element copy.
	 */
	public void setColumnIndexes(int[] columnIndexes) {
		if (columnIndexes != null && columnIndexes.length != 0) {
			this.columnIndexes = columnIndexes.clone();
		}
	}

	/**
	 * Records a defensive copy of the generated-key column names requested at creation
	 * time. Null or empty input is ignored, leaving the previous value (initially null)
	 * in place. Uses clone() instead of a manual element-by-element copy.
	 */
	public void setColumnNames(String[] columnNames) {
		if (columnNames != null && columnNames.length != 0) {
			this.columnNames = columnNames.clone();
		}
	}

	@Override
	public void setDate(int parameterIndex, Date x) throws SQLException {
		params.put(parameterIndex, new DateParamContext(parameterIndex, new Object[] { x }));
	}

	@Override
	public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
		params.put(parameterIndex, new DateParamContext(parameterIndex, new Object[] { x, cal }));
	}

	@Override
	public void setDouble(int parameterIndex, double x) throws SQLException {
		params.put(parameterIndex, new DoubleParamContext(parameterIndex, new Object[] { x }));
	}

	/** Buffers a float parameter for later replay on the routed statement. */
	@Override
	public void setFloat(int parameterIndex, float x) throws SQLException {
		ParamContext ctx = new FloatParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/** Buffers an int parameter for later replay on the routed statement. */
	@Override
	public void setInt(int parameterIndex, int x) throws SQLException {
		ParamContext ctx = new IntParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a long parameter for later replay on the routed statement. */
	@Override
	public void setLong(int parameterIndex, long x) throws SQLException {
		Object[] args = { x };
		params.put(parameterIndex, new LongParamContext(parameterIndex, args));
	}

	/** Buffers an NCHAR character-stream parameter for later replay. */
	@Override
	public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
		ParamContext ctx = new NCharacterStreamParamContext(parameterIndex, new Object[] { value });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a length-bounded NCHAR character-stream parameter for later replay. */
	@Override
	public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
		Object[] args = { value, length };
		params.put(parameterIndex, new NCharacterStreamParamContext(parameterIndex, args));
	}

	/** Buffers an NCLOB parameter for later replay on the routed statement. */
	@Override
	public void setNClob(int parameterIndex, NClob value) throws SQLException {
		ParamContext ctx = new NClobParamContext(parameterIndex, new Object[] { value });
		params.put(parameterIndex, ctx);
	}

	/** Buffers an NCLOB parameter supplied as a character stream. */
	@Override
	public void setNClob(int parameterIndex, Reader reader) throws SQLException {
		Object[] args = { reader };
		params.put(parameterIndex, new NClobParamContext(parameterIndex, args));
	}

	/** Buffers a length-bounded NCLOB parameter supplied as a character stream. */
	@Override
	public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
		ParamContext ctx = new NClobParamContext(parameterIndex, new Object[] { reader, length });
		params.put(parameterIndex, ctx);
	}

	/** Buffers an NVARCHAR string parameter for later replay. */
	@Override
	public void setNString(int parameterIndex, String value) throws SQLException {
		Object[] args = { value };
		params.put(parameterIndex, new NStringParamContext(parameterIndex, args));
	}

	/** Buffers a SQL NULL parameter of the given {@code java.sql.Types} code. */
	@Override
	public void setNull(int parameterIndex, int sqlType) throws SQLException {
		ParamContext ctx = new NullParamContext(parameterIndex, new Object[] { sqlType });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a typed SQL NULL parameter (user-defined or REF types). */
	@Override
	public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
		Object[] args = { sqlType, typeName };
		params.put(parameterIndex, new NullParamContext(parameterIndex, args));
	}

	/** Buffers an untyped object parameter for later replay. */
	@Override
	public void setObject(int parameterIndex, Object x) throws SQLException {
		ParamContext ctx = new ObjectParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/** Buffers an object parameter with an explicit target SQL type. */
	@Override
	public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
		Object[] args = { x, targetSqlType };
		params.put(parameterIndex, new ObjectParamContext(parameterIndex, args));
	}

	/** Buffers an object parameter with a target SQL type and scale/length hint. */
	@Override
	public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
		Object[] args = { x, targetSqlType, scaleOrLength };
		params.put(parameterIndex, new ObjectParamContext(parameterIndex, args));
	}

	// normal
	/** Replays every buffered parameter, in key order, onto the real statement. */
	protected void setParams(PreparedStatement stmt) throws SQLException {
		for (Map.Entry<Integer, ParamContext> entry : params.entrySet()) {
			entry.getValue().setParam(stmt);
		}
	}

	// in optimize: not contain, or multi queries: contain
	protected void setParams(PreparedStatement stmt, Set<Integer> paramIndexes, boolean contains) throws SQLException {
		int count = 1;
		for (Map.Entry<Integer, ParamContext> entry : params.entrySet()) {
			if (contains == paramIndexes.contains(entry.getKey() - 1)) {
				ParamContext paramContext = entry.getValue();
				paramContext.setIndex(count++);
				paramContext.setParam(stmt);
			}
		}
	}

	// batch insert
	/**
	 * Replays parameters for a rewritten batch-insert statement.
	 * {@code indexMapping.get(i)} is the original 1-based parameter index
	 * that occupies position {@code i + 1} in the rewritten SQL.
	 *
	 * NOTE(review): {@code params.get(oldIndex)} would NPE if the mapping
	 * references an index that was never set — assumed to be guaranteed by
	 * the SQL rewriter; confirm.
	 */
	protected void setParams(PreparedStatement stmt, List<Integer> indexMapping) throws SQLException {
		if (params != null) {
			for (int i = 0; i < indexMapping.size(); ++i) {
				int oldIndex = indexMapping.get(i);
				ParamContext paramContext = params.get(oldIndex);
				// renumber to the position in the rewritten SQL, then replay
				paramContext.setIndex(i + 1);
				paramContext.setParam(stmt);
			}
		}
	}

	/** Buffers a REF parameter for later replay on the routed statement. */
	@Override
	public void setRef(int parameterIndex, Ref x) throws SQLException {
		ParamContext ctx = new RefParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a ROWID parameter for later replay on the routed statement. */
	@Override
	public void setRowId(int parameterIndex, RowId x) throws SQLException {
		Object[] args = { x };
		params.put(parameterIndex, new RowIdParamContext(parameterIndex, args));
	}

	/** Buffers a short parameter for later replay on the routed statement. */
	@Override
	public void setShort(int parameterIndex, short x) throws SQLException {
		ParamContext ctx = new ShortParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/** Sets the raw SQL text this prepared statement wraps. */
	public void setSql(String sql) {
		this.sql = sql;
	}

	/** Buffers a SQLXML parameter for later replay on the routed statement. */
	@Override
	public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
		Object[] args = { xmlObject };
		params.put(parameterIndex, new SQLXMLParamContext(parameterIndex, args));
	}

	/** Buffers a String parameter for later replay on the routed statement. */
	@Override
	public void setString(int parameterIndex, String x) throws SQLException {
		ParamContext ctx = new StringParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a TIME parameter for later replay on the routed statement. */
	@Override
	public void setTime(int parameterIndex, Time x) throws SQLException {
		Object[] args = { x };
		params.put(parameterIndex, new TimeParamContext(parameterIndex, args));
	}

	/** Buffers a TIME parameter together with its calendar for later replay. */
	@Override
	public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
		ParamContext ctx = new TimeParamContext(parameterIndex, new Object[] { x, cal });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a TIMESTAMP parameter for later replay on the routed statement. */
	@Override
	public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
		Object[] args = { x };
		params.put(parameterIndex, new TimestampParamContext(parameterIndex, args));
	}

	/** Buffers a TIMESTAMP parameter together with its calendar for later replay. */
	@Override
	public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
		ParamContext ctx = new TimestampParamContext(parameterIndex, new Object[] { x, cal });
		params.put(parameterIndex, ctx);
	}

	/** Buffers a (deprecated JDBC) unicode-stream parameter for later replay. */
	@Override
	public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
		Object[] args = { x, length };
		params.put(parameterIndex, new UnicodeStreamParamContext(parameterIndex, args));
	}

	/** Buffers a DATALINK/URL parameter for later replay on the routed statement. */
	@Override
	public void setURL(int parameterIndex, URL x) throws SQLException {
		ParamContext ctx = new URLParamContext(parameterIndex, new Object[] { x });
		params.put(parameterIndex, ctx);
	}

	/**
	 * Rebuilds the 1-based parameter map from a flat list of raw values
	 * (e.g. after a filter rewrote the parameter list).
	 *
	 * The first {@code params.size()} existing contexts are reused with their
	 * values overwritten; any surplus trailing contexts are removed from the
	 * returned copy.
	 *
	 * NOTE(review): setValues mutates ParamContext instances that are shared
	 * with {@code this.params} (the TreeMap copy holds the same references) —
	 * confirm callers tolerate that side effect.
	 * NOTE(review): assumes {@code params.size() <= this.params.size()}; a
	 * longer list would NPE on the missing key.
	 */
	public Map<Integer, ParamContext> covertToParamContext(List<Object> params) {
		Map<Integer, ParamContext> newParams = new TreeMap<Integer, ParamContext>(this.params);
		int oldParamSize = this.params.size();
		int newParamSize = params.size();
		int i = 0;
		for (; i < newParamSize; i++) {
			Object[] values = { params.get(i) };
			// map keys are 1-based JDBC parameter indexes
			newParams.get(i + 1).setValues(values);
		}
		// drop stale trailing entries (keys newParamSize+1 .. oldParamSize)
		while (i < oldParamSize) {
			newParams.remove(++i);
		}
		return newParams;
	}

	/** Sets the concurrency level used when executing across multiple shards. */
	public void setConcurrencyLevel(int concurrencyLevel) {
		this.concurrencyLevel = concurrencyLevel;
	}
}