io.netty.channel.Channel Scala Examples

The following examples show how to use io.netty.channel.Channel. Each example is taken from an open-source Scala project; the source file, project, and license are noted above the code.
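Before the project examples, here is a minimal, self-contained sketch of the core Channel operations (pipeline access, writeAndFlush, close). It uses Netty's EmbeddedChannel, which also appears in Example 6 below, so no real network I/O is involved; the handler and the "ping" message are purely illustrative.

import io.netty.channel.{Channel, ChannelHandlerContext, ChannelInboundHandlerAdapter}
import io.netty.channel.embedded.EmbeddedChannel

object ChannelBasics extends App {
  // EmbeddedChannel runs a pipeline without a real transport, which makes it
  // convenient for exercising the Channel API in isolation.
  val channel: Channel = new EmbeddedChannel(new ChannelInboundHandlerAdapter {
    override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
      println(s"inbound: $msg") // illustrative handler: just log inbound messages
      ctx.fireChannelRead(msg)
    }
  })

  println(channel.isActive)     // an EmbeddedChannel is active once constructed
  channel.writeAndFlush("ping") // send an outbound message through the pipeline
  channel.close().sync()        // close() returns a ChannelFuture; sync() waits for it
}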
Example 1
Source File: NettyRpcHandlerSuite.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Mockito._
import org.mockito.Matchers._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportResponseHandler, TransportClient}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.receive(client, null, null)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.receive(client, null, null)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.connectionTerminated(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 2
Source File: MySQLSocket.scala    From asyncdb   with Apache License 2.0
package io.asyncdb
package netty
package mysql

import cats.syntax.all._
import cats.effect._
import cats.effect.concurrent._
import cats.data.NonEmptyList
import io.netty.bootstrap.Bootstrap
import io.netty.channel.{Channel, ChannelInitializer}
import java.nio.charset.Charset
import protocol.client._
import protocol.server._

case class MySQLSocketConfig(
  bootstrap: Bootstrap,
  username: String,
  password: Option[String],
  database: Option[String],
  charset: Short,
  authMethod: Option[String]
) extends NettySocketConfig

class MySQLSocket[F[_]](
  config: MySQLSocketConfig,
  channelHolder: Deferred[F, Either[Throwable, Channel]],
  ref: MsgRef[F]
)(implicit F: Concurrent[F])
    extends NettySocket[F, Message](config, channelHolder) {

  def connect = {
    open.flatMap(_.read).as(this)
  }

  def disconnect = {
    close.void
  }

  def write(n: Message) = {
    channel.flatMap(_.write(n).to[F]).void
  }

  def read = ref.take.flatMap {
    case OrErr(value) =>
      F.fromEither(value)
    case v => F.pure(v)
  }
}

object MySQLSocket {
  def apply[F[_]: ConcurrentEffect](config: MySQLSocketConfig) = {
    for {
      msgRef   <- MVar[F].empty[Message]
      clientCS <- Deferred[F, Charset]
      initCtx = ChannelContext(
        ChannelState.Handshake.WaitHandshakeInit,
        clientCS
      )
      ctxRef <- Ref[F].of(initCtx)
      decoder = new FrameDecoder[F](config, ctxRef, msgRef)
      encoder = new FrameEncoder(config)
      initHandler = new ChannelInitializer[Channel] {
        override def initChannel(channel: Channel): Unit = {
          channel
            .pipeline()
            .addLast("MySQLFrameDecoder", decoder)
            .addLast("MySQLFrameEncoder", encoder)
        }
      }
      _ = config.bootstrap.handler(initHandler)
      channel <- Deferred[F, Either[Throwable, Channel]]
    } yield new MySQLSocket[F](config, channel, msgRef)
  }
} 
Example 3
Source File: NettySocket.scala    From asyncdb   with Apache License 2.0
package io.asyncdb
package netty

import cats.syntax.all._
import cats.effect._
import cats.effect.concurrent.Deferred
import io.netty.bootstrap.Bootstrap
import io.netty.channel.Channel

trait NettySocketConfig {
  val bootstrap: Bootstrap
}

abstract class NettySocket[F[_], M](
  config: NettySocketConfig,
  channelHolder: Deferred[F, Either[Throwable, Channel]]
)(
  implicit F: Concurrent[F]
) extends Socket[F, M] {

  protected def open = F.delay(config.bootstrap.connect()).flatMap { f =>
    f.to[F]
      .attempt
      .flatMap { e =>
        channelHolder.complete(e.map(_.channel()))
      }
      .as(this)
  }

  protected def close = channel.flatMap(ch => F.delay(ch.close()))

  protected def channel = channelHolder.get.rethrow

} 
Example 4
Source File: NettyUtils.scala    From aloha   with Apache License 2.0
package me.jrwang.aloha.transport.util

import io.netty.buffer.PooledByteBufAllocator
import io.netty.channel.{Channel, EventLoopGroup, ServerChannel}
import io.netty.channel.epoll.{EpollEventLoopGroup, EpollServerSocketChannel, EpollSocketChannel}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.{NioServerSocketChannel, NioSocketChannel}
import io.netty.util.concurrent.DefaultThreadFactory
import io.netty.util.internal.PlatformDependent
import me.jrwang.aloha.common.Logging


object NettyUtils extends Logging {
  
  private def getPrivateStaticField(name: String) = try {
    val f = PooledByteBufAllocator.DEFAULT.getClass.getDeclaredField(name)
    f.setAccessible(true)
    f.getInt(null)
  } catch {
    case e: Exception =>
      throw new RuntimeException(e)
  }
} 
Example 5
Source File: DummyUtxPoolSynchronizer.scala    From Waves   with MIT License
package com.wavesplatform.http
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.network.UtxPoolSynchronizer
import com.wavesplatform.transaction.Transaction
import com.wavesplatform.transaction.smart.script.trace.TracedResult
import io.netty.channel.Channel

object DummyUtxPoolSynchronizer {
  val accepting: UtxPoolSynchronizer = new UtxPoolSynchronizer {
    override def tryPublish(tx: Transaction, source: Channel): Unit               = {}
    override def publish(tx: Transaction): TracedResult[ValidationError, Boolean] = TracedResult(Right(true))
  }

  def rejecting(error: Transaction => ValidationError): UtxPoolSynchronizer = new UtxPoolSynchronizer {
    override def tryPublish(tx: Transaction, source: Channel): Unit               = {}
    override def publish(tx: Transaction): TracedResult[ValidationError, Boolean] = TracedResult(Left(error(tx)))
  }
} 
Example 6
Source File: ClientSpec.scala    From Waves   with MIT License
package com.wavesplatform.network

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.{TransactionGen, Version}
import io.netty.buffer.{ByteBuf, Unpooled}
import io.netty.channel.Channel
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.channel.group.ChannelGroup
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}

import scala.concurrent.duration.DurationInt
import scala.util.Random

class ClientSpec extends FreeSpec with Matchers with MockFactory with TransactionGen {

  private val clientHandshake = new Handshake(
    applicationName = "wavesI",
    applicationVersion = Version.VersionTuple,
    nodeName = "test",
    nodeNonce = Random.nextInt(),
    declaredAddress = None
  )

  private val serverHandshake = clientHandshake.copy(nodeNonce = Random.nextInt())

  "should send only a local handshake on connection" in {
    val channel = createEmbeddedChannel(mock[ChannelGroup])

    val sentClientHandshakeBuff = channel.readOutbound[ByteBuf]()
    Handshake.decode(sentClientHandshakeBuff) shouldBe clientHandshake
    channel.outboundMessages() shouldBe empty
  }

  "should add a server's channel to all channels after the handshake only" in {
    var channelWasAdded = false
    val allChannels     = mock[ChannelGroup]
    (allChannels.add _).expects(*).onCall { _: Channel =>
      channelWasAdded = true
      true
    }

    val channel = createEmbeddedChannel(allChannels)

    // skip the client's handshake
    channel.readOutbound[ByteBuf]()
    channelWasAdded shouldBe false

    val replyServerHandshakeBuff = Unpooled.buffer()
    serverHandshake.encode(replyServerHandshakeBuff)
    channel.writeInbound(replyServerHandshakeBuff)
    channelWasAdded shouldBe true
  }

  private def createEmbeddedChannel(allChannels: ChannelGroup) = new EmbeddedChannel(
    new HandshakeDecoder(PeerDatabase.NoOp),
    new HandshakeTimeoutHandler(1.minute),
    new HandshakeHandler.Client(
      handshake = clientHandshake,
      establishedConnections = new ConcurrentHashMap(),
      peerConnections = new ConcurrentHashMap(),
      peerDatabase = PeerDatabase.NoOp,
      allChannels = allChannels
    )
  )

} 
Example 7
Source File: MicroblockAppender.scala    From Waves   with MIT License
package com.wavesplatform.state.appender

import cats.data.EitherT
import com.wavesplatform.block.Block.BlockId
import com.wavesplatform.block.MicroBlock
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.metrics.{BlockStats, _}
import com.wavesplatform.network.MicroBlockSynchronizer.MicroblockData
import com.wavesplatform.network._
import com.wavesplatform.state.Blockchain
import com.wavesplatform.transaction.BlockchainUpdater
import com.wavesplatform.transaction.TxValidationError.InvalidSignature
import com.wavesplatform.utils.ScorexLogging
import com.wavesplatform.utx.UtxPool
import io.netty.channel.Channel
import io.netty.channel.group.ChannelGroup
import kamon.Kamon
import monix.eval.Task
import monix.execution.Scheduler

import scala.util.{Left, Right}

object MicroblockAppender extends ScorexLogging {
  def apply(blockchainUpdater: BlockchainUpdater with Blockchain, utxStorage: UtxPool, scheduler: Scheduler, verify: Boolean = true)(
      microBlock: MicroBlock
  ): Task[Either[ValidationError, BlockId]] = {

    Task(metrics.microblockProcessingTimeStats.measureSuccessful {
      blockchainUpdater
        .processMicroBlock(microBlock, verify)
        .map { totalBlockId =>
          utxStorage.removeAll(microBlock.transactionData)
          totalBlockId
        }
    }).executeOn(scheduler)
  }

  def apply(
      blockchainUpdater: BlockchainUpdater with Blockchain,
      utxStorage: UtxPool,
      allChannels: ChannelGroup,
      peerDatabase: PeerDatabase,
      scheduler: Scheduler
  )(ch: Channel, md: MicroblockData): Task[Unit] = {
    import md.microBlock
    val microblockTotalResBlockSig = microBlock.totalResBlockSig
    (for {
      _ <- EitherT(Task.now(microBlock.signaturesValid()))
      _ <- EitherT(apply(blockchainUpdater, utxStorage, scheduler)(microBlock))
    } yield ()).value.map {
      case Right(_) =>
        md.invOpt match {
          case Some(mi) => allChannels.broadcast(mi, except = md.microblockOwners())
          case None     => log.warn(s"${id(ch)} Not broadcasting MicroBlockInv")
        }
        BlockStats.applied(microBlock)
      case Left(is: InvalidSignature) =>
        peerDatabase.blacklistAndClose(ch, s"Could not append microblock $microblockTotalResBlockSig: $is")
      case Left(ve) =>
        BlockStats.declined(microBlock)
        log.debug(s"${id(ch)} Could not append microblock $microblockTotalResBlockSig: $ve")
    }
  }

  private[this] object metrics {
    val microblockProcessingTimeStats = Kamon.timer("microblock-appender.processing-time").withoutTags()
  }
} 
Example 8
Source File: NetworkSender.scala    From Waves   with MIT License
package com.wavesplatform.network.client

import java.io.IOException
import java.net.InetSocketAddress
import java.nio.channels.ClosedChannelException

import com.wavesplatform.network.TrafficLogger
import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.Channel
import io.netty.channel.group.DefaultChannelGroup
import io.netty.util.concurrent.GlobalEventExecutor

import scala.concurrent.{ExecutionContext, Future, Promise}

class NetworkSender(trafficLoggerSettings: TrafficLogger.Settings, chainId: Char, name: String, nonce: Long)(implicit ec: ExecutionContext)
    extends ScorexLogging {
  private[this] val MessagesBatchSize = 100

  private[this] val allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE)
  private[this] val client      = new NetworkClient(trafficLoggerSettings, chainId, name, nonce, allChannels)

  def connect(address: InetSocketAddress): Future[Channel] =
    client.connect(address)

  def send(channel: Channel, messages: Any*): Future[Unit] = {
    def doWrite(messages: Seq[Any]): Future[Unit] =
      if (messages.isEmpty)
        Future.successful(())
      else if (!channel.isWritable)
        Future.failed(new ClosedChannelException)
      else {
        val (send, keep) = messages.splitAt(MessagesBatchSize)
        val futures = send.toVector.map { msg =>
          val result = Promise[Unit]()
          channel.write(msg).addListener { (f: io.netty.util.concurrent.Future[Void]) =>
            if (!f.isSuccess) {
              val cause = Option(f.cause()).getOrElse(new IOException("Can't send a message to the channel"))
              log.error(s"Can't send a message to the channel: $msg", cause)
              result.failure(cause)
            } else {
              result.success(())
            }
          }
          result.future
        }

        channel.flush()
        Future.sequence(futures).flatMap(_ => doWrite(keep))
      }

    doWrite(messages)
  }

  def close(): Unit = client.shutdown()
} 
Example 9
Source File: MessageObserver.scala    From Waves   with MIT License
package com.wavesplatform.network

import com.wavesplatform.block.Block
import com.wavesplatform.transaction.Transaction
import com.wavesplatform.utils.{Schedulers, ScorexLogging}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{Channel, ChannelHandlerContext, ChannelInboundHandlerAdapter}
import monix.execution.schedulers.SchedulerService
import monix.reactive.subjects.ConcurrentSubject

@Sharable
class MessageObserver extends ChannelInboundHandlerAdapter with ScorexLogging {

  private implicit val scheduler: SchedulerService = Schedulers.fixedPool(2, "message-observer")

  private val signatures          = ConcurrentSubject.publish[(Channel, Signatures)]
  private val blocks              = ConcurrentSubject.publish[(Channel, Block)]
  private val blockchainScores    = ConcurrentSubject.publish[(Channel, BigInt)]
  private val microblockInvs      = ConcurrentSubject.publish[(Channel, MicroBlockInv)]
  private val microblockResponses = ConcurrentSubject.publish[(Channel, MicroBlockResponse)]
  private val transactions        = ConcurrentSubject.publish[(Channel, Transaction)]

  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = msg match {
    case b: Block               => blocks.onNext((ctx.channel(), b))
    case sc: BigInt             => blockchainScores.onNext((ctx.channel(), sc))
    case s: Signatures          => signatures.onNext((ctx.channel(), s))
    case mbInv: MicroBlockInv   => microblockInvs.onNext((ctx.channel(), mbInv))
    case mb: MicroBlockResponse => microblockResponses.onNext((ctx.channel(), mb))
    case tx: Transaction        => transactions.onNext((ctx.channel(), tx))
    case _                      => super.channelRead(ctx, msg)
  }

  def shutdown(): Unit = {
    signatures.onComplete()
    blocks.onComplete()
    blockchainScores.onComplete()
    microblockInvs.onComplete()
    microblockResponses.onComplete()
    transactions.onComplete()
  }
}

object MessageObserver {
  type Messages = (
      ChannelObservable[Signatures],
      ChannelObservable[Block],
      ChannelObservable[BigInt],
      ChannelObservable[MicroBlockInv],
      ChannelObservable[MicroBlockResponse],
      ChannelObservable[Transaction]
  )

  def apply(): (MessageObserver, Messages) = {
    val mo = new MessageObserver()
    (mo, (mo.signatures, mo.blocks, mo.blockchainScores, mo.microblockInvs, mo.microblockResponses, mo.transactions))
  }
} 
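A hedged wiring sketch for the observer above: it assumes the Waves ChannelObservable alias is an Observable of (Channel, message) pairs and that the classes from this example are on the classpath; the handler name "messageObserver" and the logging consumer are hypothetical.

import com.wavesplatform.network.MessageObserver
import io.netty.channel.{Channel, ChannelInitializer}
import monix.execution.Scheduler.Implicits.global

object MessageObserverWiring {
  val (observer, (signatures, blocks, scores, invs, responses, transactions)) =
    MessageObserver()

  // Because the handler is @Sharable, a single instance can be installed in every
  // peer's pipeline; messages decoded upstream are republished on the subjects.
  val initializer = new ChannelInitializer[Channel] {
    override def initChannel(ch: Channel): Unit =
      ch.pipeline().addLast("messageObserver", observer)
  }

  // Consumers subscribe to the exposed streams, e.g. to log incoming blocks.
  blocks.foreach { case (ch, block) => println(s"block from ${ch.id()}: $block") }
}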
Example 10
Source File: ProxyIntegrationTest.scala    From Neutrino   with Apache License 2.0
package com.ebay.neutrino.integ

import com.ebay.neutrino.config.{Configuration, LoadBalancer, VirtualPool, VirtualServer}
import com.ebay.neutrino.handler.{ExampleCloseHandler, ExamplePipelineHandler}
import com.ebay.neutrino.{NettyClientSupport, NeutrinoCore}
import io.netty.channel.{Channel, ChannelInitializer}
import io.netty.handler.codec.http.{DefaultFullHttpRequest, HttpMethod, HttpVersion}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Ignore, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._



@Ignore
class ProxyIntegrationTest extends FlatSpec with NettyClientSupport with Matchers with BeforeAndAfterAll
{
  // Create a new balancer
  val config = Configuration.load("proxy.conf")
  val core   = NeutrinoCore(config)
  val server = new NettyEchoServer()


  override def beforeAll() = {
    val servers = Seq(VirtualServer("id", "localhost", 8081))
    val pools   = Seq(VirtualPool(servers=servers))

    // Start running the downstream server
    server.start()

    // Start running the proxy. This will run until the process is interrupted...
    core.configure(LoadBalancer("id", pools))
    Await.ready(core.start(), 5 seconds)
  }

  override def afterAll() = {
    Await.ready(core.shutdown(), 5 seconds)
    server.shutdown()
  }


  it should "run 10000 requests" in {

    // We'll have to connect as well
    val client = HttpClient(port=8080)
    val request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")

    for (i <- 0 until 10000) {
      val conn = client.send(request)
      conn.channel.close()
    }
  }

}



class ProxyIntegrationInitializer extends ChannelInitializer[Channel] {

  // Initialize the user-configurable pipeline
  protected def initChannel(ch: Channel): Unit = {
    ch.pipeline.addLast(new ExampleCloseHandler())
    ch.pipeline.addLast(new ExamplePipelineHandler())
    //pipeline.addLast(new ExampleCustomHandler())
  }
} 
Example 11
Source File: EventLoopGroupOwner.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.util.UUID
import java.util.concurrent.TimeUnit.MILLISECONDS

import com.daml.resources.{Resource, ResourceOwner}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.{NioServerSocketChannel, NioSocketChannel}
import io.netty.channel.{Channel, EventLoopGroup, ServerChannel}
import io.netty.util.concurrent.DefaultThreadFactory

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Try

final class EventLoopGroupOwner(threadPoolName: String, parallelism: Int)
    extends ResourceOwner[EventLoopGroup] {

  override def acquire()(implicit executionContext: ExecutionContext): Resource[EventLoopGroup] =
    Resource(
      Future(new NioEventLoopGroup(
        parallelism,
        new DefaultThreadFactory(s"$threadPoolName-grpc-event-loop-${UUID.randomUUID()}", true))))(
      group => {
        val promise = Promise[Unit]()
        val future = group.shutdownGracefully(0, 0, MILLISECONDS)
        future.addListener((f: io.netty.util.concurrent.Future[_]) =>
          promise.complete(Try(f.get).map(_ => ())))
        promise.future
      }
    )
}

object EventLoopGroupOwner {

  val clientChannelType: Class[_ <: Channel] = classOf[NioSocketChannel]

  val serverChannelType: Class[_ <: ServerChannel] = classOf[NioServerSocketChannel]

} 
Example 12
Source File: NettyRpcHandlerSuite.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(new RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 13
Source File: RBackendAuthHandler.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.api.r

import java.io.{ByteArrayOutputStream, DataOutputStream}
import java.nio.charset.StandardCharsets.UTF_8

import io.netty.channel.{Channel, ChannelHandlerContext, SimpleChannelInboundHandler}

import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private class RBackendAuthHandler(secret: String)
  extends SimpleChannelInboundHandler[Array[Byte]] with Logging {

  override def channelRead0(ctx: ChannelHandlerContext, msg: Array[Byte]): Unit = {
    // The R code adds a null terminator to serialized strings, so ignore it here.
    val clientSecret = new String(msg, 0, msg.length - 1, UTF_8)
    try {
      require(secret == clientSecret, "Auth secret mismatch.")
      ctx.pipeline().remove(this)
      writeReply("ok", ctx.channel())
    } catch {
      case e: Exception =>
        logInfo("Authentication failure.", e)
        writeReply("err", ctx.channel())
        ctx.close()
    }
  }

  private def writeReply(reply: String, chan: Channel): Unit = {
    val out = new ByteArrayOutputStream()
    SerDe.writeString(new DataOutputStream(out), reply)
    chan.writeAndFlush(out.toByteArray())
  }

} 
Example 14
Source File: NettyRpcHandlerSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 15
Source File: RealmConnector.scala    From wowchat   with GNU General Public License v3.0
package wowchat.realm

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import wowchat.common._
import com.typesafe.scalalogging.StrictLogging
import io.netty.bootstrap.Bootstrap
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.channel.{Channel, ChannelInitializer, ChannelOption}
import io.netty.handler.timeout.IdleStateHandler
import io.netty.util.concurrent.Future

import scala.util.Try

class RealmConnector(realmConnectionCallback: RealmConnectionCallback) extends StrictLogging {

  private var channel: Option[Channel] = None
  private var connected: Boolean = false

  def connect: Unit = {
    logger.info(s"Connecting to realm server ${Global.config.wow.realmlist.host}:${Global.config.wow.realmlist.port}")

    val bootstrap = new Bootstrap
    bootstrap.group(Global.group)
      .channel(classOf[NioSocketChannel])
      .option[java.lang.Integer](ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
      .option[java.lang.Boolean](ChannelOption.SO_KEEPALIVE, true)
      .remoteAddress(new InetSocketAddress(Global.config.wow.realmlist.host, Global.config.wow.realmlist.port))
      .handler(new ChannelInitializer[SocketChannel]() {

        @throws[Exception]
        override protected def initChannel(socketChannel: SocketChannel): Unit = {
          val handler = if (WowChatConfig.getExpansion == WowExpansion.Vanilla) {
            new RealmPacketHandler(realmConnectionCallback)
          } else {
            new RealmPacketHandlerTBC(realmConnectionCallback)
          }

          socketChannel.pipeline.addLast(
            new IdleStateHandler(60, 120, 0),
            new IdleStateCallback,
            new RealmPacketDecoder,
            new RealmPacketEncoder,
            handler
          )
        }
      })

    channel = Some(bootstrap.connect.addListener((future: Future[_ >: Void]) => {
      Try {
        future.get(10, TimeUnit.SECONDS)
      }.fold(throwable => {
        logger.error(s"Failed to connect to realm server! ${throwable.getMessage}")
        realmConnectionCallback.disconnected
      }, _ => ())
    }).channel)
  }
} 
Example 16
Source File: NettyRpcHandlerSuite.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 17
Source File: NettyFutureConverters.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal

import io.netty.channel.Channel
import io.netty.channel.ChannelFuture
import io.netty.channel.ChannelFutureListener
import io.netty.util.concurrent.GenericFutureListener
import io.netty.util.concurrent.{ Future => NettyFuture }

import scala.concurrent.Future
import scala.concurrent.Promise

object NettyFutureConverters {
  implicit class ToFuture[T](future: NettyFuture[T]) {
    def toScala: Future[T] = {
      val promise = Promise[T]()
      future.addListener(new GenericFutureListener[NettyFuture[T]] {
        def operationComplete(future: NettyFuture[T]) = {
          if (future.isSuccess) {
            promise.success(future.getNow)
          } else if (future.isCancelled) {
            promise.failure(new RuntimeException("Future cancelled"))
          } else {
            promise.failure(future.cause())
          }
        }
      })
      promise.future
    }
  }

  implicit class ChannelFutureToFuture(future: ChannelFuture) {
    def channelFutureToScala: Future[Channel] = {
      val promise = Promise[Channel]()
      future.addListener(new ChannelFutureListener {
        def operationComplete(future: ChannelFuture) = {
          if (future.isSuccess) {
            promise.success(future.channel())
          } else if (future.isCancelled) {
            promise.failure(new RuntimeException("Future cancelled"))
          } else {
            promise.failure(future.cause())
          }
        }
      })
      promise.future
    }
  }
} 
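The converters above are easiest to see in a short usage sketch. The EmbeddedChannel below is an assumption of this sketch, not part of the Lagom source; it simply supplies real ChannelFuture instances without any network I/O.

import com.lightbend.lagom.internal.NettyFutureConverters._
import io.netty.channel.Channel
import io.netty.channel.embedded.EmbeddedChannel
import scala.concurrent.Future

object ConvertersUsage extends App {
  val ch = new EmbeddedChannel()

  // A ChannelFuture becomes a Scala Future[Channel] that completes with the
  // underlying channel once the operation finishes.
  val closed: Future[Channel] = ch.close().channelFutureToScala

  // Any Netty Future[T] becomes a Scala Future[T] via toScala.
  val closedSignal: Future[Void] = ch.closeFuture().toScala
}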
Example 18
Source File: NettyServer.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws
package netty

import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.{ Channel, ChannelInitializer }
import io.netty.channel.epoll.{ EpollEventLoopGroup, EpollServerSocketChannel }
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.http._
import scala.concurrent.ExecutionContext

final class NettyServer(
    clients: ClientSystem,
    router: Router,
    config: Config
)(implicit ec: ExecutionContext) {

  private val logger = Logger(getClass)

  def start(): Unit = {

    logger.info("Start")

    val port     = config.getInt("http.port")
    val useEpoll = config.getBoolean("netty.useEpoll")

    val bossGroup =
      if (useEpoll) new EpollEventLoopGroup(1)
      else new NioEventLoopGroup(1)
    val workerGroup =
      if (useEpoll) new EpollEventLoopGroup
      else new NioEventLoopGroup

    val channelClz =
      if (useEpoll) classOf[EpollServerSocketChannel]
      else classOf[NioServerSocketChannel]

    try {
      val boot = new ServerBootstrap
      boot
        .group(bossGroup, workerGroup)
        .channel(channelClz)
        .childHandler(new ChannelInitializer[Channel] {
          override def initChannel(ch: Channel): Unit = {
            val pipeline = ch.pipeline()
            pipeline.addLast(new HttpServerCodec)
            pipeline.addLast(new HttpObjectAggregator(4096))
            pipeline.addLast(new ProtocolHandler(clients, router))
            pipeline.addLast(new FrameHandler)
          }
        })

      val server = boot.bind(port).sync().channel()

      logger.info(s"Listening to $port")

      server.closeFuture().sync()

      logger.info(s"Closed $port")
    } finally {
      bossGroup.shutdownGracefully()
      workerGroup.shutdownGracefully()
    }

  }
} 
Example 19
Source File: NettyRpcHandlerSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

}