From 489c6b6926dfdf036dc44f04b2974a186b98c439 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Wed, 10 Feb 2021 09:34:27 -0500 Subject: [PATCH 01/23] temporarily mute scala 3 --- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index c96fe33..cc6635e 100644 --- a/build.sbt +++ b/build.sbt @@ -25,7 +25,7 @@ ThisBuild / organizationName := "Typelevel" ThisBuild / startYear := Some(2021) -ThisBuild / crossScalaVersions := Seq("2.12.12", "2.13.4", "3.0.0-M3") +ThisBuild / crossScalaVersions := Seq("2.12.12", "2.13.4") // "3.0.0-M3" temporarily removed to easier/speedier ThisBuild / githubWorkflowOSes ++= Seq("macos-latest", "windows-latest") From 0d92b83c989bdb6f6212c3587f3aca2e1674303b Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sun, 21 Feb 2021 21:31:54 -0500 Subject: [PATCH 02/23] WIP: Example Http server with websockets --- .scalafmt.conf | 27 +++ .../fs2/netty/incudator/TcpNetwork.scala | 132 +++++++++++ .../scala/fs2/netty/incudator/TcpSocket.scala | 38 ++++ .../netty/incudator/TcpSocketHandler.scala | 70 ++++++ .../incudator/http/ExampleHttpServer.scala | 164 ++++++++++++++ .../incudator/http/HttpClientConnection.scala | 212 ++++++++++++++++++ .../http/HttpPipeliningBlockerHandler.scala | 56 +++++ .../fs2/netty/incudator/http/HttpServer.scala | 90 ++++++++ .../fs2/netty/incudator/http/WebSocket.scala | 25 +++ .../incudator/http/WebSocketConfig.scala | 76 +++++++ 10 files changed, 890 insertions(+) create mode 100644 .scalafmt.conf create mode 100644 core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/TcpSocket.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala create mode 100644 core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala diff --git a/.scalafmt.conf b/.scalafmt.conf new file mode 100644 index 0000000..20029ea --- /dev/null +++ b/.scalafmt.conf @@ -0,0 +1,27 @@ +version="2.7.5" +align = none +align.openParenCallSite = false +align.openParenDefnSite = false +align.tokens = [] +assumeStandardLibraryStripMargin = false +binPack.parentConstructors = false +continuationIndent.callSite = 2 +continuationIndent.defnSite = 2 +danglingParentheses = true +docstrings = ScalaDoc +docstrings.blankFirstLine = yes +encoding = UTF-8 +importSelectors = singleLine +includeCurlyBraceInSelectChains = true +indentOperator = spray +lineEndings = unix +maxColumn = 80 +newlines.alwaysBeforeTopLevelStatements = true +newlines.sometimesBeforeColonInMethodReturnType = false +optIn.breakChainOnFirstMethodDot = true +rewrite.rules = [ + PreferCurlyFors +] +spaces { + inImportCurlyBraces = false +} diff --git a/core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala b/core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala new file mode 100644 index 0000000..bfe053e --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala @@ -0,0 +1,132 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fs2.netty.incudator + +import cats.effect.std.{Dispatcher, Queue} +import cats.effect.{Async, Resource, Sync} +import cats.syntax.all._ +import com.comcast.ip4s.{Host, IpAddress, Port, SocketAddress} +import fs2.Stream +import fs2.netty.{ChannelOption, fromNettyFuture} +import io.netty.bootstrap.ServerBootstrap +import io.netty.channel.nio.NioEventLoopGroup +import io.netty.channel.socket.SocketChannel +import io.netty.channel.socket.nio.NioServerSocketChannel +import io.netty.channel.{ChannelHandler, ChannelInitializer, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} + +import java.net.InetSocketAddress + +final class TcpNetwork[F[_]: Async] private ( + parent: EventLoopGroup, // TODO: custom value class? + child: EventLoopGroup, + // clientChannelClazz: Class[_ <: Channel], + serverChannelClazz: Class[_ <: ServerChannel] +) { + + def server( + host: Option[Host], + port: Option[Port], + options: List[ChannelOption] = Nil + ): Resource[ + F, + (SocketAddress[IpAddress], Stream[F, TcpSocket[F, Byte, Byte, Nothing]]) + ] = server(host, port, options, Nil) + + def server[I, O, U]( + host: Option[Host], + port: Option[Port], + options: List[ChannelOption] = Nil, + handlers: List[ChannelHandler] = + Nil // TODO: Atm completely unsafe, but will fix + ): Resource[ + F, + (SocketAddress[IpAddress], Stream[F, TcpSocket[F, I, O, U]]) + ] = { + Dispatcher[F].flatMap { dispatcher => + Resource.suspend { + for { + tcpServerConnections <- Queue.unbounded[F, TcpSocket[F, I, O, U]] + + resolved <- host.traverse(_.resolve[F]) + + bootstrap <- Sync[F].delay { + val bootstrap = new ServerBootstrap + bootstrap + .group(parent, child) + .option( + JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], + false + ) // backpressure accepting connections, not reads on any individual connection + .channel(serverChannelClazz) + .childHandler(new ChannelInitializer[SocketChannel] { + override def initChannel(ch: SocketChannel): Unit = { + handlers.foreach(ch.pipeline().addLast(_)) + // TODO: ... + dispatcher.unsafeRunAndForget { + TcpSocketHandler[F, I, O, U](ch) + .flatTap(nb => Sync[F].delay(ch.pipeline().addLast(nb))) + .flatMap(tcpServerConnections.offer) + } + } + }) + // .childOption() // TODO: what child opts are there? Anything useful to communicate to injected Netty pipeline can be done through attrs... + // TODO: log `bootstrap.config()` as info or debug or trace? + options.foreach(opt => bootstrap.option(opt.key, opt.value)) + bootstrap + } + + // TODO: is the right name? Bind uses the parent ELG that calla TCP accept which yields a connection to child ELG? 
+ tcpAcceptChannel = Sync[F] defer { + val cf = bootstrap.bind( + resolved.map(_.toInetAddress).orNull, + port.map(_.value).getOrElse(0) + ) + fromNettyFuture[F](cf.pure[F]).as(cf.channel()) + } + } yield { + Resource + .make(tcpAcceptChannel) { ch => + fromNettyFuture[F](Sync[F].delay(ch.close())).void + } + .evalMap { ch => + Sync[F] + .delay( + SocketAddress.fromInetSocketAddress( + ch.localAddress().asInstanceOf[InetSocketAddress] + ) + ) + .tupleRight( + Stream.repeatEval( + Sync[F].delay(ch.read()) *> tcpServerConnections.take + ) + ) + } + } + } + } + } +} + +object TcpNetwork { + + def apply[F: Async](): TcpNetwork[F] = new TcpNetwork[F]( + parent = new NioEventLoopGroup(1), + child = new NioEventLoopGroup(), + classOf[NioServerSocketChannel] + ) + +} diff --git a/core/src/main/scala/fs2/netty/incudator/TcpSocket.scala b/core/src/main/scala/fs2/netty/incudator/TcpSocket.scala new file mode 100644 index 0000000..8e9b6c4 --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/TcpSocket.scala @@ -0,0 +1,38 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fs2 +package netty.incudator + +import io.netty.channel.ChannelPipeline + +trait TcpSocket[F[_], I, O, U] { + + def reads: Stream[F, I] + + def write(output: O): F[Unit] + + def writes: Pipe[F, I, INothing] + + def events: Stream[F, U] + +// def close: F[Unit] + + // TODO: mutator should be ChannelPipeline => F[Unit], but getting import collisions between FS2 stream and Sync in HttpServerConnection + def mutatePipeline[I2, O2, U2]( + mutator: ChannelPipeline => Unit + ): F[TcpSocket[F, I2, O2, U2]] +} diff --git a/core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala b/core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala new file mode 100644 index 0000000..6588251 --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala @@ -0,0 +1,70 @@ +package fs2 +package netty.incudator + +import cats.effect.Sync +import cats.effect.kernel.Async +import cats.effect.std.Queue +import cats.syntax.all._ +import fs2.netty.fromNettyFuture +import io.netty.channel.socket.SocketChannel +import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelPipeline} +import io.netty.util.ReferenceCountUtil + +// Netty handler that acts as a bridge from Netty to FS2 +private class TcpSocketHandler[F[_]: Async, I, O, U]( + queue: Queue[F, I], + channel: SocketChannel +) extends ChannelInboundHandlerAdapter + with TcpSocket[F, I, O, U] { + + override lazy val reads: Stream[F, I] = ??? + + // We should enforce a decoder handler exists in the pipeline for the output object, but probably won't be able + // to with Netty. Instead might require/recommend that pipelines passed into TCP Network server pass tests + // with form ByteBuf -> I. Or use a client, O -> ByteBuf, then pass to server, ByteBuf -> I, where I == O, to + // enforce the correctness property. 
+ // A hacky way to check is if `ByteToMessageDecoder` or similar Byte based Netty handlers are in the pipeline. + + // Should send from end of pipeline as output object maybe be transformed through the pipeline. + override def write(output: O): F[Unit] = fromNettyFuture[F]( + Sync[F].delay(channel.pipeline().writeAndFlush(output)) + ).void + + override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = + msg match { + case i: I => + queue.offer( + ReferenceCountUtil.touch( + i, + "This is the last point FS2-Netty touches the Reference Counted Object" + ) + ) // Perhaps once we have good tests, wrapped in resoruce type, and guratentees then we can remove the touch. + + case _ => + ReferenceCountUtil.safeRelease( + msg + ) // Netty logs if release fails, but perhaps we want to catch error and do custom logging/reporting/handling + } + + override def writes: Pipe[F, I, INothing] = ??? + + override def events: Stream[F, U] = ??? + + override def mutatePipeline[I2, O2, U2]( + mutator: ChannelPipeline => Unit + ): F[TcpSocket[F, I2, O2, U2]] = + Sync[F] + .delay(mutator(channel.pipeline())) + .flatMap(_ => TcpSocketHandler[F, I2, O2, U2](channel)) + .map( + identity + ) // TODO: why cannot compiler infer TcpSocketHandler in flatMap? +} + +object TcpSocketHandler { + + def apply[F[_]: Async, I, O, U]( + channel: SocketChannel + ): F[TcpSocketHandler[F, I, O, U]] = + Queue.unbounded[F, I].map(q => new TcpSocketHandler[F, I, O, U](q, channel)) +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala new file mode 100644 index 0000000..52d34fc --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala @@ -0,0 +1,164 @@ +package fs2.netty.incudator.http + +import cats.data.Kleisli +import cats.effect.{ExitCode, IO, IOApp} +import fs2.netty.incudator.http.HttpClientConnection.WebSocketResponse +import io.netty.handler.codec.http._ +import io.netty.handler.codec.http.websocketx._ + +import scala.concurrent.duration._ + +object ExampleHttpServer extends IOApp { + + private[this] val HttpRouter = + Kleisli[IO, FullHttpRequest, FullHttpResponse] { request => + if (request.uri() == "/health_check") + IO { + new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.OK + ) + } + else if (request.uri() == "/echo") + IO { + new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.OK, + request.content() // echo back body + ) + } + else + IO { + new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.NOT_FOUND + ) + } + } + + private[this] val ChatRooms = + scala.collection.mutable.Map.empty[String, List[WebSocket[IO, Nothing]]] + + private[this] val GenericWebSocketConfig = WebSocketConfig( + maxFramePayloadLength = 65536, + allowExtensions = false, + subProtocols = List.empty[String], + utf8FrameValidation = true + ) + + private[this] val WebSocketRouter = + Kleisli[IO, FullHttpRequest, WebSocketResponse[IO]] { request => + if (request.uri() == "/dev/null") + IO { + WebSocketResponse.SwitchToWebSocketProtocol[IO]( + GenericWebSocketConfig, + { + case Left(handshakeError: WebSocketHandshakeException) => + IO.unit + + case Left(error) => + IO.unit + + case Right((handshakeComplete, wsConn)) => + wsConn.reads.compile.drain + } + ) + } + else if (request.uri() == "/echo") + IO { + WebSocketResponse.SwitchToWebSocketProtocol[IO]( + GenericWebSocketConfig, + { + case Left(handshakeError: WebSocketHandshakeException) => + 
IO.unit + + case Left(error) => + IO.unit + + case Right((handshakeComplete, wsConn)) => + wsConn.reads + .evalMap { // TODO: is switchMap cleaner? + case frame: PingWebSocketFrame => + wsConn.write(new PongWebSocketFrame(frame.content())) + + case _: PongWebSocketFrame => + IO.unit + + case frame: TextWebSocketFrame => + wsConn.write(frame) + + case frame: CloseWebSocketFrame => + wsConn.write(frame) + + case frame: BinaryWebSocketFrame => + wsConn.write(frame) + + case _: ContinuationWebSocketFrame => + IO.unit + } + .attempt + .compile + .drain + } + ) + } + else if (request.uri() == "/chat") + IO { + WebSocketResponse.SwitchToWebSocketProtocol[IO]( + GenericWebSocketConfig, + { + case Left(handshakeError: WebSocketHandshakeException) => + IO.unit + + case Left(error) => + IO.unit + + case Right((handshakeComplete, webSocket)) => + for { + roomId <- IO( + handshakeComplete + .requestUri() + .split("//?") + .last + .split("=") + .last + ) // e.g. /chat?roomId=123abc + + _ <- IO(ChatRooms.updateWith(roomId) { + case Some(connections) => + Some(webSocket :: connections) + case None => + Some(List(webSocket)) + }) + + // TODO: broadcast reads to all connections in a chat room + } yield () + } + ) + } + else + IO( + WebSocketResponse + .`4xx`[IO](404, body = None, EmptyHttpHeaders.INSTANCE) + ) + } + + override def run(args: List[String]): IO[ExitCode] = + HttpServer + .start[IO]( + HttpServer.HttpConfigs( + requestTimeoutPeriod = 500.milliseconds, + HttpServer.HttpConfigs.Parsing.default + ) + ) + .evalMap { httpClientConnections => + httpClientConnections + .map(_.successfullyDecodedReads(HttpRouter, WebSocketRouter)) + .parJoin(65536) + .compile + .drain + } + .useForever + .as(ExitCode.Success) + +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala new file mode 100644 index 0000000..2557c84 --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala @@ -0,0 +1,212 @@ +package fs2.netty.incudator.http + +import cats.Applicative +import cats.data.Kleisli +import cats.effect.GenConcurrent +import cats.syntax.all._ +import fs2.Stream +import fs2.netty.incudator.TcpSocket +import fs2.netty.incudator.http.HttpClientConnection._ +import io.netty.buffer.Unpooled +import io.netty.channel.ChannelHandlerContext +import io.netty.handler.codec.TooLongFrameException +import io.netty.handler.codec.http._ +import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler.HandshakeComplete +import io.netty.handler.codec.http.websocketx.{WebSocketFrame, WebSocketServerProtocolHandler} + +// TODO: this is just a fancy function over Socket, so maybe just make this an object and a function? +// U could be io.netty.handler.timeout.IdleStateEvent if we wanted to handle connection closure, but in this +// context we want to close the channel anyway and just be notified why it was closed. However, we should likely +// send HttpResponseStatus.REQUEST_TIMEOUT for cleaner close. So change U type and handle at FS2 layer. 
+class HttpClientConnection[F[_]]( + tcpServerConnection: TcpSocket[ + F, + FullHttpRequest, + FullHttpResponse, + Nothing + ] +)(implicit genCon: GenConcurrent[F, Throwable]) { + + // TODO: Why does `Sync[F].delay(...).flatMap(...)` & `Stream.flatMap(...)` have a method collision when `import cats.syntax.all._` + + def successfullyDecodedReads( + httpRouter: Kleisli[F, FullHttpRequest, FullHttpResponse], + webSocketRouter: Kleisli[F, FullHttpRequest, WebSocketResponse[F]] + ): Stream[F, Unit] = + tcpServerConnection.reads + .evalMap { request => + if (request.decoderResult().isFailure) + createResponseForDecodeError(request.decoderResult().cause()) + .flatMap(tcpServerConnection.write) + else if (isWebSocketRequest(request)) + transitionToWebSocketsOrRespond( + webSocketRouter, + request + ) + else + httpRouter(request).flatMap(tcpServerConnection.write) + } + + private def createResponseForDecodeError( + cause: Throwable + ): F[DefaultFullHttpResponse] = + Applicative[F].pure { + cause match { + case ex: TooLongFrameException if isTooLongHeaderException(ex) => + val resp = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE + ) + HttpUtil.setKeepAlive(resp, true) + resp + + case ex: TooLongFrameException if isTooLongInitialLineException(ex) => + new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.REQUEST_URI_TOO_LONG + ) + // Netty will close connection here + + // TODO: HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE + case _ => + val resp = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.INTERNAL_SERVER_ERROR + ) + HttpUtil.setKeepAlive(resp, false) + resp + } + } + + private def transitionToWebSocketsOrRespond( + webSocketRouter: Kleisli[F, FullHttpRequest, WebSocketResponse[F]], + request: FullHttpRequest + ) = + webSocketRouter(request).flatMap { + case WebSocketResponse.SwitchToWebSocketProtocol( + wsConfigs, + cb + ) => + tcpServerConnection + .mutatePipeline[WebSocketFrame, WebSocketFrame, HandshakeComplete] { + pipeline => + // TODO: FS2-Netty should re-add itself back as last handler, perhaps it 1st removes itself then re-adds. + // We'll also remove this handler after handshake, so might be better to manually add + // WebSocketServerProtocolHandshakeHandler and Utf8FrameValidator since almost none of the other logic from + // WebSocketServerProtocolHandler will be needed. Maybe just the logic around close frame should be ported over. + val handler = + new WebSocketServerProtocolHandler(wsConfigs.toNetty) { + /* + Default `exceptionCaught` of `WebSocketServerProtocolHandler` returns a 400 w/o any headers like `Content-length`. + Let higher layer handler this. 
Catch WebSocketHandshakeException + */ + override def exceptionCaught( + ctx: ChannelHandlerContext, + cause: Throwable + ): Unit = ctx.fireExceptionCaught(cause) + } + pipeline.addLast(handler) + handler.channelRead(pipeline.context(handler), request) + } + .flatMap { connection => + connection.events + .find(_ => true) // only take 1st + .evalTap(handshakeComplete => + connection + .mutatePipeline[WebSocketFrame, WebSocketFrame, Nothing](_ => + () + ) + .map(wsConn => + cb( + ( + handshakeComplete, + new WebSocket[F, Nothing](underlying = wsConn) + ).asRight[Throwable] + ) + ) + ) + .compile + .drain + } + .onError { case e => + cb(e.asLeft[(HandshakeComplete, WebSocket[F, Nothing])]) + } + .void + + case WebSocketResponse.`3xx`(code, body, headers) => + wsResponse(code, body, headers).flatMap(tcpServerConnection.write) + + case WebSocketResponse.`4xx`(code, body, headers) => + wsResponse(code, body, headers).flatMap(tcpServerConnection.write) + + case WebSocketResponse.`5xx`(code, body, headers) => + wsResponse(code, body, headers).flatMap(tcpServerConnection.write) + } + + private def wsResponse( + code: Int, + body: Option[String], + headers: HttpHeaders + ): F[FullHttpResponse] = + Applicative[F].pure( + new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.valueOf(code), + body.fold(Unpooled.EMPTY_BUFFER)(s => + Unpooled.wrappedBuffer(s.getBytes()) + ), + headers, + EmptyHttpHeaders.INSTANCE + ) + ) +} + +object HttpClientConnection { + + private def isWebSocketRequest(request: FullHttpRequest): Boolean = { + // this is the minimum that Netty checks + request.method() == HttpMethod.GET && request + .headers() + .contains(HttpHeaderNames.SEC_WEBSOCKET_KEY) + } + + private def isTooLongHeaderException(cause: TooLongFrameException) = + cause.getMessage.contains("header") + + private def isTooLongInitialLineException(cause: TooLongFrameException) = + cause.getMessage.contains("line") + + sealed abstract class WebSocketResponse[F[_]] + + object WebSocketResponse { + + // One of throwable could be WebSocketHandshakeException + final case class SwitchToWebSocketProtocol[F[_]]( + wsConfigs: WebSocketConfig, + cb: Either[Throwable, (HandshakeComplete, WebSocket[F, Nothing])] => F[ + Unit + ] + ) extends WebSocketResponse[F] + + // TODO: refined types for code would be nice + final case class `3xx`[F[_]]( + code: Int, + body: Option[String], + headers: HttpHeaders + ) extends WebSocketResponse[F] + + final case class `4xx`[F[_]]( + code: Int, + body: Option[String], + headers: HttpHeaders + ) extends WebSocketResponse[F] + + final case class `5xx`[F[_]]( + code: Int, + body: Option[String], + headers: HttpHeaders + ) extends WebSocketResponse[F] + + } + +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala new file mode 100644 index 0000000..adce4f7 --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala @@ -0,0 +1,56 @@ +package fs2.netty.incudator.http + +import io.netty.channel.{ChannelDuplexHandler, ChannelHandlerContext, ChannelPromise} +import io.netty.handler.codec.http.{DefaultFullHttpResponse, FullHttpRequest, FullHttpResponse, HttpResponseStatus, HttpUtil, HttpVersion} +import io.netty.util.ReferenceCountUtil + +class HttpPipeliningBlockerHandler extends ChannelDuplexHandler { + + private var clientAttemptingHttpPipelining = false + private var isHttpRequestInFlight = false + + override def 
channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = + msg match { + case request: FullHttpRequest => + if (!isHttpRequestInFlight) { + isHttpRequestInFlight = true + super.channelRead(ctx, msg) + } else { + /* + Stop reading since we're going to close channel + */ + ctx.channel().config().setAutoRead(false) // TODO: remove this now? + ReferenceCountUtil.release(request) + clientAttemptingHttpPipelining = true + } + + case _ => + super.channelRead(ctx, msg) + } + + override def write( + ctx: ChannelHandlerContext, + msg: Any, + promise: ChannelPromise + ): Unit = { + msg match { + case _: FullHttpResponse => + super.write(ctx, msg, promise) + isHttpRequestInFlight = false + if (clientAttemptingHttpPipelining) { + // TODO: at some point, this can be made more robust to check if 1st response was sent. + // Perhaps channel is closed. In which case, don't need to send. + val response = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.TOO_MANY_REQUESTS + ) + HttpUtil.setKeepAlive(response, false) + HttpUtil.setContentLength(response, 0) + ctx.writeAndFlush(response) + } + + case _ => + super.write(ctx, msg, promise) + } + } +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala new file mode 100644 index 0000000..76aa82a --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -0,0 +1,90 @@ +package fs2.netty.incudator.http + +import cats.effect.kernel.Async +import cats.effect.{GenConcurrent, Resource} +import fs2.Stream +import fs2.netty.incudator.TcpNetwork +import io.netty.channel.{ChannelDuplexHandler, ChannelHandlerContext, ChannelPromise} +import io.netty.handler.codec.http._ +import io.netty.handler.timeout.ReadTimeoutHandler +import io.netty.util.ReferenceCountUtil + +import scala.concurrent.duration.FiniteDuration + +object HttpServer { + + def start[F[_]: Async](httpConfigs: HttpConfigs)(implicit + genCon: GenConcurrent[F, Throwable] + ): Resource[F, Stream[F, HttpClientConnection[F]]] = + TcpNetwork() + .server[FullHttpRequest, FullHttpResponse, Nothing]( + host = None, + port = None, + options = Nil, + handlers = List( + new HttpServerCodec( + httpConfigs.parsing.maxInitialLineLength, + httpConfigs.parsing.maxHeaderSize, + httpConfigs.parsing.maxChunkSize + ), + new HttpServerKeepAliveHandler, + new HttpObjectAggregator( + httpConfigs.parsing.maxHttpContentLength + ), + new ReadTimeoutHandler( // TODO: this also closes channel when exception is fired, should HttpClientConnection just handle that Idle Events? + httpConfigs.requestTimeoutPeriod.length, + httpConfigs.requestTimeoutPeriod.unit + ) + // new HttpPipeliningBlockerHandler + ) + ) + .map(_._2) + .map(_.map(new HttpClientConnection[F](_))) + + /** + * @param requestTimeoutPeriod - limit on how long connection can remain open w/o any requests + */ + final case class HttpConfigs( + requestTimeoutPeriod: FiniteDuration, + parsing: HttpConfigs.Parsing + ) + + // TODO: what about `Int Refined NonNegative` or validated or custom value types? + object HttpConfigs { + + /** + * @param maxHttpContentLength - limit on body/entity size + * @param maxInitialLineLength - limit on how long url can be, along with HTTP preamble, i.e. "GET HTTP 1.1 ..." 
+ * @param maxHeaderSize - limit on size of single header + */ + final case class Parsing( + maxHttpContentLength: Int, + maxInitialLineLength: Int, + maxHeaderSize: Int + ) { + def maxChunkSize: Int = Parsing.DefaultMaxChunkSize + } + + object Parsing { + + private val DefaultMaxChunkSize: Int = + 8192 // Netty default + + val DefaultMaxHttpContentLength: Int = + 65536 // Netty default + + val DefaultMaxInitialLineLength: Int = + 4096 // Netty default + + val DefaultMaxHeaderSize: Int = 8192 // Netty default + + val default: Parsing = Parsing( + DefaultMaxHttpContentLength, + DefaultMaxInitialLineLength, + DefaultMaxHeaderSize + ) + } + + } + +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala new file mode 100644 index 0000000..c1ee424 --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -0,0 +1,25 @@ +package fs2.netty.incudator.http + +import fs2.netty.incudator.TcpSocket +import fs2.{INothing, Pipe, Stream} +import io.netty.channel.ChannelPipeline +import io.netty.handler.codec.http.websocketx.WebSocketFrame + +class WebSocket[F[_], U]( + underlying: TcpSocket[F, WebSocketFrame, WebSocketFrame, Nothing] +) extends TcpSocket[F, WebSocketFrame, WebSocketFrame, U] { + override def reads: Stream[F, WebSocketFrame] = underlying.reads + + // TODO: this will be aware of close frames + override def write(output: WebSocketFrame): F[Unit] = + underlying.write(output) + + override def writes: Pipe[F, WebSocketFrame, INothing] = underlying.writes + + override def events: Stream[F, Nothing] = underlying.events + + override def mutatePipeline[I2, O2, U2]( + mutator: ChannelPipeline => Unit + ): F[TcpSocket[F, I2, O2, U2]] = + underlying.mutatePipeline(mutator) +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala new file mode 100644 index 0000000..c96f6ca --- /dev/null +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala @@ -0,0 +1,76 @@ +package fs2.netty.incudator.http + +import fs2.netty.incudator.http.WebSocketConfig.DisableTimeout +import io.netty.handler.codec.http.websocketx.{WebSocketCloseStatus, WebSocketDecoderConfig, WebSocketServerProtocolConfig} + + +/** + * + * @param maxFramePayloadLength - limit on payload length from Text and Binary Frames + * @param allowExtensions - WS extensions like those for compression + * @param subProtocols - optional subprotocols to negotiate + * @param utf8FrameValidation - optionally validate text frames' payloads are utf8 + */ +final case class WebSocketConfig( + maxFramePayloadLength: Int, + allowExtensions: Boolean, + subProtocols: List[String], + utf8FrameValidation: Boolean + ) { + + def toNetty: WebSocketServerProtocolConfig = + WebSocketServerProtocolConfig + .newBuilder() + + // Match all paths, let application filter requests. + .websocketPath("/") + .checkStartsWith(true) + .subprotocols(subProtocolsCsv) + + // Application will handle timeouts for WS Handshake request. Set this far into the future b/c Netty doesn't + // not allow non-positive values in configs. 
+ .handshakeTimeoutMillis(200000L) // 200 sec + + // Application will handle all inbound close frames, this flag tells Netty to handle them + .handleCloseFrames(false) + + // Application will handle all Control Frames + .dropPongFrames(false) + + // Netty's WebSocketCloseFrameHandler ensures Close Frames are sent on close (if they weren't sent before) + // and closes always send a Close Frame. + // It also checks that no new messages are sent after Close Frame is sent, throwing a ClosedChannelException. + // It would be nice to set it as INTERNAL_SERVER_ERROR since applications should handle closes, but b/c + // of a weird bug in Netty, this is triggered when UTF8 validation fails, so setting it to INVALID_PAYLOAD_DATA. + .sendCloseFrame(WebSocketCloseStatus.INVALID_PAYLOAD_DATA) + + // Netty can check that Close Frame has been sent in some time period, but we don't need this option because + // application should close channel immediately after each close + .forceCloseTimeoutMillis(DisableTimeout) + + .decoderConfig( + WebSocketDecoderConfig + .newBuilder() + .maxFramePayloadLength(maxFramePayloadLength) + + // Server's must set this to true + .expectMaskedFrames(true) + + // Allows to loosen the masking requirement on received frames. Should NOT be set. + .allowMaskMismatch(false) + .allowExtensions(allowExtensions) + .closeOnProtocolViolation(true) + .withUTF8Validator(utf8FrameValidation) + .build() + ) + .build() + + private def subProtocolsCsv = subProtocols match { + case list => list.mkString(", ") + case Nil => null + } +} + +object WebSocketConfig { + private val DisableTimeout = 0L +} \ No newline at end of file From 4d150287f94eb55b9ef2a88c93bc52149573b827 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Mon, 22 Feb 2021 15:34:51 -0500 Subject: [PATCH 03/23] Make socket generic;compile project --- .../fs2/netty/benchmarks/echo/Fs2Netty.scala | 2 +- core/src/main/scala/fs2/netty/Network.scala | 144 ++++++++++++----- core/src/main/scala/fs2/netty/Socket.scala | 15 +- .../main/scala/fs2/netty/SocketHandler.scala | 148 ++++++++++-------- .../fs2/netty/incudator/TcpNetwork.scala | 132 ---------------- .../scala/fs2/netty/incudator/TcpSocket.scala | 38 ----- .../netty/incudator/TcpSocketHandler.scala | 70 --------- .../incudator/http/ExampleHttpServer.scala | 16 ++ .../incudator/http/HttpClientConnection.scala | 113 ++++++++----- .../http/HttpPipeliningBlockerHandler.scala | 16 ++ .../fs2/netty/incudator/http/HttpServer.scala | 76 +++++---- .../fs2/netty/incudator/http/WebSocket.scala | 32 +++- .../incudator/http/WebSocketConfig.scala | 16 ++ 13 files changed, 394 insertions(+), 424 deletions(-) delete mode 100644 core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala delete mode 100644 core/src/main/scala/fs2/netty/incudator/TcpSocket.scala delete mode 100644 core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala diff --git a/benchmarks/src/main/scala/fs2/netty/benchmarks/echo/Fs2Netty.scala b/benchmarks/src/main/scala/fs2/netty/benchmarks/echo/Fs2Netty.scala index 4ac553b..d1ba27b 100644 --- a/benchmarks/src/main/scala/fs2/netty/benchmarks/echo/Fs2Netty.scala +++ b/benchmarks/src/main/scala/fs2/netty/benchmarks/echo/Fs2Netty.scala @@ -29,7 +29,7 @@ object Fs2Netty extends IOApp { val port = Port(args(1).toInt).get val rsrc = Network[IO] flatMap { net => - val handlers = net.server(host, port) map { client => + val handlers = net.server(host, port, options = Nil) map { client => client.reads.through(client.writes).attempt.void } diff --git 
a/core/src/main/scala/fs2/netty/Network.scala b/core/src/main/scala/fs2/netty/Network.scala index 43e1364..d22534e 100644 --- a/core/src/main/scala/fs2/netty/Network.scala +++ b/core/src/main/scala/fs2/netty/Network.scala @@ -17,22 +17,22 @@ package fs2 package netty +import cats.data.NonEmptyList import cats.effect.{Async, Concurrent, Resource, Sync} import cats.effect.std.{Dispatcher, Queue} import cats.syntax.all._ - import com.comcast.ip4s.{Host, IpAddress, Port, SocketAddress} - import io.netty.bootstrap.{Bootstrap, ServerBootstrap} -import io.netty.channel.{Channel, ChannelInitializer, ChannelOption => JChannelOption, EventLoopGroup, ServerChannel} +import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} import io.netty.channel.socket.SocketChannel import java.net.InetSocketAddress import java.util.concurrent.ThreadFactory import java.util.concurrent.atomic.AtomicInteger +// TODO: Do we need to distinguish between TCP (connection based network) and UDP (connection-less network)? final class Network[F[_]: Async] private ( - parent: EventLoopGroup, + parent: EventLoopGroup, // TODO: custom value class? child: EventLoopGroup, clientChannelClazz: Class[_ <: Channel], serverChannelClazz: Class[_ <: ServerChannel]) { @@ -40,10 +40,10 @@ final class Network[F[_]: Async] private ( def client( addr: SocketAddress[Host], options: List[ChannelOption] = Nil) - : Resource[F, Socket[F]] = + : Resource[F, Socket[F, Byte, Byte, Nothing]] = Dispatcher[F] flatMap { disp => Resource suspend { - Concurrent[F].deferred[Socket[F]] flatMap { d => + Concurrent[F].deferred[Socket[F, Byte, Byte, Nothing]] flatMap { d => addr.host.resolve[F] flatMap { resolved => Sync[F] delay { val bootstrap = new Bootstrap @@ -66,55 +66,117 @@ final class Network[F[_]: Async] private ( } } + //TODO: Add back default args for opts, removed to fix compilation error for overloaded method def server( host: Option[Host], port: Port, - options: List[ChannelOption] = Nil) - : Stream[F, Socket[F]] = + options: List[ChannelOption]) + : Stream[F, Socket[F, Byte, Byte, Nothing]] = Stream.resource(serverResource(host, Some(port), options)).flatMap(_._2) + def server[I, O, E]( + host: Option[Host], + port: Port, + handlers: NonEmptyList[ChannelHandler], + options: List[ChannelOption]) + : Stream[F, Socket[F, I, O, E]] = + Stream.resource(serverResource[I, O, E](host, Some(port),handlers, options)).flatMap(_._2) + def serverResource( host: Option[Host], port: Option[Port], - options: List[ChannelOption] = Nil) - : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F]])] = - Dispatcher[F] flatMap { disp => - Resource suspend { - Queue.unbounded[F, Socket[F]] flatMap { sockets => - host.traverse(_.resolve[F]) flatMap { resolved => - Sync[F] delay { - val bootstrap = new ServerBootstrap - bootstrap.group(parent, child) - .option(JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], false) // backpressure - .channel(serverChannelClazz) - .childHandler(initializer(disp)(sockets.offer)) - - options.foreach(opt => bootstrap.option(opt.key, opt.value)) - - val connectChannel = Sync[F] defer { - val cf = bootstrap.bind( - resolved.map(_.toInetAddress).orNull, - port.map(_.value).getOrElse(0)) - fromNettyFuture[F](cf.pure[F]).as(cf.channel()) - } + options: List[ChannelOption]) + : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte, Nothing]])] = + serverResource(host, port, handlers = Nil,options) + + def serverResource[I, O, E]( + host: 
Option[Host], + port: Option[Port], + handlers: NonEmptyList[ChannelHandler], + options: List[ChannelOption] + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O, E]])] = + serverResource(host, port, handlers.toList, options) + + private def serverResource[I, O, E]( + host: Option[Host], + port: Option[Port], + handlers: List[ChannelHandler], + options: List[ChannelOption] + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O, E]])] = + for { + dispatcher <- Dispatcher[F] + + res <- Resource suspend { + for { + clientConnections <- Queue.unbounded[F, Socket[F, I, O, E]] + + resolvedHost <- host.traverse(_.resolve[F]) + + bootstrap <- Sync[F] delay { + val bootstrap = new ServerBootstrap + bootstrap + .group(parent, child) + .option( + JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], + false + ) // backpressure for accepting connections, not reads on any individual connection + //.childOption() TODO: Any useful ones? + .channel(serverChannelClazz) + .childHandler(new ChannelInitializer[SocketChannel] { + override def initChannel(ch: SocketChannel): Unit = { + val p = ch.pipeline() + ch.config().setAutoRead(false) + + handlers.foldLeft(p)((pipeline, handler) => + pipeline.addLast(handler) + ) + // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? + dispatcher.unsafeRunAndForget { + SocketHandler[F, I, O, E](dispatcher, ch) + .flatTap(h => Sync[F].delay(p.addLast(h))) + .flatMap(clientConnections.offer) + } + } + }) + + options.foreach(opt => bootstrap.option(opt.key, opt.value)) + bootstrap + } - val connection = Resource.make(connectChannel) { ch => - fromNettyFuture[F](Sync[F].delay(ch.close())).void - } + // TODO: Log properly as info, debug, or trace + _ <- Sync[F].delay(println(bootstrap.config())) - connection evalMap { ch => - Sync[F].delay(SocketAddress.fromInetSocketAddress(ch.localAddress().asInstanceOf[InetSocketAddress])).tupleRight( - Stream.repeatEval(Sync[F].delay(ch.read()) *> sockets.take)) - } - } + // TODO: is the right name? Bind uses the parent ELG that calla TCP accept which yields a connection to child ELG? 
+ tcpAcceptChannel = Sync[F] defer { + val cf = bootstrap.bind( + resolvedHost.map(_.toInetAddress).orNull, + port.map(_.value).getOrElse(0) + ) + fromNettyFuture[F](cf.pure[F]).as(cf.channel()) + } + } yield Resource + .make(tcpAcceptChannel) { ch => + fromNettyFuture[F](Sync[F].delay(ch.close())).void + } + .evalMap { ch => + Sync[F] + .delay( + SocketAddress.fromInetSocketAddress( + ch.localAddress().asInstanceOf[InetSocketAddress] + ) + ) + .tupleRight( + Stream.repeatEval( + Sync[F].delay(ch.read()) *> clientConnections.take + ) + ) } - } } - } + } yield res private[this] def initializer( disp: Dispatcher[F])( - result: Socket[F] => F[Unit]) + result: Socket[F, Byte, Byte, Nothing] => F[Unit]) : ChannelInitializer[SocketChannel] = new ChannelInitializer[SocketChannel] { def initChannel(ch: SocketChannel) = { @@ -122,7 +184,7 @@ final class Network[F[_]: Async] private ( ch.config().setAutoRead(false) disp unsafeRunAndForget { - SocketHandler[F](disp, ch) flatMap { s => + SocketHandler[F, Byte, Byte, Nothing](disp, ch) flatMap { s => Sync[F].delay(p.addLast(s)) *> result(s) } } diff --git a/core/src/main/scala/fs2/netty/Socket.scala b/core/src/main/scala/fs2/netty/Socket.scala index 018e949..3ad964e 100644 --- a/core/src/main/scala/fs2/netty/Socket.scala +++ b/core/src/main/scala/fs2/netty/Socket.scala @@ -18,14 +18,21 @@ package fs2 package netty import com.comcast.ip4s.{IpAddress, SocketAddress} +import io.netty.channel.ChannelPipeline -trait Socket[F[_]] { +trait Socket[F[_], I, O, E] { def localAddress: F[SocketAddress[IpAddress]] def remoteAddress: F[SocketAddress[IpAddress]] - def reads: Stream[F, Byte] + def reads: Stream[F, I] - def write(bytes: Chunk[Byte]): F[Unit] - def writes: Pipe[F, Byte, INothing] + def events: Stream[F, E] + + def write(output: O): F[Unit] + def writes: Pipe[F, O, INothing] + + def mutatePipeline[I2, O2, E2]( + mutator: ChannelPipeline => F[Unit] + ): F[Socket[F, I2, O2, E2]] } diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index 1e053cc..ba7f4fc 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -17,98 +17,122 @@ package fs2 package netty -import cats.{Applicative, Functor} -import cats.effect.{Async, Poll, Sync} import cats.effect.std.{Dispatcher, Queue} +import cats.effect.{Async, Poll, Sync} import cats.syntax.all._ - +import cats.{Applicative, Functor} import com.comcast.ip4s.{IpAddress, SocketAddress} - -import io.netty.buffer.{ByteBuf, Unpooled} -import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter} import io.netty.channel.socket.SocketChannel - -private final class SocketHandler[F[_]: Async] ( - disp: Dispatcher[F], - channel: SocketChannel, - bufs: Queue[F, AnyRef]) // ByteBuf | Throwable | Null - extends ChannelInboundHandlerAdapter - with Socket[F] { - - val localAddress: F[SocketAddress[IpAddress]] = +import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelPipeline} +import io.netty.util.ReferenceCountUtil + +private final class SocketHandler[F[_]: Async, I, O, E]( + disp: Dispatcher[F], + channel: SocketChannel, + readsQueue: Queue[F, AnyRef], // I | Throwable | Null + eventsQueue: Queue[F, E] +) extends ChannelInboundHandlerAdapter + with Socket[F, I, O, E] { + + override val localAddress: F[SocketAddress[IpAddress]] = Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.localAddress())) - val remoteAddress: F[SocketAddress[IpAddress]] = + 
override val remoteAddress: F[SocketAddress[IpAddress]] = Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.remoteAddress())) - private[this] def take(poll: Poll[F]): F[ByteBuf] = - poll(bufs.take) flatMap { - case null => Applicative[F].pure(null) // EOF marker - case buf: ByteBuf => buf.pure[F] - case t: Throwable => t.raiseError[F, ByteBuf] + private[this] def take(poll: Poll[F]): F[Option[I]] = + poll(readsQueue.take) flatMap { + case null => Applicative[F].pure(none[I]) // EOF marker + case i: I => Applicative[F].pure(i.some) + case t: Throwable => t.raiseError[F, Option[I]] } - private[this] val fetch: Stream[F, ByteBuf] = - Stream.bracketFull[F, ByteBuf](poll => Sync[F].delay(channel.read()) *> take(poll)) { (b, _) => - if (b != null) - Sync[F].delay(b.release()).void - else - Applicative[F].unit - } - - lazy val reads: Stream[F, Byte] = + private[this] val fetch: Stream[F, I] = + Stream + .bracketFull[F, Option[I]](poll => + Sync[F].delay(channel.read()) *> take(poll) + ) { (i, _) => + if (i != null) + Sync[F].delay(ReferenceCountUtil.safeRelease(i)).void + else + Applicative[F].unit + } + .unNoneTerminate + + override lazy val reads: Stream[F, I] = Stream force { Functor[F].ifF(isOpen)( - fetch.flatMap(b => if (b == null) Stream.empty else Stream.chunk(toChunk(b))) ++ reads, - Stream.empty) + fetch.flatMap(i => + if (i == null) Stream.empty else Stream.emit(i) + ) ++ reads, + Stream.empty + ) } - def write(bytes: Chunk[Byte]): F[Unit] = - fromNettyFuture[F](Sync[F].delay(channel.writeAndFlush(toByteBuf(bytes)))).void + override def write(output: O): F[Unit] = + fromNettyFuture[F]( + Sync[F].delay(channel.writeAndFlush(output)) + ).void - val writes: Pipe[F, Byte, INothing] = - _.chunks.evalMap(c => write(c) *> isOpen).takeWhile(b => b).drain + override val writes: Pipe[F, O, INothing] = + _.evalMap(o => write(o) *> isOpen).takeWhile(bool => bool).drain private[this] val isOpen: F[Boolean] = - Sync[F].delay(channel.isOpen()) + Sync[F].delay(channel.isOpen) override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) = - disp.unsafeRunAndForget(bufs.offer(msg)) + ReferenceCountUtil.touch( + msg, + s"Last touch point in FS2-Netty for ${msg.getClass.getSimpleName}" + ) match { + case i: I => + disp.unsafeRunAndForget(readsQueue.offer(i)) + + case _ => + ReferenceCountUtil.safeRelease( + msg + ) // TODO: Netty logs if release fails, but perhaps we want to catch error and do custom logging/reporting/handling + } override def exceptionCaught(ctx: ChannelHandlerContext, t: Throwable) = - disp.unsafeRunAndForget(bufs.offer(t)) + disp.unsafeRunAndForget(readsQueue.offer(t)) override def channelInactive(ctx: ChannelHandlerContext) = try { - disp.unsafeRunAndForget(bufs.offer(null)) + disp.unsafeRunAndForget(readsQueue.offer(null)) } catch { - case _: IllegalStateException => () // sometimes we can see this due to race conditions in shutdown + case _: IllegalStateException => + () // sometimes we can see this due to race conditions in shutdown } - private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = - chunk match { - case Chunk.ArraySlice(arr, off, len) => - Unpooled.wrappedBuffer(arr, off, len) - - case c: Chunk.ByteBuffer => - Unpooled.wrappedBuffer(c.toByteBuffer) - - case c => - Unpooled.wrappedBuffer(c.toArray) + override def userEventTriggered(ctx: ChannelHandlerContext, evt: Any): Unit = + evt match { + case e: E => disp.unsafeRunAndForget(eventsQueue.offer(e)) + case _ => () // TODO: probably raise error on stream... 
} - private[this] def toChunk(buf: ByteBuf): Chunk[Byte] = - if (buf.hasArray()) - Chunk.array(buf.array()) - else if (buf.nioBufferCount() > 0) - Chunk.byteBuffer(buf.nioBuffer()) - else - ??? + override lazy val events: Stream[F, E] = + Stream.fromQueueUnterminated(eventsQueue) + + override def mutatePipeline[I2, O2, E2]( + mutator: ChannelPipeline => F[Unit] + ): F[Socket[F, I2, O2, E2]] = + Sync[F] + .suspend(Sync.Type.Delay)(mutator(channel.pipeline())) + .flatMap(_ => SocketHandler[F, I2, O2, E2](disp, channel)) + .map( + identity + ) // TODO: why cannot compiler infer TcpSocketHandler in flatMap? } private object SocketHandler { - def apply[F[_]: Async](disp: Dispatcher[F], channel: SocketChannel): F[SocketHandler[F]] = - Queue.unbounded[F, AnyRef] map { bufs => - new SocketHandler(disp, channel, bufs) - } + + def apply[F[_]: Async, I, O, E]( + disp: Dispatcher[F], + channel: SocketChannel + ): F[SocketHandler[F, I, O, E]] = + for { + readsQueue <- Queue.unbounded[F, AnyRef] + eventsQueue <- Queue.unbounded[F, E] + } yield new SocketHandler(disp, channel, readsQueue, eventsQueue) } diff --git a/core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala b/core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala deleted file mode 100644 index bfe053e..0000000 --- a/core/src/main/scala/fs2/netty/incudator/TcpNetwork.scala +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2021 Typelevel - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package fs2.netty.incudator - -import cats.effect.std.{Dispatcher, Queue} -import cats.effect.{Async, Resource, Sync} -import cats.syntax.all._ -import com.comcast.ip4s.{Host, IpAddress, Port, SocketAddress} -import fs2.Stream -import fs2.netty.{ChannelOption, fromNettyFuture} -import io.netty.bootstrap.ServerBootstrap -import io.netty.channel.nio.NioEventLoopGroup -import io.netty.channel.socket.SocketChannel -import io.netty.channel.socket.nio.NioServerSocketChannel -import io.netty.channel.{ChannelHandler, ChannelInitializer, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} - -import java.net.InetSocketAddress - -final class TcpNetwork[F[_]: Async] private ( - parent: EventLoopGroup, // TODO: custom value class? 
- child: EventLoopGroup, - // clientChannelClazz: Class[_ <: Channel], - serverChannelClazz: Class[_ <: ServerChannel] -) { - - def server( - host: Option[Host], - port: Option[Port], - options: List[ChannelOption] = Nil - ): Resource[ - F, - (SocketAddress[IpAddress], Stream[F, TcpSocket[F, Byte, Byte, Nothing]]) - ] = server(host, port, options, Nil) - - def server[I, O, U]( - host: Option[Host], - port: Option[Port], - options: List[ChannelOption] = Nil, - handlers: List[ChannelHandler] = - Nil // TODO: Atm completely unsafe, but will fix - ): Resource[ - F, - (SocketAddress[IpAddress], Stream[F, TcpSocket[F, I, O, U]]) - ] = { - Dispatcher[F].flatMap { dispatcher => - Resource.suspend { - for { - tcpServerConnections <- Queue.unbounded[F, TcpSocket[F, I, O, U]] - - resolved <- host.traverse(_.resolve[F]) - - bootstrap <- Sync[F].delay { - val bootstrap = new ServerBootstrap - bootstrap - .group(parent, child) - .option( - JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], - false - ) // backpressure accepting connections, not reads on any individual connection - .channel(serverChannelClazz) - .childHandler(new ChannelInitializer[SocketChannel] { - override def initChannel(ch: SocketChannel): Unit = { - handlers.foreach(ch.pipeline().addLast(_)) - // TODO: ... - dispatcher.unsafeRunAndForget { - TcpSocketHandler[F, I, O, U](ch) - .flatTap(nb => Sync[F].delay(ch.pipeline().addLast(nb))) - .flatMap(tcpServerConnections.offer) - } - } - }) - // .childOption() // TODO: what child opts are there? Anything useful to communicate to injected Netty pipeline can be done through attrs... - // TODO: log `bootstrap.config()` as info or debug or trace? - options.foreach(opt => bootstrap.option(opt.key, opt.value)) - bootstrap - } - - // TODO: is the right name? Bind uses the parent ELG that calla TCP accept which yields a connection to child ELG? - tcpAcceptChannel = Sync[F] defer { - val cf = bootstrap.bind( - resolved.map(_.toInetAddress).orNull, - port.map(_.value).getOrElse(0) - ) - fromNettyFuture[F](cf.pure[F]).as(cf.channel()) - } - } yield { - Resource - .make(tcpAcceptChannel) { ch => - fromNettyFuture[F](Sync[F].delay(ch.close())).void - } - .evalMap { ch => - Sync[F] - .delay( - SocketAddress.fromInetSocketAddress( - ch.localAddress().asInstanceOf[InetSocketAddress] - ) - ) - .tupleRight( - Stream.repeatEval( - Sync[F].delay(ch.read()) *> tcpServerConnections.take - ) - ) - } - } - } - } - } -} - -object TcpNetwork { - - def apply[F: Async](): TcpNetwork[F] = new TcpNetwork[F]( - parent = new NioEventLoopGroup(1), - child = new NioEventLoopGroup(), - classOf[NioServerSocketChannel] - ) - -} diff --git a/core/src/main/scala/fs2/netty/incudator/TcpSocket.scala b/core/src/main/scala/fs2/netty/incudator/TcpSocket.scala deleted file mode 100644 index 8e9b6c4..0000000 --- a/core/src/main/scala/fs2/netty/incudator/TcpSocket.scala +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2021 Typelevel - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package fs2 -package netty.incudator - -import io.netty.channel.ChannelPipeline - -trait TcpSocket[F[_], I, O, U] { - - def reads: Stream[F, I] - - def write(output: O): F[Unit] - - def writes: Pipe[F, I, INothing] - - def events: Stream[F, U] - -// def close: F[Unit] - - // TODO: mutator should be ChannelPipeline => F[Unit], but getting import collisions between FS2 stream and Sync in HttpServerConnection - def mutatePipeline[I2, O2, U2]( - mutator: ChannelPipeline => Unit - ): F[TcpSocket[F, I2, O2, U2]] -} diff --git a/core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala b/core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala deleted file mode 100644 index 6588251..0000000 --- a/core/src/main/scala/fs2/netty/incudator/TcpSocketHandler.scala +++ /dev/null @@ -1,70 +0,0 @@ -package fs2 -package netty.incudator - -import cats.effect.Sync -import cats.effect.kernel.Async -import cats.effect.std.Queue -import cats.syntax.all._ -import fs2.netty.fromNettyFuture -import io.netty.channel.socket.SocketChannel -import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelPipeline} -import io.netty.util.ReferenceCountUtil - -// Netty handler that acts as a bridge from Netty to FS2 -private class TcpSocketHandler[F[_]: Async, I, O, U]( - queue: Queue[F, I], - channel: SocketChannel -) extends ChannelInboundHandlerAdapter - with TcpSocket[F, I, O, U] { - - override lazy val reads: Stream[F, I] = ??? - - // We should enforce a decoder handler exists in the pipeline for the output object, but probably won't be able - // to with Netty. Instead might require/recommend that pipelines passed into TCP Network server pass tests - // with form ByteBuf -> I. Or use a client, O -> ByteBuf, then pass to server, ByteBuf -> I, where I == O, to - // enforce the correctness property. - // A hacky way to check is if `ByteToMessageDecoder` or similar Byte based Netty handlers are in the pipeline. - - // Should send from end of pipeline as output object maybe be transformed through the pipeline. - override def write(output: O): F[Unit] = fromNettyFuture[F]( - Sync[F].delay(channel.pipeline().writeAndFlush(output)) - ).void - - override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = - msg match { - case i: I => - queue.offer( - ReferenceCountUtil.touch( - i, - "This is the last point FS2-Netty touches the Reference Counted Object" - ) - ) // Perhaps once we have good tests, wrapped in resoruce type, and guratentees then we can remove the touch. - - case _ => - ReferenceCountUtil.safeRelease( - msg - ) // Netty logs if release fails, but perhaps we want to catch error and do custom logging/reporting/handling - } - - override def writes: Pipe[F, I, INothing] = ??? - - override def events: Stream[F, U] = ??? - - override def mutatePipeline[I2, O2, U2]( - mutator: ChannelPipeline => Unit - ): F[TcpSocket[F, I2, O2, U2]] = - Sync[F] - .delay(mutator(channel.pipeline())) - .flatMap(_ => TcpSocketHandler[F, I2, O2, U2](channel)) - .map( - identity - ) // TODO: why cannot compiler infer TcpSocketHandler in flatMap? 
-} - -object TcpSocketHandler { - - def apply[F[_]: Async, I, O, U]( - channel: SocketChannel - ): F[TcpSocketHandler[F, I, O, U]] = - Queue.unbounded[F, I].map(q => new TcpSocketHandler[F, I, O, U](q, channel)) -} diff --git a/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala index 52d34fc..06cdef7 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty.incudator.http import cats.data.Kleisli diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala index 2557c84..1bebf27 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala @@ -1,14 +1,30 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty.incudator.http import cats.Applicative import cats.data.Kleisli -import cats.effect.GenConcurrent +import cats.effect.Sync import cats.syntax.all._ import fs2.Stream -import fs2.netty.incudator.TcpSocket +import fs2.netty.Socket import fs2.netty.incudator.http.HttpClientConnection._ import io.netty.buffer.Unpooled -import io.netty.channel.ChannelHandlerContext +import io.netty.channel.{ChannelHandlerContext, ChannelPipeline} import io.netty.handler.codec.TooLongFrameException import io.netty.handler.codec.http._ import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler.HandshakeComplete @@ -18,39 +34,37 @@ import io.netty.handler.codec.http.websocketx.{WebSocketFrame, WebSocketServerPr // U could be io.netty.handler.timeout.IdleStateEvent if we wanted to handle connection closure, but in this // context we want to close the channel anyway and just be notified why it was closed. However, we should likely // send HttpResponseStatus.REQUEST_TIMEOUT for cleaner close. So change U type and handle at FS2 layer. 
-class HttpClientConnection[F[_]]( - tcpServerConnection: TcpSocket[ +class HttpClientConnection[F[_]: Sync]( + clientSocket: Socket[ F, FullHttpRequest, FullHttpResponse, Nothing ] -)(implicit genCon: GenConcurrent[F, Throwable]) { - - // TODO: Why does `Sync[F].delay(...).flatMap(...)` & `Stream.flatMap(...)` have a method collision when `import cats.syntax.all._` +) { def successfullyDecodedReads( httpRouter: Kleisli[F, FullHttpRequest, FullHttpResponse], webSocketRouter: Kleisli[F, FullHttpRequest, WebSocketResponse[F]] ): Stream[F, Unit] = - tcpServerConnection.reads + clientSocket.reads .evalMap { request => if (request.decoderResult().isFailure) createResponseForDecodeError(request.decoderResult().cause()) - .flatMap(tcpServerConnection.write) + .flatMap(clientSocket.write) else if (isWebSocketRequest(request)) transitionToWebSocketsOrRespond( webSocketRouter, request ) else - httpRouter(request).flatMap(tcpServerConnection.write) + httpRouter(request).flatMap(clientSocket.write) } private def createResponseForDecodeError( cause: Throwable ): F[DefaultFullHttpResponse] = - Applicative[F].pure { + Sync[F].delay { cause match { case ex: TooLongFrameException if isTooLongHeaderException(ex) => val resp = new DefaultFullHttpResponse( @@ -81,41 +95,26 @@ class HttpClientConnection[F[_]]( private def transitionToWebSocketsOrRespond( webSocketRouter: Kleisli[F, FullHttpRequest, WebSocketResponse[F]], request: FullHttpRequest - ) = + ): F[Unit] = webSocketRouter(request).flatMap { case WebSocketResponse.SwitchToWebSocketProtocol( wsConfigs, cb ) => - tcpServerConnection - .mutatePipeline[WebSocketFrame, WebSocketFrame, HandshakeComplete] { - pipeline => - // TODO: FS2-Netty should re-add itself back as last handler, perhaps it 1st removes itself then re-adds. - // We'll also remove this handler after handshake, so might be better to manually add - // WebSocketServerProtocolHandshakeHandler and Utf8FrameValidator since almost none of the other logic from - // WebSocketServerProtocolHandler will be needed. Maybe just the logic around close frame should be ported over. - val handler = - new WebSocketServerProtocolHandler(wsConfigs.toNetty) { - /* - Default `exceptionCaught` of `WebSocketServerProtocolHandler` returns a 400 w/o any headers like `Content-length`. - Let higher layer handler this. Catch WebSocketHandshakeException - */ - override def exceptionCaught( - ctx: ChannelHandlerContext, - cause: Throwable - ): Unit = ctx.fireExceptionCaught(cause) - } - pipeline.addLast(handler) - handler.channelRead(pipeline.context(handler), request) - } + clientSocket + .mutatePipeline[WebSocketFrame, WebSocketFrame, HandshakeComplete]( + installWebSocketHandlersAndContinueWebSocketUpgrade( + request, + wsConfigs + ) + ) .flatMap { connection => connection.events - .find(_ => true) // only take 1st + .find(_ => true) // only take 1st event since Netty will only first once .evalTap(handshakeComplete => connection - .mutatePipeline[WebSocketFrame, WebSocketFrame, Nothing](_ => - () - ) + // TODO: maybe like a covary method? 
+ .mutatePipeline[WebSocketFrame, WebSocketFrame, Nothing](_ => Applicative[F].unit) .map(wsConn => cb( ( @@ -134,21 +133,51 @@ class HttpClientConnection[F[_]]( .void case WebSocketResponse.`3xx`(code, body, headers) => - wsResponse(code, body, headers).flatMap(tcpServerConnection.write) + wsResponse(code, body, headers).flatMap(clientSocket.write) case WebSocketResponse.`4xx`(code, body, headers) => - wsResponse(code, body, headers).flatMap(tcpServerConnection.write) + wsResponse(code, body, headers).flatMap(clientSocket.write) case WebSocketResponse.`5xx`(code, body, headers) => - wsResponse(code, body, headers).flatMap(tcpServerConnection.write) + wsResponse(code, body, headers).flatMap(clientSocket.write) } + private def installWebSocketHandlersAndContinueWebSocketUpgrade( + request: FullHttpRequest, + wsConfigs: WebSocketConfig + )(pipeline: ChannelPipeline): F[Unit] = + for { + // TODO: FS2-Netty should re-add itself back as last handler, perhaps it 1st removes itself then re-adds. + // We'll also remove this handler after handshake, so might be better to manually add + // WebSocketServerProtocolHandshakeHandler and Utf8FrameValidator since almost none of the other logic from + // WebSocketServerProtocolHandler will be needed. Maybe just the logic around close frame should be ported over. + handler <- Applicative[F].pure( + new WebSocketServerProtocolHandler(wsConfigs.toNetty) { + + /* + Default `exceptionCaught` of `WebSocketServerProtocolHandler` returns a 400 w/o any headers like `Content-length`. + Let higher layer handler this. Catch WebSocketHandshakeException + */ + override def exceptionCaught( + ctx: ChannelHandlerContext, + cause: Throwable + ): Unit = ctx.fireExceptionCaught(cause) + } + ) + + _ <- Sync[F].delay(pipeline.addLast(handler)) + + _ <- Sync[F].delay( + handler.channelRead(pipeline.context(handler), request) + ) + } yield () + private def wsResponse( code: Int, body: Option[String], headers: HttpHeaders ): F[FullHttpResponse] = - Applicative[F].pure( + Sync[F].delay( new DefaultFullHttpResponse( HttpVersion.HTTP_1_1, HttpResponseStatus.valueOf(code), diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala index adce4f7..e18b5f2 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpPipeliningBlockerHandler.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package fs2.netty.incudator.http import io.netty.channel.{ChannelDuplexHandler, ChannelHandlerContext, ChannelPromise} diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala index 76aa82a..bc94ce1 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -1,45 +1,63 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty.incudator.http -import cats.effect.kernel.Async -import cats.effect.{GenConcurrent, Resource} +import cats.data.NonEmptyList +import cats.effect.{Async, Resource} import fs2.Stream -import fs2.netty.incudator.TcpNetwork -import io.netty.channel.{ChannelDuplexHandler, ChannelHandlerContext, ChannelPromise} +import fs2.netty.Network import io.netty.handler.codec.http._ import io.netty.handler.timeout.ReadTimeoutHandler -import io.netty.util.ReferenceCountUtil import scala.concurrent.duration.FiniteDuration object HttpServer { - def start[F[_]: Async](httpConfigs: HttpConfigs)(implicit - genCon: GenConcurrent[F, Throwable] + def start[F[_]: Async]( + httpConfigs: HttpConfigs ): Resource[F, Stream[F, HttpClientConnection[F]]] = - TcpNetwork() - .server[FullHttpRequest, FullHttpResponse, Nothing]( - host = None, - port = None, - options = Nil, - handlers = List( - new HttpServerCodec( - httpConfigs.parsing.maxInitialLineLength, - httpConfigs.parsing.maxHeaderSize, - httpConfigs.parsing.maxChunkSize - ), - new HttpServerKeepAliveHandler, - new HttpObjectAggregator( - httpConfigs.parsing.maxHttpContentLength + for { + network <- Network[F] + + rawHttpClientConnection <- network + .serverResource[FullHttpRequest, FullHttpResponse, Nothing]( + host = None, + port = None, + handlers = NonEmptyList.of( + new HttpServerCodec( + httpConfigs.parsing.maxInitialLineLength, + httpConfigs.parsing.maxHeaderSize, + httpConfigs.parsing.maxChunkSize + ), + new HttpServerKeepAliveHandler, + new HttpObjectAggregator( + httpConfigs.parsing.maxHttpContentLength + ), + new ReadTimeoutHandler( // TODO: this also closes channel when exception is fired, should HttpClientConnection just handle that Idle Events? + httpConfigs.requestTimeoutPeriod.length, + httpConfigs.requestTimeoutPeriod.unit + ) + // new HttpPipeliningBlockerHandler ), - new ReadTimeoutHandler( // TODO: this also closes channel when exception is fired, should HttpClientConnection just handle that Idle Events? 
- httpConfigs.requestTimeoutPeriod.length, - httpConfigs.requestTimeoutPeriod.unit - ) - // new HttpPipeliningBlockerHandler + options = Nil ) - ) - .map(_._2) - .map(_.map(new HttpClientConnection[F](_))) + .map(_._2) + + } yield rawHttpClientConnection.map(new HttpClientConnection[F](_)) /** * @param requestTimeoutPeriod - limit on how long connection can remain open w/o any requests diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala index c1ee424..f876655 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -1,13 +1,35 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty.incudator.http -import fs2.netty.incudator.TcpSocket +import com.comcast.ip4s.{IpAddress, SocketAddress} +import fs2.netty.Socket import fs2.{INothing, Pipe, Stream} import io.netty.channel.ChannelPipeline import io.netty.handler.codec.http.websocketx.WebSocketFrame class WebSocket[F[_], U]( - underlying: TcpSocket[F, WebSocketFrame, WebSocketFrame, Nothing] -) extends TcpSocket[F, WebSocketFrame, WebSocketFrame, U] { + underlying: Socket[F, WebSocketFrame, WebSocketFrame, Nothing] // Delegate pattern +) extends Socket[F, WebSocketFrame, WebSocketFrame, U] { + + override def localAddress: F[SocketAddress[IpAddress]] = underlying.localAddress + + override def remoteAddress: F[SocketAddress[IpAddress]] = underlying.remoteAddress + override def reads: Stream[F, WebSocketFrame] = underlying.reads // TODO: this will be aware of close frames @@ -19,7 +41,7 @@ class WebSocket[F[_], U]( override def events: Stream[F, Nothing] = underlying.events override def mutatePipeline[I2, O2, U2]( - mutator: ChannelPipeline => Unit - ): F[TcpSocket[F, I2, O2, U2]] = + mutator: ChannelPipeline => F[Unit] + ): F[Socket[F, I2, O2, U2]] = underlying.mutatePipeline(mutator) } diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala index c96f6ca..f8d0548 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocketConfig.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package fs2.netty.incudator.http import fs2.netty.incudator.http.WebSocketConfig.DisableTimeout From c9193acd10792a5923f9fc6f2b949a23595fb8ac Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sat, 6 Mar 2021 23:01:29 -0500 Subject: [PATCH 04/23] WIP Create a Netty pipeline with default ByteBuf handling. Create an Fs2NettyEmbeddedChannel. "Fix" generic read message handling in SocketHandler. --- .../main/scala/fs2/netty/NettyPipeline.scala | 76 +++++++ core/src/main/scala/fs2/netty/Network.scala | 11 +- core/src/main/scala/fs2/netty/Socket.scala | 41 +++- .../main/scala/fs2/netty/SocketHandler.scala | 106 +++++---- .../EmbeddedChannelWithAutoRead.scala | 136 +++++++++++ .../embedded/Fs2NettyEmbeddedChannel.scala | 120 ++++++++++ .../incudator/http/HttpClientConnection.scala | 4 + .../fs2/netty/incudator/http/HttpServer.scala | 10 +- .../fs2/netty/incudator/http/WebSocket.scala | 24 +- .../scala/fs2/netty/NettyPipelineSpec.scala | 214 ++++++++++++++++++ .../test/scala/fs2/netty/NetworkSpec.scala | 56 ++--- 11 files changed, 716 insertions(+), 82 deletions(-) create mode 100644 core/src/main/scala/fs2/netty/NettyPipeline.scala create mode 100644 core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala create mode 100644 core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala create mode 100644 core/src/test/scala/fs2/netty/NettyPipelineSpec.scala diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala new file mode 100644 index 0000000..92240c3 --- /dev/null +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fs2.netty + +import cats.effect.std.Dispatcher +import cats.effect.{Async, Sync} +import cats.syntax.all._ +import io.netty.buffer.ByteBuf +import io.netty.channel.socket.SocketChannel +import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer} + +// TODO: account for Sharable annotation, some of these need to be an eval, and evaluated each time whereas others can be eagerly evaluated. +class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E]( + handlers: List[ChannelHandler] +)( + dispatcher: Dispatcher[F] +) { + + // TODO: there are other interesting type of channels + // TODO: Remember ChannelInitializer is Sharable! + def toSocketChannelInitializer( + cb: Socket[F, I, O, E] => F[Unit] + ): F[ChannelInitializer[SocketChannel]] = + toChannelInitializer[SocketChannel](cb) + + def toChannelInitializer[C <: Channel]( + cb: Socket[F, I, O, E] => F[Unit] + ): F[ChannelInitializer[C]] = Sync[F].delay { (ch: C) => + { + val p = ch.pipeline() + ch.config().setAutoRead(false) + + handlers.foldLeft(p)((pipeline, handler) => pipeline.addLast(handler)) + + dispatcher.unsafeRunAndForget { + // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? Also is cats.effect.Effect removed in CE3? 
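// Editorial note (not part of the patch), answering the TODO above: cats.effect.Effect and
// ConcurrentEffect no longer exist in Cats Effect 3; Dispatcher is the intended replacement
// for running effects from impure callback-style code such as Netty handlers, and
// unsafeRunAndForget submits the effect for asynchronous execution without waiting on its result.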
+ SocketHandler[F, I, O, E](dispatcher, ch) + .flatTap(h => + Sync[F].delay(p.addLast(h)) + ) // TODO: pass EventExecutorGroup + .flatMap(cb) + //TODO: Wonder if cb should be invoked on handlerAdded in SocketHandler? Technically, Socket isn't + // "fully active"; SocketHandler is in the pipeline but is marked with ADD_PENDING status (whatever that means, + // maybe it's ok). Need to work out expectation of callers. And if this is addLast is called from a different + // thread, then handlerAdded will be scheduled by Netty to execute in the future. + } + } + } +} + +object NettyPipeline { + + def apply[F[_]: Async]( + dispatcher: Dispatcher[F] + ): F[NettyPipeline[F, ByteBuf, ByteBuf, Nothing]] = + Sync[F].delay( + new NettyPipeline[F, ByteBuf, ByteBuf, Nothing]( + handlers = Nil + )(dispatcher) + ) + +} diff --git a/core/src/main/scala/fs2/netty/Network.scala b/core/src/main/scala/fs2/netty/Network.scala index d22534e..280b536 100644 --- a/core/src/main/scala/fs2/netty/Network.scala +++ b/core/src/main/scala/fs2/netty/Network.scala @@ -74,7 +74,7 @@ final class Network[F[_]: Async] private ( : Stream[F, Socket[F, Byte, Byte, Nothing]] = Stream.resource(serverResource(host, Some(port), options)).flatMap(_._2) - def server[I, O, E]( + def server[I: Socket.Decoder, O, E]( host: Option[Host], port: Port, handlers: NonEmptyList[ChannelHandler], @@ -89,7 +89,7 @@ final class Network[F[_]: Async] private ( : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte, Nothing]])] = serverResource(host, port, handlers = Nil,options) - def serverResource[I, O, E]( + def serverResource[I: Socket.Decoder, O, E]( host: Option[Host], port: Option[Port], handlers: NonEmptyList[ChannelHandler], @@ -97,7 +97,7 @@ final class Network[F[_]: Async] private ( ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O, E]])] = serverResource(host, port, handlers.toList, options) - private def serverResource[I, O, E]( + private def serverResource[I: Socket.Decoder, O, E]( host: Option[Host], port: Option[Port], handlers: List[ChannelHandler], @@ -174,6 +174,11 @@ final class Network[F[_]: Async] private ( } } yield res + + implicit val decoder: Socket.Decoder[Byte] = new Socket.Decoder[Byte] { + override def decode(x: AnyRef): Either[String, Byte] = ??? + } + private[this] def initializer( disp: Dispatcher[F])( result: Socket[F, Byte, Byte, Nothing] => F[Unit]) diff --git a/core/src/main/scala/fs2/netty/Socket.scala b/core/src/main/scala/fs2/netty/Socket.scala index 3ad964e..f554d3f 100644 --- a/core/src/main/scala/fs2/netty/Socket.scala +++ b/core/src/main/scala/fs2/netty/Socket.scala @@ -17,13 +17,23 @@ package fs2 package netty -import com.comcast.ip4s.{IpAddress, SocketAddress} +import cats.syntax.all._ +import io.netty.buffer.ByteBuf import io.netty.channel.ChannelPipeline -trait Socket[F[_], I, O, E] { +// TODO: `I <: ReferenceCounted` to avoid type erasure. This is a very big constraint on the Netty channel, although for HTTP +// and WS use cases this is completely ok. One alternative is scala reflections api, but will overhead be acceptable +// along the critical code path (assuming high volume servers/clients)? +// Think through variance of types. +trait Socket[F[_], I, O, +E] { - def localAddress: F[SocketAddress[IpAddress]] - def remoteAddress: F[SocketAddress[IpAddress]] + // TODO: Temporarily disabling while making Socket generic enough to test with EmbeddedChannel. Furthermore, these + // methods restrict Socket to be a InetChannel which isn't compatible with EmbeddedChannel. 
Netty also works with + // DomainSocketChannel and LocalChannel which have DomainSocketAddress and LocalAddress respectively(both for IPC?), + // not IpAddresses. + // Can these be provided on the server or client network resource construction rather than on the Socket? +// def localAddress: F[SocketAddress[IpAddress]] +// def remoteAddress: F[SocketAddress[IpAddress]] def reads: Stream[F, I] @@ -32,7 +42,28 @@ trait Socket[F[_], I, O, E] { def write(output: O): F[Unit] def writes: Pipe[F, O, INothing] - def mutatePipeline[I2, O2, E2]( + def isOpen: F[Boolean] + def isClosed: F[Boolean] + + def close(): F[Unit] + + def mutatePipeline[I2: Socket.Decoder, O2, E2]( mutator: ChannelPipeline => F[Unit] ): F[Socket[F, I2, O2, E2]] } + +object Socket { + + trait Decoder[A] { + def decode(x: AnyRef): Either[String, A] + } + + private[this] val ByteBufClassName = classOf[ByteBuf].getName + + implicit val ByteBufDecoder: Decoder[ByteBuf] = { + case bb: ByteBuf => bb.asRight[String] + case x => + s"pipeline error, expected $ByteBufClassName, but got ${x.getClass.getName}" + .asLeft[ByteBuf] + } +} diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index ba7f4fc..c5228e1 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -21,41 +21,46 @@ import cats.effect.std.{Dispatcher, Queue} import cats.effect.{Async, Poll, Sync} import cats.syntax.all._ import cats.{Applicative, Functor} -import com.comcast.ip4s.{IpAddress, SocketAddress} -import io.netty.channel.socket.SocketChannel -import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelPipeline} -import io.netty.util.ReferenceCountUtil +import io.netty.buffer.ByteBuf +import io.netty.channel.{Channel, ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelPipeline} +import io.netty.util.{ReferenceCountUtil, ReferenceCounted} -private final class SocketHandler[F[_]: Async, I, O, E]( +private final class SocketHandler[F[_]: Async, I, O, +E]( disp: Dispatcher[F], - channel: SocketChannel, - readsQueue: Queue[F, AnyRef], // I | Throwable | Null + channel: Channel, + readsQueue: Queue[F, Option[Either[Throwable, I]]], eventsQueue: Queue[F, E] -) extends ChannelInboundHandlerAdapter +)(implicit inboundDecoder: Socket.Decoder[I]) + extends ChannelInboundHandlerAdapter with Socket[F, I, O, E] { - override val localAddress: F[SocketAddress[IpAddress]] = - Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.localAddress())) - - override val remoteAddress: F[SocketAddress[IpAddress]] = - Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.remoteAddress())) +// override val localAddress: F[SocketAddress[IpAddress]] = +// Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.localAddress())) +// +// override val remoteAddress: F[SocketAddress[IpAddress]] = +// Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.remoteAddress())) + // TODO: we can avoid Option boxing if I <: Null private[this] def take(poll: Poll[F]): F[Option[I]] = poll(readsQueue.take) flatMap { - case null => Applicative[F].pure(none[I]) // EOF marker - case i: I => Applicative[F].pure(i.some) - case t: Throwable => t.raiseError[F, Option[I]] + case None => + Applicative[F].pure(none[I]) // EOF marker + + case Some(Right(i)) => + Applicative[F].pure(i.some) + + case Some(Left(t)) => + t.raiseError[F, Option[I]] } private[this] val fetch: Stream[F, I] = Stream .bracketFull[F, Option[I]](poll => 
Sync[F].delay(channel.read()) *> take(poll) - ) { (i, _) => - if (i != null) + ) { (opt, _) => + opt.fold(Applicative[F].unit)(i => Sync[F].delay(ReferenceCountUtil.safeRelease(i)).void - else - Applicative[F].unit + ) } .unNoneTerminate @@ -77,29 +82,53 @@ private final class SocketHandler[F[_]: Async, I, O, E]( override val writes: Pipe[F, O, INothing] = _.evalMap(o => write(o) *> isOpen).takeWhile(bool => bool).drain - private[this] val isOpen: F[Boolean] = + override val isOpen: F[Boolean] = Sync[F].delay(channel.isOpen) - override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) = - ReferenceCountUtil.touch( - msg, - s"Last touch point in FS2-Netty for ${msg.getClass.getSimpleName}" - ) match { - case i: I => - disp.unsafeRunAndForget(readsQueue.offer(i)) + override def isClosed: F[Boolean] = isOpen.map(bool => !bool) + + override def close(): F[Unit] = fromNettyFuture[F]( + Sync[F].delay(channel.close()) + ).void - case _ => - ReferenceCountUtil.safeRelease( - msg - ) // TODO: Netty logs if release fails, but perhaps we want to catch error and do custom logging/reporting/handling + // TODO: Even with a single channel.read() call, channelRead may get invoked more than once! Netty's "solution" + // is to use FlowControlHandler. Why isn't that the default Netty behavior?! + override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) = { + inboundDecoder.decode( + ReferenceCountUtil.touch( + msg, + s"Last touch point in FS2-Netty for ${msg.getClass.getSimpleName}" + ) + ) match { + case Left(errorMsg) => + // TODO: Netty logs if release fails, but perhaps we want to catch error and do custom logging/reporting/handling + ReferenceCountUtil.safeRelease(msg) + + case Right(i) => + // TODO: what's the perf impact of unsafeRunSync vs unsafeRunAndForget? + // FlowControlHandler & unsafeRunAndForget vs. unsafeRunSync-only? 
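// Editorial sketch (not part of the patch): the alternative hinted at above would keep
// unsafeRunAndForget and place Netty's FlowControlHandler ahead of this handler, so a
// single channel.read() releases at most one message downstream, e.g.
//   new NettyPipeline[F, ByteBuf, ByteBuf, Nothing](
//     handlers = List(new io.netty.handler.flow.FlowControlHandler())
//   )(dispatcher)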
+ disp.unsafeRunSync(readsQueue.offer(i.asRight[Exception].some)) } + } + + private def debug(x: Any) = x match { + case bb: ByteBuf => + val b = bb.readByte() + bb.resetReaderIndex() + val arr = Array[Byte](1) + arr(0) = b + new String(arr) + + case _ => + "" + } override def exceptionCaught(ctx: ChannelHandlerContext, t: Throwable) = - disp.unsafeRunAndForget(readsQueue.offer(t)) + disp.unsafeRunAndForget(readsQueue.offer(t.asLeft[I].some)) override def channelInactive(ctx: ChannelHandlerContext) = try { - disp.unsafeRunAndForget(readsQueue.offer(null)) + disp.unsafeRunAndForget(readsQueue.offer(None)) } catch { case _: IllegalStateException => () // sometimes we can see this due to race conditions in shutdown @@ -114,7 +143,7 @@ private final class SocketHandler[F[_]: Async, I, O, E]( override lazy val events: Stream[F, E] = Stream.fromQueueUnterminated(eventsQueue) - override def mutatePipeline[I2, O2, E2]( + override def mutatePipeline[I2: Socket.Decoder, O2, E2]( mutator: ChannelPipeline => F[Unit] ): F[Socket[F, I2, O2, E2]] = Sync[F] @@ -127,12 +156,13 @@ private final class SocketHandler[F[_]: Async, I, O, E]( private object SocketHandler { - def apply[F[_]: Async, I, O, E]( + def apply[F[_]: Async, I: Socket.Decoder, O, E]( disp: Dispatcher[F], - channel: SocketChannel + channel: Channel ): F[SocketHandler[F, I, O, E]] = for { - readsQueue <- Queue.unbounded[F, AnyRef] + readsQueue <- Queue.unbounded[F, Option[Either[Throwable, I]]] eventsQueue <- Queue.unbounded[F, E] } yield new SocketHandler(disp, channel, readsQueue, eventsQueue) + } diff --git a/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala b/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala new file mode 100644 index 0000000..eead718 --- /dev/null +++ b/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala @@ -0,0 +1,136 @@ +package fs2.netty.embedded + +import io.netty.buffer.ByteBuf +import io.netty.channel.embedded.EmbeddedChannel +import io.netty.channel.{ChannelFuture, ChannelPromise} +import io.netty.util.ReferenceCountUtil + +import java.nio.channels.ClosedChannelException +import java.util + +// Based off of https://github.com/netty/netty/pull/9935/files - WARNING: Java code below +// Should be committed back upstream +class EmbeddedChannelWithAutoRead extends EmbeddedChannel { + + /** + * Used to simulate socket buffers. When autoRead is false, all inbound information will be temporarily stored here. 
+ */ + private lazy val tempInboundMessages = + new util.ArrayDeque[util.AbstractMap.SimpleEntry[Any, ChannelPromise]]() + + def writeInboundFixed(msgs: Any*): Boolean = { + ensureOpen() + if (msgs.isEmpty) + return !inboundMessages().isEmpty + + if (!config().isAutoRead) { + msgs.foreach(msg => + tempInboundMessages.add( + new util.AbstractMap.SimpleEntry[Any, ChannelPromise](msg, null) + ) + ) + return false + } + + val p = pipeline + for (m <- msgs) { + p.fireChannelRead(m) + } + + flushInbound() + !inboundMessages().isEmpty + } + + override def writeOneInbound( + msg: Any, + promise: ChannelPromise + ): ChannelFuture = { + val (isChannelOpen, exception) = + if (isOpen) + (true, null) + else (false, new ClosedChannelException) + + if (isChannelOpen) { + if (!config().isAutoRead) { + tempInboundMessages.add( + new util.AbstractMap.SimpleEntry[Any, ChannelPromise](msg, promise) + ) + return promise + } else + pipeline().fireChannelRead(msg) + } + + if (exception == null) + promise.setSuccess() + else + promise.setFailure(exception) + } + + override def doClose(): Unit = { + super.doClose() + if (!tempInboundMessages.isEmpty) { + var exception: ClosedChannelException = null; + while (true) { + val entry = tempInboundMessages.poll() + if (entry == null) { + return + } + val value = entry.getKey; + if (value != null) { + ReferenceCountUtil.release(value); + } + val promise: ChannelPromise = entry.getValue; + if (promise != null) { + if (exception == null) { + exception = new ClosedChannelException(); + } + promise.tryFailure(exception); + } + } + } + } + + override def doBeginRead(): Unit = { + if (!tempInboundMessages.isEmpty) { + while (true) { + val pair = tempInboundMessages.poll(); + if (pair == null) { + return + } + + val msg = pair.getKey; + if (msg != null) { +// println(s"Firing read ${debug(msg)}") + pipeline().fireChannelRead(msg) + } + + val promise = pair.getValue + if (promise != null) { + try { + checkException() + promise.setSuccess() + } catch { + case e: Throwable => + promise.setFailure(e) + } + } + } + + // fire channelReadComplete. + val _ = flushInbound() + } + + } + + private def debug(x: Any) = x match { + case bb: ByteBuf => + val b = bb.readByte() + bb.resetReaderIndex() + val arr = Array[Byte](1) + arr(0) = b + new String(arr) + + case _ => + "" + } +} diff --git a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala new file mode 100644 index 0000000..3e9b693 --- /dev/null +++ b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala @@ -0,0 +1,120 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package fs2 +package netty.embedded + +import cats.effect.{Async, Sync} +import cats.implicits._ +import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder +import fs2.netty.{NettyPipeline, Socket} +import io.netty.buffer.{ByteBuf, Unpooled} +import io.netty.channel.embedded.EmbeddedChannel + +/** + * Better, safer, and clearer api for testing channels + * For use in tests only. + * @param underlying + * @param F + * @tparam F + */ +final case class Fs2NettyEmbeddedChannel[F[_]]( + underlying: EmbeddedChannelWithAutoRead +)(implicit + F: Sync[F] +) { + + // TODO: write examples (a spec?) for these + + def writeAllInboundWithoutFlush[A]( + a: A* + )(implicit encoder: Encoder[A]): F[Unit] = + for { + encodedObjects <- F.delay(a.map(encoder.encode)) + _ <- encodedObjects.traverse(bb => + F.delay(underlying.writeOneInbound(bb)) + ) // returns channelFutures + } yield () + + /** + * + * @param a + * @param encoder + * @tparam A + * @return `true` if the write operation did add something to the inbound buffer + */ + def writeAllInboundThenFlushThenRunAllPendingTasks[A](a: A*)(implicit + encoder: Encoder[A] + ): F[Boolean] = for { + encodedObjects <- F.delay(a.map(encoder.encode)) + areMsgsAdded <- F.delay( + underlying.writeInboundFixed(encodedObjects: _*) + ) // areByteBufsAddedToUnhandledBuffer? onUnhandledInboundMessage + } yield areMsgsAdded + + def flushInbound(): F[Unit] = F.delay(underlying.flushInbound()).void + + def isOpen: F[Boolean] = F.pure(underlying.isOpen) + + def isClosed: F[Boolean] = F.pure(!underlying.isOpen) + + def close(): F[Unit] = F.delay(underlying.close()).void +} + +object Fs2NettyEmbeddedChannel { + + def apply[F[_], I, O, E]( + nettyPipeline: NettyPipeline[F, I, O, E] + )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, I, O, E])] = + for { + channel <- F.delay( + new EmbeddedChannelWithAutoRead() + ) // With FlowControl/Dispatcher fixes, EmbeddedChannelWithAutoRead might not be needed after all. + socket <- F.async[Socket[F, I, O, E]] { cb => + nettyPipeline + .toChannelInitializer[EmbeddedChannel] { socket => + F.delay(cb(socket.asRight[Throwable])) + } + .flatMap { initializer => + F.delay(channel.pipeline().addFirst(initializer)) *> F.delay( + channel.runPendingTasks() + ) + } + .as[Option[F[Unit]]](None) + } + } yield (new Fs2NettyEmbeddedChannel[F](channel), socket) + + // TODO: Functor and contramap + trait Encoder[A] { + def encode(a: A): ByteBuf + } + + object CommonEncoders { + implicit val byteBufEncoder: Encoder[ByteBuf] = identity + + implicit val byteArrayEncoder: Encoder[Array[Byte]] = (a: Array[Byte]) => + Unpooled.wrappedBuffer(a) + + implicit val byteEncoder: Encoder[Byte] = (a: Byte) => + Unpooled.buffer(1, 1).writeByte(a) + + implicit val stringEncoder: Encoder[String] = (str: String) => + byteArrayEncoder.encode(str.getBytes) + +// implicit def listEncoder[A](implicit decoder: Decoder[A]): Encoder[List[A]] = (list: List[A]) => +// list.map() + } +} diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala index 1bebf27..afcf109 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala @@ -92,6 +92,10 @@ class HttpClientConnection[F[_]: Sync]( } } + implicit val decoder = new Socket.Decoder[WebSocketFrame] { + override def decode(x: AnyRef): Either[String, WebSocketFrame] = ??? 
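// Editorial sketch (not part of the patch): the stubbed decode above could mirror the
// FullHttpRequest decoder defined in HttpServer later in this patch, e.g.
//   override def decode(x: AnyRef): Either[String, WebSocketFrame] = x match {
//     case frame: WebSocketFrame => frame.asRight[String]
//     case _ => "non websocket frame, pipeline error".asLeft[WebSocketFrame]
//   }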
+ } + private def transitionToWebSocketsOrRespond( webSocketRouter: Kleisli[F, FullHttpRequest, WebSocketResponse[F]], request: FullHttpRequest diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala index bc94ce1..76c0076 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -19,7 +19,8 @@ package fs2.netty.incudator.http import cats.data.NonEmptyList import cats.effect.{Async, Resource} import fs2.Stream -import fs2.netty.Network +import cats.syntax.all._ +import fs2.netty.{Network, Socket} import io.netty.handler.codec.http._ import io.netty.handler.timeout.ReadTimeoutHandler @@ -27,6 +28,13 @@ import scala.concurrent.duration.FiniteDuration object HttpServer { + implicit val decoder = new Socket.Decoder[FullHttpRequest] { + override def decode(x: AnyRef): Either[String, FullHttpRequest] = x match { + case req: FullHttpRequest => req.asRight[String] + case _ => "non http message, pipeline error".asLeft[FullHttpRequest] + } + } + def start[F[_]: Async]( httpConfigs: HttpConfigs ): Resource[F, Stream[F, HttpClientConnection[F]]] = diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala index f876655..31da408 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -16,19 +16,23 @@ package fs2.netty.incudator.http -import com.comcast.ip4s.{IpAddress, SocketAddress} import fs2.netty.Socket import fs2.{INothing, Pipe, Stream} import io.netty.channel.ChannelPipeline import io.netty.handler.codec.http.websocketx.WebSocketFrame class WebSocket[F[_], U]( - underlying: Socket[F, WebSocketFrame, WebSocketFrame, Nothing] // Delegate pattern + underlying: Socket[ + F, + WebSocketFrame, + WebSocketFrame, + Nothing + ] ) extends Socket[F, WebSocketFrame, WebSocketFrame, U] { - override def localAddress: F[SocketAddress[IpAddress]] = underlying.localAddress - - override def remoteAddress: F[SocketAddress[IpAddress]] = underlying.remoteAddress + // override def localAddress: F[SocketAddress[IpAddress]] = underlying.localAddress +// +// override def remoteAddress: F[SocketAddress[IpAddress]] = underlying.remoteAddress override def reads: Stream[F, WebSocketFrame] = underlying.reads @@ -40,8 +44,14 @@ class WebSocket[F[_], U]( override def events: Stream[F, Nothing] = underlying.events - override def mutatePipeline[I2, O2, U2]( + override def isOpen: F[Boolean] = underlying.isOpen + + override def isClosed: F[Boolean] = underlying.isClosed + + override def close(): F[Unit] = underlying.close() + + override def mutatePipeline[I2: Socket.Decoder, O2, E2]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2, U2]] = + ): F[Socket[F, I2, O2, E2]] = underlying.mutatePipeline(mutator) } diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala new file mode 100644 index 0000000..9f2953f --- /dev/null +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -0,0 +1,214 @@ +package fs2 +package netty + +import cats.effect.std.Dispatcher +import cats.effect.testing.specs2.CatsResource +import cats.effect.{IO, Resource} +import cats.syntax.all._ +import fs2.netty.embedded.Fs2NettyEmbeddedChannel +import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ +import 
fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder +import io.netty.buffer.ByteBuf +import org.specs2.mutable.SpecificationLike + +import scala.concurrent.duration._ + +class NettyPipelineSpec + extends CatsResource[IO, Dispatcher[IO]] + with SpecificationLike { + + // TODO: where does 10s timeout come from? + override val resource: Resource[IO, Dispatcher[IO]] = Dispatcher[IO] + + "default pipeline, i.e. no extra Channel handlers" should { + "zero reads in Netty corresponds to an empty fs2-netty ByteBuf reads stream" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + pipeline + ) + .map(_._2) + + reads <- socket.reads + .interruptAfter(1.second) + .compile + .toList // TODO: what's the proper way to check for empty stream? + } yield reads should beEmpty + } + + "zero events in Netty pipeline corresponds to an empty fs2-netty events stream" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + pipeline + ) + .map(_._2) + + events: List[Nothing] <- socket.events + .interruptAfter(1.second) + .compile + .toList + } yield events should beEmpty + } + + "reads from Netty appear in fs2-netty as reads stream as ByteBuf objects" in withResource { + dispatcher => + for { + // Given a socket and embedded channel from the default Netty Pipeline + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + (channel, socket) = x + + // Then configs should be setup, like autoread should be false...maybe move to top test? + _ <- IO(channel.underlying.config().isAutoRead should beFalse) + _ <- IO(channel.underlying.config().isAutoClose should beTrue) + + // And list of single byte ByteBuf's + encoder = implicitly[Encoder[Byte]] + byteBufs = "hello world".getBytes().map(encoder.encode) + + // When writing each ByteBuf individually to the channel + areMsgsAdded <- channel + .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) + + // Then messages aren't added to the inbound buffer because autoread should be off + _ <- IO(areMsgsAdded should beFalse) + + // And reads on socket yield the original message sent on channel + str <- socket.reads + .map(_.readByte()) + .take(11) + .foldMap(byteToString) + .compile + .last + _ <- IO(str shouldEqual "hello world".some) + + // And ByteBuf's should be released + _ <- IO(byteBufs.map(_.refCnt()) shouldEqual Array.fill(11)(0)) + } yield ok + } + + "writing ByteBuf's onto fs2-netty socket appear on Netty's channel" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + (channel, socket) = x + + encoder = implicitly[Encoder[Byte]] + byteBufs = "hello world".getBytes().map(encoder.encode).toList + // TODO: make resource? 
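// Editorial sketch (not part of the patch): the TODO above could be addressed by acquiring
// the test ByteBufs through a Resource so they are always released, e.g.
//   Resource.make(IO("hello world".getBytes().map(encoder.encode).toList))(bbs =>
//     IO(bbs.foreach(ReferenceCountUtil.safeRelease))
//   )
// assuming cats.effect.Resource and io.netty.util.ReferenceCountUtil are imported.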
+// _ <- IO.unit.guarantee(IO(byteBufs.foreach(ReferenceCountUtil.release))) + + _ <- byteBufs.traverse(socket.write) + + str <- (0 until 11).toList + .traverse { _ => + IO(channel.underlying.readOutbound[ByteBuf]()) + .flatMap(bb => IO(bb.readByte())) + } + .map(_.foldMap(byteToString)) + + _ <- IO(str shouldEqual "hello world") + } yield ok + } + +// "piping any reads to writes just echos back ByteBuf's written onto Netty's channel" in withResource { +// dispatcher => +// for { +// pipeline <- NettyPipeline[IO](dispatcher) +// x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) +// (channel, socket) = x +// +// encoder = implicitly[Encoder[Byte]] +// byteBufs = "hello world".getBytes().map(encoder.encode).toList +// +// _ <- socket.reads.through(socket.writes).take(11).compile.drain +// +// str <- (0 until 11).toList +// .traverse { _ => +// IO(channel.underlying.readOutbound[ByteBuf]()) +// .flatMap(bb => IO(bb.readByte())) +// } +// .map(_.toArray) +// .map(new String(_)) +// +// _ <- IO(str shouldEqual "hello world") +// } yield ok +// } + + "closed connection in Netty appears as closed streams in fs2-netty" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + (channel, socket) = x + + // Netty sanity check + _ <- channel.isOpen.flatMap(isOpen => IO(isOpen should beTrue)) + _ <- socket.isOpen.flatMap(isOpen => IO(isOpen should beTrue)) + + // TODO: wrapper methods for underlying + _ <- channel.close() + + // Netty sanity check, maybe move these to their own test file for Embedded Channel + _ <- channel.isClosed.flatMap(isClosed => IO(isClosed should beTrue)) + _ <- socket.isOpen.flatMap(isOpen => IO(isOpen should beFalse)) + _ <- socket.isClosed.flatMap(isClosed => IO(isClosed should beTrue)) + } yield ok + } + + "closing connection in fs2-netty closes underlying Netty channel" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + (channel, socket) = x + + _ <- socket.close() + + _ <- channel.isClosed.flatMap(isClosed => IO(isClosed should beTrue)) + _ <- socket.isOpen.flatMap(isOpen => IO(isOpen should beFalse)) + _ <- socket.isClosed.flatMap(isClosed => IO(isClosed should beTrue)) + } yield ok + } + } + +// "byte to byte pipeline" should {} + +// "custom pipeline" should { +// repeat tests from above +// "pipelines that decode ByteBuf into I then fires channelRead, shows up in read stream" +// "pipelines that DO NOT decode ByteBuf into I but fire channelRead, DO NOT show up in read stream" +// "pipelines that encode O into ByteBuf (and go on to write to Netty), will send ByteBuf" +// "pipelines that encode O into ByteBuf (and DO NOT go on to write to Netty) (idk what kind of case this is), will NOT send ByteBuf" +// "pipelines that DO NOT encode O into ByteBuf, will NOT send ByteBuf" +// "pipelines that emit user triggered events of type E will show up in events stream" +// "pipelines that emit user triggered events of NOT type E will raise error in events stream" +// "connections that close, will shut off reads stream, writes will fail/cancel" +// "handlerAdded test???" +// "socket closes???" +// "pipelines that emit exceptions will raise error on reads stream" +// "pipelines that remove SocketHandler, will raise error on reads and events streams. 
Except in case of mutation" + +// "test I1->Bytes, then Bytes->I2 (pipeline), and I1 == I2...nvm ByteBuf can be thrown away, decode may be" + +// "non-deterministic by design. Too many assumptions. Maybe can provide tests for specific cases that require correctness property, but not all" in { +// ok +// } +// } + +// "there can be a pipeline that only sends, I = Nothing" in { ok } +// "there can be a pipeline that only receives, O = Nothing" in { ok } + +// "chunking..." in { ok } + +// "pipeline mutation" in { ok } + + private def byteToString(byte: Byte): String = { + val bytes = new Array[Byte](1) + bytes(0) = byte + new String(bytes) + } +} diff --git a/core/src/test/scala/fs2/netty/NetworkSpec.scala b/core/src/test/scala/fs2/netty/NetworkSpec.scala index 5aa3b22..26ba7c5 100644 --- a/core/src/test/scala/fs2/netty/NetworkSpec.scala +++ b/core/src/test/scala/fs2/netty/NetworkSpec.scala @@ -31,33 +31,33 @@ class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { Network[IO].use_.as(ok) } - "support a simple echo use-case" in withResource { net => - val data = List[Byte](1, 2, 3, 4, 5, 6, 7) - - val rsrc = net.serverResource(None, None) flatMap { - case (isa, incoming) => - val handler = incoming flatMap { socket => - socket.reads.through(socket.writes) - } - - for { - _ <- handler.compile.drain.background - - results <- net.client(isa) flatMap { socket => - Stream.emits(data) - .through(socket.writes) - .merge(socket.reads) - .take(data.length.toLong) - .compile.resource.toList - } - } yield results - } - - rsrc.use(IO.pure(_)) flatMap { results => - IO { - results mustEqual data - } - } - } +// "support a simple echo use-case" in withResource { net => +// val data = List[Byte](1, 2, 3, 4, 5, 6, 7) +// +// val rsrc = net.serverResource(None, None, Nil) flatMap { +// case (isa, incoming) => +// val handler = incoming flatMap { socket => +// socket.reads.through(socket.writes) +// } +// +// for { +// _ <- handler.compile.drain.background +// +// results <- net.client(isa) flatMap { socket => +// Stream.emits(data) +// .through(socket.writes) +// .merge(socket.reads) +// .take(data.length.toLong) +// .compile.resource.toList +// } +// } yield results +// } +// +// rsrc.use(IO.pure(_)) flatMap { results => +// IO { +// results mustEqual data +// } +// } +// } } } From 1ebacc2e84ba585a2dfb987c29d152650e7e58ab Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sat, 6 Mar 2021 23:17:52 -0500 Subject: [PATCH 05/23] Test pipeline exceptions --- .../main/scala/fs2/netty/SocketHandler.scala | 1 + .../scala/fs2/netty/NettyPipelineSpec.scala | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index c5228e1..eb8a099 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -107,6 +107,7 @@ private final class SocketHandler[F[_]: Async, I, O, +E]( case Right(i) => // TODO: what's the perf impact of unsafeRunSync vs unsafeRunAndForget? // FlowControlHandler & unsafeRunAndForget vs. unsafeRunSync-only? + // Review for other Netty methods as well. 
disp.unsafeRunSync(readsQueue.offer(i.asRight[Exception].some)) } } diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index 9f2953f..99a5692 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -174,6 +174,29 @@ class NettyPipelineSpec _ <- socket.isClosed.flatMap(isClosed => IO(isClosed should beTrue)) } yield ok } + + "exceptions in Netty pipeline raises an exception on the reads stream" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + (channel, socket) = x + + _ <- IO( + channel.underlying + .pipeline() + .fireExceptionCaught(new Throwable("unit test error")) + ) + + errMsg <- socket.reads + .map(_ => "") + .handleErrorWith(t => Stream.emit(t.getMessage)) + .compile + .last + } yield errMsg shouldEqual "unit test error".some + } + + // test reads, writes, events, and exceptions in combination to ensure order of events makes sense } // "byte to byte pipeline" should {} From 74c2da68dfc969e8eae9507b3bb60f42cba8deeb Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sun, 7 Mar 2021 13:09:32 -0500 Subject: [PATCH 06/23] WIP Pipeline mutations Fix echo pipe test --- .../main/scala/fs2/netty/DeadChannel.scala | 96 ++++++++++++++ .../main/scala/fs2/netty/SocketHandler.scala | 59 ++++++--- .../scala/fs2/netty/NettyPipelineSpec.scala | 117 ++++++++++++++---- 3 files changed, 227 insertions(+), 45 deletions(-) create mode 100644 core/src/main/scala/fs2/netty/DeadChannel.scala diff --git a/core/src/main/scala/fs2/netty/DeadChannel.scala b/core/src/main/scala/fs2/netty/DeadChannel.scala new file mode 100644 index 0000000..d407fca --- /dev/null +++ b/core/src/main/scala/fs2/netty/DeadChannel.scala @@ -0,0 +1,96 @@ +package fs2.netty + +import io.netty.buffer.ByteBufAllocator +import io.netty.channel.{Channel, ChannelConfig, ChannelFuture, ChannelId, ChannelMetadata, ChannelPipeline, ChannelProgressivePromise, ChannelPromise, EventLoop} +import io.netty.util.{Attribute, AttributeKey} + +import java.net.SocketAddress + +class DeadChannel(parent: Channel) extends Channel { + override def id(): ChannelId = parent.id() + + override def eventLoop(): EventLoop = parent.eventLoop() + + override def parent(): Channel = parent + + override def config(): ChannelConfig = parent.config() + + override def isOpen: Boolean = false + + override def isRegistered: Boolean = false + + override def isActive: Boolean = false + + override def metadata(): ChannelMetadata = parent.metadata + + override def localAddress(): SocketAddress = parent.localAddress + + override def remoteAddress(): SocketAddress = parent.remoteAddress + + override def closeFuture(): ChannelFuture = parent.voidPromise() + + override def isWritable: Boolean = false + + override def bytesBeforeUnwritable(): Long = parent.bytesBeforeUnwritable + + override def bytesBeforeWritable(): Long = parent.bytesBeforeWritable + + override def unsafe(): Channel.Unsafe = parent.unsafe() + + override def pipeline(): ChannelPipeline = parent.pipeline + + override def alloc(): ByteBufAllocator = parent.alloc + + override def read(): Channel = parent.read + + override def flush(): Channel = parent.flush + + override def compareTo(o: Channel): Int = parent.compareTo(o) + + override def attr[T](key: AttributeKey[T]): Attribute[T] = parent.attr(key) + + override def hasAttr[T](key: AttributeKey[T]): Boolean = 
false + + override def bind(localAddress: SocketAddress): ChannelFuture = parent.voidPromise() + + override def connect(remoteAddress: SocketAddress): ChannelFuture = parent.voidPromise() + + override def connect(remoteAddress: SocketAddress, localAddress: SocketAddress): ChannelFuture = parent.voidPromise() + + override def disconnect(): ChannelFuture = parent.voidPromise() + + override def close(): ChannelFuture = parent.voidPromise() + + override def deregister(): ChannelFuture = parent.voidPromise() + + override def bind(localAddress: SocketAddress, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def connect(remoteAddress: SocketAddress, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def connect(remoteAddress: SocketAddress, localAddress: SocketAddress, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def disconnect(promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def close(promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def deregister(promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def write(msg: Any): ChannelFuture = parent.voidPromise() + + override def write(msg: Any, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def writeAndFlush(msg: Any, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + + override def writeAndFlush(msg: Any): ChannelFuture = parent.voidPromise() + + override def newPromise(): ChannelPromise = parent.newPromise + + override def newProgressivePromise(): ChannelProgressivePromise = parent.newProgressivePromise + + override def newSucceededFuture(): ChannelFuture = parent.voidPromise() + + override def newFailedFuture(cause: Throwable): ChannelFuture = parent.voidPromise() + + override def voidPromise(): ChannelPromise = parent.voidPromise +} + diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index eb8a099..9da206c 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -18,18 +18,19 @@ package fs2 package netty import cats.effect.std.{Dispatcher, Queue} -import cats.effect.{Async, Poll, Sync} +import cats.effect.{Async, Concurrent, Deferred, Poll, Sync} import cats.syntax.all._ import cats.{Applicative, Functor} import io.netty.buffer.ByteBuf -import io.netty.channel.{Channel, ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelPipeline} -import io.netty.util.{ReferenceCountUtil, ReferenceCounted} +import io.netty.channel._ +import io.netty.util.ReferenceCountUtil -private final class SocketHandler[F[_]: Async, I, O, +E]( +private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( disp: Dispatcher[F], - channel: Channel, + private var channel: Channel, readsQueue: Queue[F, Option[Either[Throwable, I]]], - eventsQueue: Queue[F, E] + eventsQueue: Queue[F, E], + pipelineMutationSwitch: Deferred[F, Unit] )(implicit inboundDecoder: Socket.Decoder[I]) extends ChannelInboundHandlerAdapter with Socket[F, I, O, E] { @@ -63,6 +64,7 @@ private final class SocketHandler[F[_]: Async, I, O, +E]( ) } .unNoneTerminate + .interruptWhen(pipelineMutationSwitch.get.attempt) override lazy val reads: Stream[F, I] = Stream force { @@ -74,9 +76,16 @@ private final class SocketHandler[F[_]: Async, I, O, +E]( ) } + override lazy val events: Stream[F, E] = + Stream + .fromQueueUnterminated(eventsQueue) + 
.interruptWhen(pipelineMutationSwitch.get.attempt) + override def write(output: O): F[Unit] = fromNettyFuture[F]( - Sync[F].delay(channel.writeAndFlush(output)) + /* Sync[F].delay(println(debug(output))) *> */ Sync[F].delay( + channel.writeAndFlush(output) + ) ).void override val writes: Pipe[F, O, INothing] = @@ -141,29 +150,43 @@ private final class SocketHandler[F[_]: Async, I, O, +E]( case _ => () // TODO: probably raise error on stream... } - override lazy val events: Stream[F, E] = - Stream.fromQueueUnterminated(eventsQueue) - override def mutatePipeline[I2: Socket.Decoder, O2, E2]( mutator: ChannelPipeline => F[Unit] ): F[Socket[F, I2, O2, E2]] = - Sync[F] - .suspend(Sync.Type.Delay)(mutator(channel.pipeline())) - .flatMap(_ => SocketHandler[F, I2, O2, E2](disp, channel)) - .map( - identity - ) // TODO: why cannot compiler infer TcpSocketHandler in flatMap? + for { + _ <- pipelineMutationSwitch.complete( + () + ) // shutdown the events and reads streams + oldChannel = channel // Save reference, as we first stop socket processing + _ <- Sync[F].delay{ + channel = new DeadChannel(channel) + } // shutdown writes + // TODO: what if queues have elements in them? E.g. Netty is concurrently calling channel read. Protocols should disallow this for the most part. + _ <- Sync[F].delay(oldChannel.pipeline().removeLast()) + _ <- mutator(oldChannel.pipeline()) + sh <- SocketHandler[F, I2, O2, E2](disp, oldChannel) + _ <- Sync[F].delay(oldChannel.pipeline().addLast(sh)) // TODO: I feel like we should pass a name for debugging purposes...?! + } yield sh + + // not to self: if we want to schedule an action to be done when channel is closed, can also do `ctx.channel.closeFuture.addListener` } private object SocketHandler { - def apply[F[_]: Async, I: Socket.Decoder, O, E]( + def apply[F[_]: Async: Concurrent, I: Socket.Decoder, O, E]( disp: Dispatcher[F], channel: Channel ): F[SocketHandler[F, I, O, E]] = for { readsQueue <- Queue.unbounded[F, Option[Either[Throwable, I]]] eventsQueue <- Queue.unbounded[F, E] - } yield new SocketHandler(disp, channel, readsQueue, eventsQueue) + pipelineMutationSwitch <- Deferred[F, Unit] + } yield new SocketHandler( + disp, + channel, + readsQueue, + eventsQueue, + pipelineMutationSwitch + ) } diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index 99a5692..c2f7f57 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -8,7 +8,7 @@ import cats.syntax.all._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder -import io.netty.buffer.ByteBuf +import io.netty.buffer.{ByteBuf, Unpooled} import org.specs2.mutable.SpecificationLike import scala.concurrent.duration._ @@ -109,35 +109,44 @@ class NettyPipelineSpec IO(channel.underlying.readOutbound[ByteBuf]()) .flatMap(bb => IO(bb.readByte())) } - .map(_.foldMap(byteToString)) + .map(_.toArray) + .map(new String(_)) _ <- IO(str shouldEqual "hello world") } yield ok } -// "piping any reads to writes just echos back ByteBuf's written onto Netty's channel" in withResource { -// dispatcher => -// for { -// pipeline <- NettyPipeline[IO](dispatcher) -// x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) -// (channel, socket) = x -// -// encoder = implicitly[Encoder[Byte]] -// byteBufs = "hello world".getBytes().map(encoder.encode).toList -// -// _ 
<- socket.reads.through(socket.writes).take(11).compile.drain -// -// str <- (0 until 11).toList -// .traverse { _ => -// IO(channel.underlying.readOutbound[ByteBuf]()) -// .flatMap(bb => IO(bb.readByte())) -// } -// .map(_.toArray) -// .map(new String(_)) -// -// _ <- IO(str shouldEqual "hello world") -// } yield ok -// } + "piping any reads to writes just echos back ByteBuf's written onto Netty's channel" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + (channel, socket) = x + + encoder = implicitly[Encoder[Byte]] + byteBufs = "hello world".getBytes().map(encoder.encode).toList + + _ <- channel + .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) + _ <- socket.reads + // fs2-netty automatically releases + .evalMap(bb => IO(bb.retain())) + .take(11) + .through(socket.writes) + .compile + .drain + + str <- (0 until 11).toList + .traverse { _ => + IO(channel.underlying.readOutbound[ByteBuf]()) + .flatMap(bb => IO(bb.readByte())) + } + .map(_.toArray) + .map(new String(_)) + + _ <- IO(str shouldEqual "hello world") + } yield ok + } "closed connection in Netty appears as closed streams in fs2-netty" in withResource { dispatcher => @@ -196,6 +205,62 @@ class NettyPipelineSpec } yield errMsg shouldEqual "unit test error".some } + "mutations" should { + "no-op mutation creates a Socket with same behavior as original, while original Socket is unregistered from pipeline and channel" in withResource { + dispatcher => + for { + // Given a channel and socket for the default pipeline + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + pipeline + ) + (channel, socket) = x + + // When performing a no-op socket pipeline mutation + newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf, Nothing](_ => + IO.unit + ) + + // Then new socket should be able to receive and write ByteBuf's + encoder = implicitly[Encoder[Byte]] + byteBufs = "hello world".getBytes().map(encoder.encode).toList + _ <- channel + .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) + _ <- newSocket.reads + // fs2-netty automatically releases + .evalMap(bb => IO(bb.retain())) + .take(11) + .through(newSocket.writes) + .compile + .drain + + str <- (0 until 11).toList + .traverse { _ => + IO(channel.underlying.readOutbound[ByteBuf]()) + .flatMap(bb => IO(bb.readByte())) + } + .map(_.toArray) + .map(new String(_)) + + _ <- IO(str shouldEqual "hello world") + + // And old socket should not receive any of the ByteBuf's + oldSocketReads <- socket.reads + .interruptAfter(1.second) + .compile + .toList + _ <- IO(oldSocketReads should beEmpty) + + // Nor should old socket be able to write. + oldSocketWrite <- socket.write(Unpooled.EMPTY_BUFFER).attempt + _ <- IO(oldSocketWrite.isLeft should beTrue) + _ <- IO(channel.underlying.outboundMessages().isEmpty should beTrue) + } yield ok + } + + // varies I/O types and along with adding a handler that changes byteBufs to constant strings, affects reads stream and socket writes + } + // test reads, writes, events, and exceptions in combination to ensure order of events makes sense } @@ -227,8 +292,6 @@ class NettyPipelineSpec // "chunking..." 
in { ok } -// "pipeline mutation" in { ok } - private def byteToString(byte: Byte): String = { val bytes = new Array[Byte](1) bytes(0) = byte From 55600f59b8549216191cf0e4ff33e1df9bd3195c Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sun, 7 Mar 2021 14:17:06 -0500 Subject: [PATCH 07/23] Clean up --- .../{DeadChannel.scala => NoopChannel.scala} | 82 ++++++++++++++----- core/src/main/scala/fs2/netty/Socket.scala | 1 + .../main/scala/fs2/netty/SocketHandler.scala | 13 ++- .../fs2/netty/incudator/http/WebSocket.scala | 2 + .../scala/fs2/netty/NettyPipelineSpec.scala | 29 ++++--- 5 files changed, 90 insertions(+), 37 deletions(-) rename core/src/main/scala/fs2/netty/{DeadChannel.scala => NoopChannel.scala} (50%) diff --git a/core/src/main/scala/fs2/netty/DeadChannel.scala b/core/src/main/scala/fs2/netty/NoopChannel.scala similarity index 50% rename from core/src/main/scala/fs2/netty/DeadChannel.scala rename to core/src/main/scala/fs2/netty/NoopChannel.scala index d407fca..638fb35 100644 --- a/core/src/main/scala/fs2/netty/DeadChannel.scala +++ b/core/src/main/scala/fs2/netty/NoopChannel.scala @@ -1,12 +1,18 @@ package fs2.netty import io.netty.buffer.ByteBufAllocator -import io.netty.channel.{Channel, ChannelConfig, ChannelFuture, ChannelId, ChannelMetadata, ChannelPipeline, ChannelProgressivePromise, ChannelPromise, EventLoop} +import io.netty.channel._ import io.netty.util.{Attribute, AttributeKey} import java.net.SocketAddress -class DeadChannel(parent: Channel) extends Channel { +/** + * Void Channel for SocketHandler to prevent writes and further channel effects. + * Reading state of parent channel is still allowed as it is safe, i.e. no side-effects. + * @param parent Channel to reference for reading state + */ +class NoopChannel(parent: Channel) extends Channel { + override def id(): ChannelId = parent.id() override def eventLoop(): EventLoop = parent.eventLoop() @@ -15,11 +21,11 @@ class DeadChannel(parent: Channel) extends Channel { override def config(): ChannelConfig = parent.config() - override def isOpen: Boolean = false + override def isOpen: Boolean = parent.isOpen - override def isRegistered: Boolean = false + override def isRegistered: Boolean = parent.isRegistered - override def isActive: Boolean = false + override def isActive: Boolean = parent.isActive override def metadata(): ChannelMetadata = parent.metadata @@ -49,13 +55,18 @@ class DeadChannel(parent: Channel) extends Channel { override def attr[T](key: AttributeKey[T]): Attribute[T] = parent.attr(key) - override def hasAttr[T](key: AttributeKey[T]): Boolean = false + override def hasAttr[T](key: AttributeKey[T]): Boolean = parent.hasAttr(key) - override def bind(localAddress: SocketAddress): ChannelFuture = parent.voidPromise() + override def bind(localAddress: SocketAddress): ChannelFuture = + parent.voidPromise() - override def connect(remoteAddress: SocketAddress): ChannelFuture = parent.voidPromise() + override def connect(remoteAddress: SocketAddress): ChannelFuture = + parent.voidPromise() - override def connect(remoteAddress: SocketAddress, localAddress: SocketAddress): ChannelFuture = parent.voidPromise() + override def connect( + remoteAddress: SocketAddress, + localAddress: SocketAddress + ): ChannelFuture = parent.voidPromise() override def disconnect(): ChannelFuture = parent.voidPromise() @@ -63,34 +74,61 @@ class DeadChannel(parent: Channel) extends Channel { override def deregister(): ChannelFuture = parent.voidPromise() - override def bind(localAddress: SocketAddress, promise: ChannelPromise): 
ChannelFuture = parent.voidPromise() + override def bind( + localAddress: SocketAddress, + promise: ChannelPromise + ): ChannelFuture = parent.voidPromise() + + override def connect( + remoteAddress: SocketAddress, + promise: ChannelPromise + ): ChannelFuture = parent.voidPromise() - override def connect(remoteAddress: SocketAddress, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + override def connect( + remoteAddress: SocketAddress, + localAddress: SocketAddress, + promise: ChannelPromise + ): ChannelFuture = parent.voidPromise() - override def connect(remoteAddress: SocketAddress, localAddress: SocketAddress, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + override def disconnect(promise: ChannelPromise): ChannelFuture = + parent.voidPromise() - override def disconnect(promise: ChannelPromise): ChannelFuture = parent.voidPromise() + override def close(promise: ChannelPromise): ChannelFuture = + parent.voidPromise() - override def close(promise: ChannelPromise): ChannelFuture = parent.voidPromise() + override def deregister(promise: ChannelPromise): ChannelFuture = + parent.voidPromise() - override def deregister(promise: ChannelPromise): ChannelFuture = parent.voidPromise() + /* + Below are the key methods we want to overwrite to stop writes + */ - override def write(msg: Any): ChannelFuture = parent.voidPromise() + override def write(msg: Any): ChannelFuture = + parent.newPromise().setFailure(new NoopChannel.NoopFailure) - override def write(msg: Any, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + override def write(msg: Any, promise: ChannelPromise): ChannelFuture = + parent.newPromise().setFailure(new NoopChannel.NoopFailure) - override def writeAndFlush(msg: Any, promise: ChannelPromise): ChannelFuture = parent.voidPromise() + override def writeAndFlush(msg: Any, promise: ChannelPromise): ChannelFuture = + parent.newPromise().setFailure(new NoopChannel.NoopFailure) - override def writeAndFlush(msg: Any): ChannelFuture = parent.voidPromise() + override def writeAndFlush(msg: Any): ChannelFuture = + parent.newPromise().setFailure(new NoopChannel.NoopFailure) override def newPromise(): ChannelPromise = parent.newPromise - override def newProgressivePromise(): ChannelProgressivePromise = parent.newProgressivePromise + override def newProgressivePromise(): ChannelProgressivePromise = + parent.newProgressivePromise - override def newSucceededFuture(): ChannelFuture = parent.voidPromise() + override def newSucceededFuture(): ChannelFuture = + parent.newPromise().setFailure(new NoopChannel.NoopFailure) - override def newFailedFuture(cause: Throwable): ChannelFuture = parent.voidPromise() + override def newFailedFuture(cause: Throwable): ChannelFuture = + parent.newPromise().setFailure(new NoopChannel.NoopFailure) override def voidPromise(): ChannelPromise = parent.voidPromise } +object NoopChannel { + private class NoopFailure extends Throwable("Noop channel") +} diff --git a/core/src/main/scala/fs2/netty/Socket.scala b/core/src/main/scala/fs2/netty/Socket.scala index f554d3f..eb6bc05 100644 --- a/core/src/main/scala/fs2/netty/Socket.scala +++ b/core/src/main/scala/fs2/netty/Socket.scala @@ -44,6 +44,7 @@ trait Socket[F[_], I, O, +E] { def isOpen: F[Boolean] def isClosed: F[Boolean] + def isDetached: F[Boolean] def close(): F[Unit] diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index 9da206c..73c44cf 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ 
b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -94,7 +94,10 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( override val isOpen: F[Boolean] = Sync[F].delay(channel.isOpen) - override def isClosed: F[Boolean] = isOpen.map(bool => !bool) + override val isClosed: F[Boolean] = isOpen.map(bool => !bool) + + override val isDetached: F[Boolean] = + Sync[F].delay(channel.isInstanceOf[NoopChannel]) override def close(): F[Unit] = fromNettyFuture[F]( Sync[F].delay(channel.close()) @@ -158,14 +161,16 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( () ) // shutdown the events and reads streams oldChannel = channel // Save reference, as we first stop socket processing - _ <- Sync[F].delay{ - channel = new DeadChannel(channel) + _ <- Sync[F].delay { + channel = new NoopChannel(channel) } // shutdown writes // TODO: what if queues have elements in them? E.g. Netty is concurrently calling channel read. Protocols should disallow this for the most part. _ <- Sync[F].delay(oldChannel.pipeline().removeLast()) _ <- mutator(oldChannel.pipeline()) sh <- SocketHandler[F, I2, O2, E2](disp, oldChannel) - _ <- Sync[F].delay(oldChannel.pipeline().addLast(sh)) // TODO: I feel like we should pass a name for debugging purposes...?! + _ <- Sync[F].delay( + oldChannel.pipeline().addLast(sh) + ) // TODO: I feel like we should pass a name for debugging purposes...?! } yield sh // not to self: if we want to schedule an action to be done when channel is closed, can also do `ctx.channel.closeFuture.addListener` diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala index 31da408..5152c73 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -48,6 +48,8 @@ class WebSocket[F[_], U]( override def isClosed: F[Boolean] = underlying.isClosed + override def isDetached: F[Boolean] = underlying.isDetached + override def close(): F[Unit] = underlying.close() override def mutatePipeline[I2: Socket.Decoder, O2, E2]( diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index c2f7f57..55b3f99 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -27,8 +27,7 @@ class NettyPipelineSpec pipeline <- NettyPipeline[IO](dispatcher) socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( pipeline - ) - .map(_._2) + ).map(_._2) reads <- socket.reads .interruptAfter(1.second) @@ -43,8 +42,7 @@ class NettyPipelineSpec pipeline <- NettyPipeline[IO](dispatcher) socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( pipeline - ) - .map(_._2) + ).map(_._2) events: List[Nothing] <- socket.events .interruptAfter(1.second) @@ -129,7 +127,7 @@ class NettyPipelineSpec _ <- channel .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) _ <- socket.reads - // fs2-netty automatically releases + // fs2-netty automatically releases .evalMap(bb => IO(bb.retain())) .take(11) .through(socket.writes) @@ -216,10 +214,12 @@ class NettyPipelineSpec ) (channel, socket) = x + // Then socket is attached to a pipeline + _ <- socket.isDetached.map(_ should beFalse) + // When performing a no-op socket pipeline mutation newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf, Nothing](_ => - IO.unit - ) + IO.unit) // Then new socket should be able to receive and write ByteBuf's encoder 
= implicitly[Encoder[Byte]] @@ -227,13 +227,12 @@ class NettyPipelineSpec _ <- channel .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) _ <- newSocket.reads - // fs2-netty automatically releases + // fs2-netty automatically releases .evalMap(bb => IO(bb.retain())) .take(11) .through(newSocket.writes) .compile .drain - str <- (0 until 11).toList .traverse { _ => IO(channel.underlying.readOutbound[ByteBuf]()) @@ -241,9 +240,14 @@ class NettyPipelineSpec } .map(_.toArray) .map(new String(_)) - _ <- IO(str shouldEqual "hello world") + // And new socket is attached to a pipeline + _ <- newSocket.isDetached.map(_ should beFalse) + + // And old socket is no longer attached to a pipeline + _ <- socket.isDetached.map(_ should beTrue) + // And old socket should not receive any of the ByteBuf's oldSocketReads <- socket.reads .interruptAfter(1.second) @@ -253,7 +257,10 @@ class NettyPipelineSpec // Nor should old socket be able to write. oldSocketWrite <- socket.write(Unpooled.EMPTY_BUFFER).attempt - _ <- IO(oldSocketWrite.isLeft should beTrue) + _ <- IO(oldSocketWrite should beLeft[Throwable].like { + case t => + t.getMessage should_=== ("Noop channel") + }) _ <- IO(channel.underlying.outboundMessages().isEmpty should beTrue) } yield ok } From 843a2f53190512744ba5bb2e833cb3f665abba3c Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sun, 7 Mar 2021 22:40:02 -0500 Subject: [PATCH 08/23] Test pipeline mutations with different handlers and types --- .../scala/fs2/netty/NettyPipelineSpec.scala | 55 +++++++++++++++++-- 1 file changed, 49 insertions(+), 6 deletions(-) diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index 55b3f99..5cfbfe2 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -9,6 +9,7 @@ import fs2.netty.embedded.Fs2NettyEmbeddedChannel import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder import io.netty.buffer.{ByteBuf, Unpooled} +import io.netty.handler.codec.bytes.{ByteArrayDecoder, ByteArrayEncoder} import org.specs2.mutable.SpecificationLike import scala.concurrent.duration._ @@ -127,7 +128,7 @@ class NettyPipelineSpec _ <- channel .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) _ <- socket.reads - // fs2-netty automatically releases + // fs2-netty automatically releases .evalMap(bb => IO(bb.retain())) .take(11) .through(socket.writes) @@ -219,7 +220,8 @@ class NettyPipelineSpec // When performing a no-op socket pipeline mutation newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf, Nothing](_ => - IO.unit) + IO.unit + ) // Then new socket should be able to receive and write ByteBuf's encoder = implicitly[Encoder[Byte]] @@ -227,7 +229,7 @@ class NettyPipelineSpec _ <- channel .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) _ <- newSocket.reads - // fs2-netty automatically releases + // fs2-netty automatically releases .evalMap(bb => IO(bb.retain())) .take(11) .through(newSocket.writes) @@ -257,15 +259,56 @@ class NettyPipelineSpec // Nor should old socket be able to write. 
oldSocketWrite <- socket.write(Unpooled.EMPTY_BUFFER).attempt - _ <- IO(oldSocketWrite should beLeft[Throwable].like { - case t => - t.getMessage should_=== ("Noop channel") + _ <- IO(oldSocketWrite should beLeft[Throwable].like { case t => + t.getMessage should_=== ("Noop channel") }) _ <- IO(channel.underlying.outboundMessages().isEmpty should beTrue) } yield ok } // varies I/O types and along with adding a handler that changes byteBufs to constant strings, affects reads stream and socket writes + "vary the Socket types" in withResource { dispatcher => + for { + // Given a channel and socket for the default pipeline + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + pipeline + ) + (channel, socket) = x + + pipelineDecoder = new Socket.Decoder[Array[Byte]] { + override def decode(x: AnyRef): Either[String, Array[Byte]] = + x match { + case array: Array[Byte] => array.asRight[String] + case _ => + "whoops, pipeline is misconfigured".asLeft[Array[Byte]] + } + } + byteSocket <- socket + .mutatePipeline[Array[Byte], Array[Byte], Nothing] { pipeline => + for { + _ <- IO(pipeline.addLast(new ByteArrayDecoder)) + _ <- IO(pipeline.addLast(new ByteArrayEncoder)) + } yield () + }(pipelineDecoder) + + byteBuf = implicitly[Encoder[Array[Byte]]] + .encode("hello world".getBytes()) + _ <- channel + .writeAllInboundThenFlushThenRunAllPendingTasks(byteBuf) + _ <- byteSocket.reads + .take(1) + .through(byteSocket.writes) + .compile + .drain + str <- IO(channel.underlying.readOutbound[ByteBuf]()) + .flatTap(bb => IO(bb.readableBytes() shouldEqual 11)) + .tupleRight(new Array[Byte](11)) + .flatMap { case (buf, bytes) => IO(buf.readBytes(bytes)).as(bytes) } + .map(new String(_)) + _ <- IO(str shouldEqual "hello world") + } yield ok + } } // test reads, writes, events, and exceptions in combination to ensure order of events makes sense From 169e8e5a4c639d67ca74823bfa51aded5a917e18 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sun, 7 Mar 2021 23:19:04 -0500 Subject: [PATCH 09/23] Clean up spec --- .../scala/fs2/netty/NettyPipelineSpec.scala | 30 +++---------------- 1 file changed, 4 insertions(+), 26 deletions(-) diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index 5cfbfe2..3d7adce 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -311,35 +311,13 @@ class NettyPipelineSpec } } + // pipeline mutation error + + // socket decode error + // test reads, writes, events, and exceptions in combination to ensure order of events makes sense } -// "byte to byte pipeline" should {} - -// "custom pipeline" should { -// repeat tests from above -// "pipelines that decode ByteBuf into I then fires channelRead, shows up in read stream" -// "pipelines that DO NOT decode ByteBuf into I but fire channelRead, DO NOT show up in read stream" -// "pipelines that encode O into ByteBuf (and go on to write to Netty), will send ByteBuf" -// "pipelines that encode O into ByteBuf (and DO NOT go on to write to Netty) (idk what kind of case this is), will NOT send ByteBuf" -// "pipelines that DO NOT encode O into ByteBuf, will NOT send ByteBuf" -// "pipelines that emit user triggered events of type E will show up in events stream" -// "pipelines that emit user triggered events of NOT type E will raise error in events stream" -// "connections that close, will shut off reads stream, writes will fail/cancel" -// "handlerAdded 
test???" -// "socket closes???" -// "pipelines that emit exceptions will raise error on reads stream" -// "pipelines that remove SocketHandler, will raise error on reads and events streams. Except in case of mutation" - -// "test I1->Bytes, then Bytes->I2 (pipeline), and I1 == I2...nvm ByteBuf can be thrown away, decode may be" + -// "non-deterministic by design. Too many assumptions. Maybe can provide tests for specific cases that require correctness property, but not all" in { -// ok -// } -// } - -// "there can be a pipeline that only sends, I = Nothing" in { ok } -// "there can be a pipeline that only receives, O = Nothing" in { ok } - // "chunking..." in { ok } private def byteToString(byte: Byte): String = { From 4486a97fb09d5a75d66cdba966cff219a34107e1 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Mon, 8 Mar 2021 19:32:45 -0500 Subject: [PATCH 10/23] Create a BytePipeline --- .../fs2/netty/NettyChannelInitializer.scala | 16 ++++ .../main/scala/fs2/netty/NettyPipeline.scala | 10 +-- .../embedded/Fs2NettyEmbeddedChannel.scala | 21 +++-- .../fs2/netty/pipeline/BytePipeline.scala | 84 +++++++++++++++++++ .../scala/fs2/netty/NettyPipelineSpec.scala | 14 ++-- .../fs2/netty/pipeline/BytePipelineSpec.scala | 43 ++++++++++ 6 files changed, 163 insertions(+), 25 deletions(-) create mode 100644 core/src/main/scala/fs2/netty/NettyChannelInitializer.scala create mode 100644 core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala create mode 100644 core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala diff --git a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala new file mode 100644 index 0000000..56415f8 --- /dev/null +++ b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala @@ -0,0 +1,16 @@ +package fs2.netty + +import io.netty.channel.{Channel, ChannelInitializer} +import io.netty.channel.socket.SocketChannel + +trait NettyChannelInitializer[F[_], I, O, E] { + + def toSocketChannelInitializer( + cb: Socket[F, I, O, E] => F[Unit] + ): F[ChannelInitializer[SocketChannel]] = + toChannelInitializer[SocketChannel](cb) + + def toChannelInitializer[C <: Channel]( + cb: Socket[F, I, O, E] => F[Unit] + ): F[ChannelInitializer[C]] +} diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala index 92240c3..c156ac7 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -20,7 +20,6 @@ import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ import io.netty.buffer.ByteBuf -import io.netty.channel.socket.SocketChannel import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer} // TODO: account for Sharable annotation, some of these need to be an eval, and evaluated each time whereas others can be eagerly evaluated. @@ -28,16 +27,11 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E]( handlers: List[ChannelHandler] )( dispatcher: Dispatcher[F] -) { +) extends NettyChannelInitializer[F, I, O, E] { // TODO: there are other interesting type of channels // TODO: Remember ChannelInitializer is Sharable! 
- def toSocketChannelInitializer( - cb: Socket[F, I, O, E] => F[Unit] - ): F[ChannelInitializer[SocketChannel]] = - toChannelInitializer[SocketChannel](cb) - - def toChannelInitializer[C <: Channel]( + override def toChannelInitializer[C <: Channel]( cb: Socket[F, I, O, E] => F[Unit] ): F[ChannelInitializer[C]] = Sync[F].delay { (ch: C) => { diff --git a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala index 3e9b693..cf06357 100644 --- a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala +++ b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala @@ -20,7 +20,7 @@ package netty.embedded import cats.effect.{Async, Sync} import cats.implicits._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder -import fs2.netty.{NettyPipeline, Socket} +import fs2.netty.{NettyChannelInitializer, Socket} import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.embedded.EmbeddedChannel @@ -31,7 +31,7 @@ import io.netty.channel.embedded.EmbeddedChannel * @param F * @tparam F */ -final case class Fs2NettyEmbeddedChannel[F[_]]( +final case class Fs2NettyEmbeddedChannel[F[_]] private ( underlying: EmbeddedChannelWithAutoRead )(implicit F: Sync[F] @@ -50,12 +50,11 @@ final case class Fs2NettyEmbeddedChannel[F[_]]( } yield () /** - * - * @param a - * @param encoder - * @tparam A - * @return `true` if the write operation did add something to the inbound buffer - */ + * @param a + * @param encoder + * @tparam A + * @return `true` if the write operation did add something to the inbound buffer + */ def writeAllInboundThenFlushThenRunAllPendingTasks[A](a: A*)(implicit encoder: Encoder[A] ): F[Boolean] = for { @@ -77,14 +76,14 @@ final case class Fs2NettyEmbeddedChannel[F[_]]( object Fs2NettyEmbeddedChannel { def apply[F[_], I, O, E]( - nettyPipeline: NettyPipeline[F, I, O, E] + initializer: NettyChannelInitializer[F, I, O, E] )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, I, O, E])] = for { channel <- F.delay( new EmbeddedChannelWithAutoRead() ) // With FlowControl/Dispatcher fixes, EmbeddedChannelWithAutoRead might not be needed after all. 
socket <- F.async[Socket[F, I, O, E]] { cb => - nettyPipeline + initializer .toChannelInitializer[EmbeddedChannel] { socket => F.delay(cb(socket.asRight[Throwable])) } @@ -109,7 +108,7 @@ object Fs2NettyEmbeddedChannel { Unpooled.wrappedBuffer(a) implicit val byteEncoder: Encoder[Byte] = (a: Byte) => - Unpooled.buffer(1, 1).writeByte(a) + Unpooled.buffer(1, 1).writeByte(a.toInt) implicit val stringEncoder: Encoder[String] = (str: String) => byteArrayEncoder.encode(str.getBytes) diff --git a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala new file mode 100644 index 0000000..2499f65 --- /dev/null +++ b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala @@ -0,0 +1,84 @@ +package fs2 +package netty.pipeline + +import cats.effect.std.Dispatcher +import cats.effect.{Async, Sync} +import cats.syntax.all._ +import fs2.netty.pipeline.BytePipeline._ +import fs2.netty.{NettyChannelInitializer, NettyPipeline, Socket} +import fs2.{Chunk, INothing, Pipe} +import io.netty.buffer.{ByteBuf, Unpooled} +import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} +import io.netty.handler.codec.bytes.ByteArrayDecoder + +class BytePipeline[F[_]: Async](dispatcher: Dispatcher[F]) + extends NettyChannelInitializer[F, Byte, Chunk[Byte], Nothing] { + + private val byteArrayPipeline = + new NettyPipeline[F, Array[Byte], ByteBuf, Nothing]( + handlers = List( + new ByteArrayDecoder + ) // TODO: eval, not sharable. Test with 2 channels using same NettyPipeline class. + )(dispatcher) + + override def toChannelInitializer[C <: Channel]( + cb: Socket[F, Byte, Chunk[Byte], Nothing] => F[Unit] + ): F[ChannelInitializer[C]] = + byteArrayPipeline + .toChannelInitializer { byteArraySocket => + Sync[F].delay(new ChunkingByteSocket[F](byteArraySocket)).flatMap(cb) + } +} + +object BytePipeline { + + def apply[F[_]: Async](dispatcher: Dispatcher[F]): F[BytePipeline[F]] = + Sync[F].delay(new BytePipeline(dispatcher)) + + implicit val byteArraySocketDecoder: Socket.Decoder[Array[Byte]] = { + case array: Array[Byte] => array.asRight[String] + case _ => + "pipeline is misconfigured".asLeft[Array[Byte]] + } + + private class ChunkingByteSocket[F[_]: Async]( + socket: Socket[F, Array[Byte], ByteBuf, Nothing] + ) extends Socket[F, Byte, Chunk[Byte], Nothing] { + + override lazy val reads: fs2.Stream[F, Byte] = + socket.reads.map(Chunk.array(_)).flatMap(Stream.chunk) + + override lazy val events: fs2.Stream[F, Nothing] = socket.events + + override def write(output: Chunk[Byte]): F[Unit] = + socket.write(toByteBuf(output)) + + override lazy val writes: Pipe[F, Chunk[Byte], INothing] = + _.map(toByteBuf).through(socket.writes) + + override val isOpen: F[Boolean] = socket.isOpen + + override val isClosed: F[Boolean] = socket.isClosed + + override val isDetached: F[Boolean] = socket.isDetached + + override def close(): F[Unit] = socket.close() + + override def mutatePipeline[I2: Socket.Decoder, O2, E2]( + mutator: ChannelPipeline => F[Unit] + ): F[Socket[F, I2, O2, E2]] = socket.mutatePipeline[I2, O2, E2](mutator) + + // TODO: alloc over unpooled? 
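  // One possible answer to the TODO above, left as a sketch only: copy the
  // chunk into Netty's default pooled allocator instead of wrapping it with
  // Unpooled (assumes io.netty.buffer.PooledByteBufAllocator is acceptable
  // here; whether pooling wins depends on chunk sizes and release discipline):
  //   private[this] def toPooledByteBuf(chunk: Chunk[Byte]): ByteBuf = {
  //     val arr = chunk.toArray
  //     PooledByteBufAllocator.DEFAULT.buffer(arr.length).writeBytes(arr)
  //   }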
+ private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = + chunk match { + case Chunk.ArraySlice(arr, off, len) => + Unpooled.wrappedBuffer(arr, off, len) + + case c: Chunk.ByteBuffer => + Unpooled.wrappedBuffer(c.toByteBuffer) + + case c => + Unpooled.wrappedBuffer(c.toArray) + } + } +} diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index 3d7adce..673d825 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -301,6 +301,7 @@ class NettyPipelineSpec .through(byteSocket.writes) .compile .drain + str <- IO(channel.underlying.readOutbound[ByteBuf]()) .flatTap(bb => IO(bb.readableBytes() shouldEqual 11)) .tupleRight(new Array[Byte](11)) @@ -309,16 +310,17 @@ class NettyPipelineSpec _ <- IO(str shouldEqual "hello world") } yield ok } - } - // pipeline mutation error + // pipeline mutation error - // socket decode error + // socket decode error - // test reads, writes, events, and exceptions in combination to ensure order of events makes sense - } + // test reads, writes, events, and exceptions in combination to ensure order of events makes sense + } -// "chunking..." in { ok } + // eval handlers + // test pipeline with ByteArrayEncoder/Decoder passed into pipeline, not mutation + } private def byteToString(byte: Byte): String = { val bytes = new Array[Byte](1) diff --git a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala new file mode 100644 index 0000000..ef930f7 --- /dev/null +++ b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala @@ -0,0 +1,43 @@ +package fs2.netty.pipeline + +import cats.effect.std.Dispatcher +import cats.effect.testing.specs2.CatsResource +import cats.effect.{IO, Resource} +import cats.syntax.all._ +import fs2.Chunk +import fs2.netty.embedded.Fs2NettyEmbeddedChannel +import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ +import io.netty.buffer.ByteBuf +import org.specs2.mutable.SpecificationLike + +class BytePipelineSpec + extends CatsResource[IO, Dispatcher[IO]] + with SpecificationLike { + + override val resource: Resource[IO, Dispatcher[IO]] = Dispatcher[IO] + + "can echo back what is written" in withResource { dispatcher => + for { + pipeline <- BytePipeline(dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte], Nothing](pipeline) + (channel, socket) = x + + _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks("hello world") + _ <- socket.reads + .take(5) + .chunks + .through(socket.writes) + .compile + .drain + + str <- IO(channel.underlying.readOutbound[ByteBuf]()) + .flatTap(bb => IO(bb.readableBytes() shouldEqual 5)) + .tupleRight(new Array[Byte](5)) + .flatMap { case (buf, bytes) => IO(buf.readBytes(bytes)).as(bytes) } + .map(new String(_)) + + _ <- IO(str shouldEqual "hello") + } yield ok + } + +} From 5cfc196777fcecbba1f8fef32d7d37f2635a28b6 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Mon, 8 Mar 2021 19:46:04 -0500 Subject: [PATCH 11/23] Create alternative byte pipeline --- .../pipeline/AlternativeBytePipeline.scala | 83 +++++++++++++++++++ .../fs2/netty/pipeline/BytePipelineSpec.scala | 24 ++++++ 2 files changed, 107 insertions(+) create mode 100644 core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala diff --git a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala new file mode 
100644 index 0000000..9801d30 --- /dev/null +++ b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala @@ -0,0 +1,83 @@ +package fs2.netty.pipeline + +import cats.effect.std.Dispatcher +import cats.effect.{Async, Sync} +import cats.syntax.all._ +import fs2.netty.pipeline.AlternativeBytePipeline.ByteBufToByteChunkSocket +import fs2.{Chunk, INothing, Pipe, Stream} +import fs2.netty.pipeline.BytePipeline._ +import fs2.netty.{NettyChannelInitializer, NettyPipeline, Socket} +import io.netty.buffer.{ByteBuf, ByteBufUtil, Unpooled} +import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} +import io.netty.handler.codec.bytes.ByteArrayDecoder + +// This class and BytePipeline highlight the different way to create +// sockets, i.e. rely on Netty handlers or encode transforms in fs2. +class AlternativeBytePipeline[F[_]: Async]( + byteBufPipeline: NettyPipeline[F, ByteBuf, ByteBuf, Nothing] +) extends NettyChannelInitializer[F, Byte, Chunk[Byte], Nothing] { + + override def toChannelInitializer[C <: Channel]( + cb: Socket[F, Byte, Chunk[Byte], Nothing] => F[Unit] + ): F[ChannelInitializer[C]] = + byteBufPipeline + .toChannelInitializer { byteBufSocket => + Sync[F] + .delay(new ByteBufToByteChunkSocket[F](byteBufSocket)) + .flatMap(cb) + } +} + +object AlternativeBytePipeline { + + def apply[F[_]: Async]( + dispatcher: Dispatcher[F] + ): F[AlternativeBytePipeline[F]] = + for { + byteBufPipeline <- NettyPipeline.apply[F](dispatcher) + } yield new AlternativeBytePipeline(byteBufPipeline) + + private class ByteBufToByteChunkSocket[F[_]: Async]( + socket: Socket[F, ByteBuf, ByteBuf, Nothing] + ) extends Socket[F, Byte, Chunk[Byte], Nothing] { + + override lazy val reads: fs2.Stream[F, Byte] = + socket.reads + .evalMap(bb => + Sync[F].delay(ByteBufUtil.getBytes(bb)).map(Chunk.array(_)) + ) + .flatMap(Stream.chunk) + + override lazy val events: fs2.Stream[F, Nothing] = socket.events + + override def write(output: Chunk[Byte]): F[Unit] = + socket.write(toByteBuf(output)) + + override lazy val writes: Pipe[F, Chunk[Byte], INothing] = + _.map(toByteBuf).through(socket.writes) + + override val isOpen: F[Boolean] = socket.isOpen + + override val isClosed: F[Boolean] = socket.isClosed + + override val isDetached: F[Boolean] = socket.isDetached + + override def close(): F[Unit] = socket.close() + + override def mutatePipeline[I2: Socket.Decoder, O2, E2]( + mutator: ChannelPipeline => F[Unit] + ): F[Socket[F, I2, O2, E2]] = socket.mutatePipeline[I2, O2, E2](mutator) + + private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = + chunk match { + case Chunk.ArraySlice(arr, off, len) => + Unpooled.wrappedBuffer(arr, off, len) + + case c: Chunk.ByteBuffer => + Unpooled.wrappedBuffer(c.toByteBuffer) + + case c => + Unpooled.wrappedBuffer(c.toArray) + } + } +} diff --git a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala index ef930f7..f5eb802 100644 --- a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala @@ -40,4 +40,28 @@ class BytePipelineSpec } yield ok } + "alternative can echo back what is written" in withResource { dispatcher => + for { + pipeline <- AlternativeBytePipeline(dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte], Nothing](pipeline) + (channel, socket) = x + + _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks("hello world") + _ <- socket.reads + .take(5) + .chunks + .through(socket.writes) + 
.compile + .drain + + str <- IO(channel.underlying.readOutbound[ByteBuf]()) + .flatTap(bb => IO(bb.readableBytes() shouldEqual 5)) + .tupleRight(new Array[Byte](5)) + .flatMap { case (buf, bytes) => IO(buf.readBytes(bytes)).as(bytes) } + .map(new String(_)) + + _ <- IO(str shouldEqual "hello") + } yield ok + } + } From 3f7ea3a45d6778989248b70487d7a75eb4cf11af Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Wed, 10 Mar 2021 19:45:57 -0500 Subject: [PATCH 12/23] Eval creation of handlers for pipeline --- .../fs2/netty/NettyChannelInitializer.scala | 16 ++ .../main/scala/fs2/netty/NettyPipeline.scala | 41 +++- .../main/scala/fs2/netty/NoopChannel.scala | 16 ++ .../main/scala/fs2/netty/SocketHandler.scala | 2 +- .../EmbeddedChannelWithAutoRead.scala | 16 ++ .../pipeline/AlternativeBytePipeline.scala | 20 +- .../fs2/netty/pipeline/BytePipeline.scala | 39 +++- .../scala/fs2/netty/NettyPipelineSpec.scala | 200 +++++++++++++++++- 8 files changed, 327 insertions(+), 23 deletions(-) diff --git a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala index 56415f8..88bfd51 100644 --- a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala +++ b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty import io.netty.channel.{Channel, ChannelInitializer} diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala index c156ac7..7a89eef 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -16,15 +16,15 @@ package fs2.netty +import cats.Eval import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ import io.netty.buffer.ByteBuf -import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer} +import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerAdapter, ChannelInboundHandler, ChannelInitializer, ChannelOutboundHandler} -// TODO: account for Sharable annotation, some of these need to be an eval, and evaluated each time whereas others can be eagerly evaluated. 
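// The change below to List[Eval[ChannelHandler]] lets callers state an
// instantiation policy per handler; as a usage sketch against the new
// constructor, a non-sharable handler is wrapped in Eval.always so each
// channel gets a fresh instance, while @Sharable handlers may use Eval.now:
//   NettyPipeline[F, Array[Byte], ByteBuf, Nothing](
//     dispatcher,
//     handlers = List(Eval.always(new ByteArrayDecoder))
//   )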
-class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E]( - handlers: List[ChannelHandler] +class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( + handlers: List[Eval[ChannelHandler]] )( dispatcher: Dispatcher[F] ) extends NettyChannelInitializer[F, I, O, E] { @@ -38,7 +38,14 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E]( val p = ch.pipeline() ch.config().setAutoRead(false) - handlers.foldLeft(p)((pipeline, handler) => pipeline.addLast(handler)) + handlers.map(_.map { case adapter: ChannelHandlerAdapter => + case handler: ChannelInboundHandler => + case handler: ChannelOutboundHandler => + case _ => + }) + handlers + .map(_.value) + .foldLeft(p)((pipeline, handler) => pipeline.addLast(handler)) dispatcher.unsafeRunAndForget { // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? Also is cats.effect.Effect removed in CE3? @@ -61,10 +68,30 @@ object NettyPipeline { def apply[F[_]: Async]( dispatcher: Dispatcher[F] ): F[NettyPipeline[F, ByteBuf, ByteBuf, Nothing]] = + apply(dispatcher, handlers = Nil) + + def apply[F[_]: Async, I: Socket.Decoder, O, E]( + dispatcher: Dispatcher[F], + handlers: List[Eval[ChannelHandler]] + ): F[NettyPipeline[F, I, O, E]] = Sync[F].delay( - new NettyPipeline[F, ByteBuf, ByteBuf, Nothing]( - handlers = Nil + new NettyPipeline[F, I, O, E]( + memoizeSharableHandlers(handlers) )(dispatcher) ) + /* + Netty will throw an exception if Sharable handler is added to more than one channel. + */ + private[this] def memoizeSharableHandlers[E, O, I: Socket.Decoder, F[ + _ + ]: Async](handlers: List[Eval[ChannelHandler]]) = + handlers.map(eval => + eval.flatMap { + case adapter: ChannelHandlerAdapter if adapter.isSharable => + eval.memoize + case _ => + eval + } + ) } diff --git a/core/src/main/scala/fs2/netty/NoopChannel.scala b/core/src/main/scala/fs2/netty/NoopChannel.scala index 638fb35..3828fb9 100644 --- a/core/src/main/scala/fs2/netty/NoopChannel.scala +++ b/core/src/main/scala/fs2/netty/NoopChannel.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty import io.netty.buffer.ByteBufAllocator diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index 73c44cf..78309a3 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -60,7 +60,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( Sync[F].delay(channel.read()) *> take(poll) ) { (opt, _) => opt.fold(Applicative[F].unit)(i => - Sync[F].delay(ReferenceCountUtil.safeRelease(i)).void + Sync[F].delay(ReferenceCountUtil.safeRelease(i)).void // TODO: check ref count before release? 
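          // One way to address that TODO, sketched only and assuming inbound
          // messages are ReferenceCounted (e.g. ByteBuf): release only while a
          // live reference remains, instead of unconditionally calling safeRelease.
          //   i match {
          //     case rc: ReferenceCounted if rc.refCnt() > 0 =>
          //       Sync[F].delay(rc.release()).void
          //     case _ => Applicative[F].unit
          //   }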
) } .unNoneTerminate diff --git a/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala b/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala index eead718..07121c1 100644 --- a/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala +++ b/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala @@ -1,3 +1,19 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty.embedded import io.netty.buffer.ByteBuf diff --git a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala index 9801d30..9c17a0e 100644 --- a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala @@ -1,15 +1,29 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2.netty.pipeline import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ import fs2.netty.pipeline.AlternativeBytePipeline.ByteBufToByteChunkSocket -import fs2.{Chunk, INothing, Pipe, Stream} -import fs2.netty.pipeline.BytePipeline._ import fs2.netty.{NettyChannelInitializer, NettyPipeline, Socket} +import fs2.{Chunk, INothing, Pipe, Stream} import io.netty.buffer.{ByteBuf, ByteBufUtil, Unpooled} import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} -import io.netty.handler.codec.bytes.ByteArrayDecoder // This class and BytePipeline highlight the different way to create // sockets, i.e. rely on Netty handlers or encode transforms in fs2. diff --git a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala index 2499f65..8198cf1 100644 --- a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala @@ -1,25 +1,35 @@ +/* + * Copyright 2021 Typelevel + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package fs2 package netty.pipeline +import cats.Eval import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ import fs2.netty.pipeline.BytePipeline._ import fs2.netty.{NettyChannelInitializer, NettyPipeline, Socket} -import fs2.{Chunk, INothing, Pipe} import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} import io.netty.handler.codec.bytes.ByteArrayDecoder -class BytePipeline[F[_]: Async](dispatcher: Dispatcher[F]) - extends NettyChannelInitializer[F, Byte, Chunk[Byte], Nothing] { - - private val byteArrayPipeline = - new NettyPipeline[F, Array[Byte], ByteBuf, Nothing]( - handlers = List( - new ByteArrayDecoder - ) // TODO: eval, not sharable. Test with 2 channels using same NettyPipeline class. - )(dispatcher) +class BytePipeline[F[_]: Async]( + byteArrayPipeline: NettyPipeline[F, Array[Byte], ByteBuf, Nothing] +) extends NettyChannelInitializer[F, Byte, Chunk[Byte], Nothing] { override def toChannelInitializer[C <: Channel]( cb: Socket[F, Byte, Chunk[Byte], Nothing] => F[Unit] @@ -33,7 +43,14 @@ class BytePipeline[F[_]: Async](dispatcher: Dispatcher[F]) object BytePipeline { def apply[F[_]: Async](dispatcher: Dispatcher[F]): F[BytePipeline[F]] = - Sync[F].delay(new BytePipeline(dispatcher)) + for { + pipeline <- NettyPipeline[F, Array[Byte], ByteBuf, Nothing]( + dispatcher, + handlers = List( + Eval.always(new ByteArrayDecoder) + ) + ) + } yield new BytePipeline(pipeline) implicit val byteArraySocketDecoder: Socket.Decoder[Array[Byte]] = { case array: Array[Byte] => array.asRight[String] diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index 673d825..b5d5da1 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -1,17 +1,25 @@ package fs2 package netty +import cats.Eval import cats.effect.std.Dispatcher import cats.effect.testing.specs2.CatsResource import cats.effect.{IO, Resource} import cats.syntax.all._ +import fs2.netty.NettyPipelineSpec.{SharableStatefulByteBufToReadCountChannelHandler, SharableStatefulStringToReadCountChannelHandler, StatefulMessageToReadCountChannelHandler} import fs2.netty.embedded.Fs2NettyEmbeddedChannel import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder import io.netty.buffer.{ByteBuf, Unpooled} +import io.netty.channel.ChannelHandler.Sharable +import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandler} +import io.netty.handler.codec.MessageToMessageDecoder import io.netty.handler.codec.bytes.{ByteArrayDecoder, ByteArrayEncoder} +import io.netty.handler.codec.string.StringDecoder +import io.netty.util.ReferenceCountUtil import org.specs2.mutable.SpecificationLike +import java.util import scala.concurrent.duration._ class NettyPipelineSpec @@ -318,13 +326,203 @@ class NettyPipelineSpec // test reads, writes, events, and exceptions in combination to ensure order of events makes sense } - // eval handlers // test pipeline with ByteArrayEncoder/Decoder passed into pipeline, not mutation } + "custom pipelines" should { + implicit val stringSocketDecoder: Socket.Decoder[String] = { + case str: String => str.asRight[String] + case _ => "pipeline misconfigured".asLeft[String] + } + + "custom handlers can change the types of reads 
and writes " in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO, String, String, Nothing]( + dispatcher, + handlers = List(Eval.now(new StringDecoder)) + ) + x <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + (channel, socket) = x + + _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks( + "hello", + " ", + "world" + ) + + strings <- socket.reads.take(3).compile.toList + + _ <- IO(strings.mkString("") should_=== "hello world") + + _ <- socket.write("output message") + + msg <- IO(channel.underlying.readOutbound[String]()) + _ <- IO(msg should_=== "output message") + } yield ok + } + + // tests should enforce that ByteBuf is read off embedded channel ^^ + + "non sharable handlers must be always evaluated per channel" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO, String, String, Nothing]( + dispatcher, + handlers = + List(Eval.always(new StatefulMessageToReadCountChannelHandler)) + ) + x <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + (channelOne, socketOne) = x + y <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + (channelTwo, socketTwo) = y + + inputs = List("a", "b", "c") + + // for same input to each channel we expect the same output, i.e. same scan of counts + _ <- channelOne.writeAllInboundThenFlushThenRunAllPendingTasks( + inputs: _* + ) + countsOne <- socketOne.reads.take(3).map(_.toInt).compile.toList + _ <- IO(countsOne should_=== List(1, 2, 3)) + + _ <- channelTwo.writeAllInboundThenFlushThenRunAllPendingTasks( + inputs: _* + ) + countsTwo <- socketTwo.reads.take(3).map(_.toInt).compile.toList + _ <- IO(countsTwo should_=== List(1, 2, 3)) + } yield ok + } + + "sharable handlers are memoized per channel regardless of the eval policy" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO, String, String, Nothing]( + dispatcher, + handlers = List( + Eval.always( + new SharableStatefulByteBufToReadCountChannelHandler + ), + Eval.now( + new SharableStatefulStringToReadCountChannelHandler + ), + Eval.later( + new SharableStatefulStringToReadCountChannelHandler + ) + ) + ) + x <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + (channelOne, socketOne) = x + y <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + (channelTwo, socketTwo) = y + + inputs = List("a", "b", "c") + + _ <- channelOne.writeAllInboundThenFlushThenRunAllPendingTasks( + inputs: _* + ) + countsOne <- socketOne.reads.take(3).map(_.toInt).compile.toList + _ <- IO(countsOne should_=== List(1, 2, 3)) + + _ <- channelTwo.writeAllInboundThenFlushThenRunAllPendingTasks( + inputs: _* + ) + countsTwo <- socketTwo.reads.take(3).map(_.toInt).compile.toList + _ <- IO(countsTwo should_=== List(4, 5, 6)) + } yield ok + } + } + private def byteToString(byte: Byte): String = { val bytes = new Array[Byte](1) bytes(0) = byte new String(bytes) } } + +object NettyPipelineSpec { + + /** + * Does not use MessageToMessageDecoder, SimpleChannelInboundHandler, or anything that extends ChannelHandlerAdapter. + * Netty tacks if a ChannelHandlerAdapter annotated with @Sharable is added. Netty will throw an exception if such a + * handler would be reused, e.g. + * io.netty.channel.ChannelInitializer exceptionCaught + * WARNING: Failed to initialize a channel. 
Closing: [id: 0xembedded, L:embedded - R:embedded] + * io.netty.channel.ChannelPipelineException: fs2.netty.NettyPipelineSpec$StatefulMessageToReadCountChannelHandler is not a @Sharable handler, so can't be added or removed multiple tim + */ + private class StatefulMessageToReadCountChannelHandler + extends ChannelInboundHandler { + private var readCounter = 0 + + override def channelRegistered(ctx: ChannelHandlerContext): Unit = + ctx.fireChannelRegistered() + + override def channelUnregistered(ctx: ChannelHandlerContext): Unit = + ctx.fireChannelUnregistered() + + override def channelActive(ctx: ChannelHandlerContext): Unit = + ctx.fireChannelActive() + + override def channelInactive(ctx: ChannelHandlerContext): Unit = + ctx.fireChannelInactive() + + override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = { + ReferenceCountUtil.safeRelease(msg) + readCounter += 1 + ctx.fireChannelRead(readCounter.toString) + } + + override def channelReadComplete(ctx: ChannelHandlerContext): Unit = + ctx.fireChannelReadComplete() + + override def userEventTriggered( + ctx: ChannelHandlerContext, + evt: Any + ): Unit = + ctx.fireUserEventTriggered() + + override def channelWritabilityChanged(ctx: ChannelHandlerContext): Unit = + ctx.fireChannelWritabilityChanged() + + override def exceptionCaught( + ctx: ChannelHandlerContext, + cause: Throwable + ): Unit = + ctx.fireExceptionCaught(cause) + + override def handlerAdded(ctx: ChannelHandlerContext): Unit = () + + override def handlerRemoved(ctx: ChannelHandlerContext): Unit = () + } + + @Sharable + private class SharableStatefulStringToReadCountChannelHandler + extends MessageToMessageDecoder[String] { + private var readCounter = 0 + + override def decode( + ctx: ChannelHandlerContext, + msg: String, + out: util.List[AnyRef] + ): Unit = { + readCounter += 1 + out.add(readCounter.toString) + } + } + + @Sharable + private class SharableStatefulByteBufToReadCountChannelHandler + extends MessageToMessageDecoder[ByteBuf] { + private var readCounter = 0 + + override def decode( + ctx: ChannelHandlerContext, + msg: ByteBuf, + out: util.List[AnyRef] + ): Unit = { + readCounter += 1 + out.add(readCounter.toString) + } + } + +} From c1df8799d196ab6f4e7875b455ad83c994a4a3d2 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Wed, 10 Mar 2021 19:57:21 -0500 Subject: [PATCH 13/23] clean up comment --- core/src/main/scala/fs2/netty/NettyPipeline.scala | 5 ----- 1 file changed, 5 deletions(-) diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala index 7a89eef..3466e25 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -38,11 +38,6 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( val p = ch.pipeline() ch.config().setAutoRead(false) - handlers.map(_.map { case adapter: ChannelHandlerAdapter => - case handler: ChannelInboundHandler => - case handler: ChannelOutboundHandler => - case _ => - }) handlers .map(_.value) .foldLeft(p)((pipeline, handler) => pipeline.addLast(handler)) From 903566e877e262d32daa480001c821b8e0e2cbc5 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Wed, 10 Mar 2021 22:16:06 -0500 Subject: [PATCH 14/23] Return to calling unsafeRunAndForget --- .../main/scala/fs2/netty/NettyPipeline.scala | 6 ++- .../main/scala/fs2/netty/SocketHandler.scala | 41 +++++++++++++------ 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala 
b/core/src/main/scala/fs2/netty/NettyPipeline.scala index 3466e25..1ca0023 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -21,7 +21,8 @@ import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ import io.netty.buffer.ByteBuf -import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerAdapter, ChannelInboundHandler, ChannelInitializer, ChannelOutboundHandler} +import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerAdapter, ChannelInitializer} +import io.netty.handler.flow.FlowControlHandler class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( handlers: List[Eval[ChannelHandler]] @@ -41,6 +42,9 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( handlers .map(_.value) .foldLeft(p)((pipeline, handler) => pipeline.addLast(handler)) + // `channelRead` on ChannelInboundHandler's may get invoked more than once despite autoRead being turned off + // and handler calling read to control read rate, i.e. backpressure. Netty's solution is to use `FlowControlHandler`. + .addLast(new FlowControlHandler(false)) dispatcher.unsafeRunAndForget { // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? Also is cats.effect.Effect removed in CE3? diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index 78309a3..fdb96f9 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -23,6 +23,7 @@ import cats.syntax.all._ import cats.{Applicative, Functor} import io.netty.buffer.ByteBuf import io.netty.channel._ +import io.netty.handler.flow.FlowControlHandler import io.netty.util.ReferenceCountUtil private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( @@ -60,7 +61,9 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( Sync[F].delay(channel.read()) *> take(poll) ) { (opt, _) => opt.fold(Applicative[F].unit)(i => - Sync[F].delay(ReferenceCountUtil.safeRelease(i)).void // TODO: check ref count before release? + Sync[F] + .delay(ReferenceCountUtil.safeRelease(i)) + .void // TODO: check ref count before release? ) } .unNoneTerminate @@ -103,9 +106,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( Sync[F].delay(channel.close()) ).void - // TODO: Even with a single channel.read() call, channelRead may get invoked more than once! Netty's "solution" - // is to use FlowControlHandler. Why isn't that the default Netty behavior?! - override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) = { + override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) = inboundDecoder.decode( ReferenceCountUtil.touch( msg, @@ -117,12 +118,10 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( ReferenceCountUtil.safeRelease(msg) case Right(i) => - // TODO: what's the perf impact of unsafeRunSync vs unsafeRunAndForget? - // FlowControlHandler & unsafeRunAndForget vs. unsafeRunSync-only? - // Review for other Netty methods as well. - disp.unsafeRunSync(readsQueue.offer(i.asRight[Exception].some)) + // TODO: what's the perf impact of unsafeRunSync-only vs. unsafeRunAndForget-&-FlowControlHandler? + // Review ordering for other ChannelHandler "callback" methods as well. 
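      // For comparison, the variant this TODO weighs against is the removed
      //   disp.unsafeRunSync(readsQueue.offer(i.asRight[Exception].some))
      // which blocks the Netty event-loop thread until the offer completes, so
      // ordering comes for free, whereas unsafeRunAndForget returns immediately
      // and leans on the FlowControlHandler added in this patch to keep
      // channelRead invocations to one per read() call.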
+ disp.unsafeRunAndForget(readsQueue.offer(i.asRight[Exception].some)) } - } private def debug(x: Any) = x match { case bb: ByteBuf => @@ -157,6 +156,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( mutator: ChannelPipeline => F[Unit] ): F[Socket[F, I2, O2, E2]] = for { + // TODO: Edge cases aren't fully tested _ <- pipelineMutationSwitch.complete( () ) // shutdown the events and reads streams @@ -164,13 +164,28 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( _ <- Sync[F].delay { channel = new NoopChannel(channel) } // shutdown writes - // TODO: what if queues have elements in them? E.g. Netty is concurrently calling channel read. Protocols should disallow this for the most part. - _ <- Sync[F].delay(oldChannel.pipeline().removeLast()) + _ <- Sync[F].delay( + oldChannel.pipeline().removeLast() + ) //remove SocketHandler + _ <- Sync[F].delay( + oldChannel.pipeline().removeLast() + ) //remove FlowControlHandler + /* + TODO: Above may dump remaining messages into fireChannelRead, do we care about those messages? Should we + signal up that this happened? Probably should as certain apps may care about a peer not behaving according to + the expected protocol. In this case, we add a custom handler to capture those messages, then either: + - raiseError on the new reads stream, or + - set a Signal + Also need to think through edge case where Netty is concurrently calling channel read vs. this manipulating + pipeline. Maybe protocols need to inform this layer about when exactly to transition. + */ _ <- mutator(oldChannel.pipeline()) sh <- SocketHandler[F, I2, O2, E2](disp, oldChannel) + // TODO: pass a name for debugging purposes? _ <- Sync[F].delay( - oldChannel.pipeline().addLast(sh) - ) // TODO: I feel like we should pass a name for debugging purposes...?! + oldChannel.pipeline().addLast(new FlowControlHandler(false)) + ) + _ <- Sync[F].delay(oldChannel.pipeline().addLast(sh)) } yield sh // not to self: if we want to schedule an action to be done when channel is closed, can also do `ctx.channel.closeFuture.addListener` From 1f34082b5d4eff42ee622c1bcb7ac3b3487a9793 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Wed, 10 Mar 2021 22:22:30 -0500 Subject: [PATCH 15/23] More notes --- core/src/main/scala/fs2/netty/NettyPipeline.scala | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala index 1ca0023..5663cbf 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -42,8 +42,13 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( handlers .map(_.value) .foldLeft(p)((pipeline, handler) => pipeline.addLast(handler)) - // `channelRead` on ChannelInboundHandler's may get invoked more than once despite autoRead being turned off - // and handler calling read to control read rate, i.e. backpressure. Netty's solution is to use `FlowControlHandler`. + /* `channelRead` on ChannelInboundHandler's may get invoked more than once despite autoRead being turned off + and handler calling read to control read rate, i.e. backpressure. Netty's solution is to use `FlowControlHandler`. 
+ Below from https://stackoverflow.com/questions/45887006/how-to-ensure-channelread-called-once-after-each-read-in-netty: + | Also note that most decoders will automatically perform a read even if you have AUTO_READ=false since they + | need to read enough data in order to yield at least one message to subsequent (i.e. your) handlers... but + | after they yield a message, they won't auto-read from the socket again. + */ .addLast(new FlowControlHandler(false)) dispatcher.unsafeRunAndForget { From b425b7dd77e7778664c7d3fe622abe78e7ab580f Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Fri, 12 Mar 2021 21:24:56 -0500 Subject: [PATCH 16/23] Test events stream --- .../fs2/netty/NettyChannelInitializer.scala | 6 +- .../main/scala/fs2/netty/NettyPipeline.scala | 16 ++--- core/src/main/scala/fs2/netty/Network.scala | 30 +++++----- core/src/main/scala/fs2/netty/Socket.scala | 22 +++++-- .../main/scala/fs2/netty/SocketHandler.scala | 37 ++++++------ .../embedded/Fs2NettyEmbeddedChannel.scala | 10 ++-- .../incudator/http/ExampleHttpServer.scala | 2 +- .../incudator/http/HttpClientConnection.scala | 18 +++--- .../fs2/netty/incudator/http/HttpServer.scala | 2 +- .../fs2/netty/incudator/http/WebSocket.scala | 13 ++-- .../pipeline/AlternativeBytePipeline.scala | 21 +++---- .../fs2/netty/pipeline/BytePipeline.scala | 20 +++---- .../scala/fs2/netty/NettyPipelineSpec.scala | 60 ++++++++++++------- .../fs2/netty/pipeline/BytePipelineSpec.scala | 4 +- 14 files changed, 149 insertions(+), 112 deletions(-) diff --git a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala index 88bfd51..6c8bf4f 100644 --- a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala +++ b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala @@ -19,14 +19,14 @@ package fs2.netty import io.netty.channel.{Channel, ChannelInitializer} import io.netty.channel.socket.SocketChannel -trait NettyChannelInitializer[F[_], I, O, E] { +trait NettyChannelInitializer[F[_], I, O] { def toSocketChannelInitializer( - cb: Socket[F, I, O, E] => F[Unit] + cb: Socket[F, I, O] => F[Unit] ): F[ChannelInitializer[SocketChannel]] = toChannelInitializer[SocketChannel](cb) def toChannelInitializer[C <: Channel]( - cb: Socket[F, I, O, E] => F[Unit] + cb: Socket[F, I, O] => F[Unit] ): F[ChannelInitializer[C]] } diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala index 5663cbf..7065b09 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -24,16 +24,16 @@ import io.netty.buffer.ByteBuf import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerAdapter, ChannelInitializer} import io.netty.handler.flow.FlowControlHandler -class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( +class NettyPipeline[F[_]: Async, I: Socket.Decoder, O] private ( handlers: List[Eval[ChannelHandler]] )( dispatcher: Dispatcher[F] -) extends NettyChannelInitializer[F, I, O, E] { +) extends NettyChannelInitializer[F, I, O] { // TODO: there are other interesting type of channels // TODO: Remember ChannelInitializer is Sharable! 
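To make the FlowControlHandler comment added in NettyPipeline above concrete, here is a plain-Netty sketch, independent of fs2-netty (the class name and frame size are made up): a single socket read may carry several lines, so without FlowControlHandler the last handler would see them all at once, while with it each explicit read() releases exactly one decoded message.

import io.netty.channel.socket.SocketChannel
import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelInitializer}
import io.netty.handler.codec.LineBasedFrameDecoder
import io.netty.handler.codec.string.StringDecoder
import io.netty.handler.flow.FlowControlHandler

class OneLinePerReadInitializer extends ChannelInitializer[SocketChannel] {
  override def initChannel(ch: SocketChannel): Unit = {
    ch.config().setAutoRead(false)
    ch.pipeline()
      .addLast(new LineBasedFrameDecoder(8192)) // one inbound ByteBuf may hold many lines
      .addLast(new StringDecoder())
      .addLast(new FlowControlHandler(false))   // buffers decoded lines, releases one per read()
      .addLast(new ChannelInboundHandlerAdapter {
        override def channelActive(ctx: ChannelHandlerContext): Unit =
          ctx.read() // request the first message

        override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
          // with FlowControlHandler in place this fires once per ctx.read()
          println(s"line: $msg")
          ctx.read() // explicitly request the next one
        }
      })
    ()
  }
}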
override def toChannelInitializer[C <: Channel]( - cb: Socket[F, I, O, E] => F[Unit] + cb: Socket[F, I, O] => F[Unit] ): F[ChannelInitializer[C]] = Sync[F].delay { (ch: C) => { val p = ch.pipeline() @@ -53,7 +53,7 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O, E] private ( dispatcher.unsafeRunAndForget { // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? Also is cats.effect.Effect removed in CE3? - SocketHandler[F, I, O, E](dispatcher, ch) + SocketHandler[F, I, O](dispatcher, ch) .flatTap(h => Sync[F].delay(p.addLast(h)) ) // TODO: pass EventExecutorGroup @@ -71,15 +71,15 @@ object NettyPipeline { def apply[F[_]: Async]( dispatcher: Dispatcher[F] - ): F[NettyPipeline[F, ByteBuf, ByteBuf, Nothing]] = + ): F[NettyPipeline[F, ByteBuf, ByteBuf]] = apply(dispatcher, handlers = Nil) - def apply[F[_]: Async, I: Socket.Decoder, O, E]( + def apply[F[_]: Async, I: Socket.Decoder, O]( dispatcher: Dispatcher[F], handlers: List[Eval[ChannelHandler]] - ): F[NettyPipeline[F, I, O, E]] = + ): F[NettyPipeline[F, I, O]] = Sync[F].delay( - new NettyPipeline[F, I, O, E]( + new NettyPipeline[F, I, O]( memoizeSharableHandlers(handlers) )(dispatcher) ) diff --git a/core/src/main/scala/fs2/netty/Network.scala b/core/src/main/scala/fs2/netty/Network.scala index 280b536..91909bd 100644 --- a/core/src/main/scala/fs2/netty/Network.scala +++ b/core/src/main/scala/fs2/netty/Network.scala @@ -40,10 +40,10 @@ final class Network[F[_]: Async] private ( def client( addr: SocketAddress[Host], options: List[ChannelOption] = Nil) - : Resource[F, Socket[F, Byte, Byte, Nothing]] = + : Resource[F, Socket[F, Byte, Byte]] = Dispatcher[F] flatMap { disp => Resource suspend { - Concurrent[F].deferred[Socket[F, Byte, Byte, Nothing]] flatMap { d => + Concurrent[F].deferred[Socket[F, Byte, Byte]] flatMap { d => addr.host.resolve[F] flatMap { resolved => Sync[F] delay { val bootstrap = new Bootstrap @@ -71,44 +71,44 @@ final class Network[F[_]: Async] private ( host: Option[Host], port: Port, options: List[ChannelOption]) - : Stream[F, Socket[F, Byte, Byte, Nothing]] = + : Stream[F, Socket[F, Byte, Byte]] = Stream.resource(serverResource(host, Some(port), options)).flatMap(_._2) - def server[I: Socket.Decoder, O, E]( + def server[I: Socket.Decoder, O]( host: Option[Host], port: Port, handlers: NonEmptyList[ChannelHandler], options: List[ChannelOption]) - : Stream[F, Socket[F, I, O, E]] = - Stream.resource(serverResource[I, O, E](host, Some(port),handlers, options)).flatMap(_._2) + : Stream[F, Socket[F, I, O]] = + Stream.resource(serverResource[I, O](host, Some(port),handlers, options)).flatMap(_._2) def serverResource( host: Option[Host], port: Option[Port], options: List[ChannelOption]) - : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte, Nothing]])] = + : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte]])] = serverResource(host, port, handlers = Nil,options) - def serverResource[I: Socket.Decoder, O, E]( + def serverResource[I: Socket.Decoder, O]( host: Option[Host], port: Option[Port], handlers: NonEmptyList[ChannelHandler], options: List[ChannelOption] - ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O, E]])] = + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O]])] = serverResource(host, port, handlers.toList, options) - private def serverResource[I: Socket.Decoder, O, E]( + private def serverResource[I: Socket.Decoder, O]( host: Option[Host], port: Option[Port], handlers: 
List[ChannelHandler], options: List[ChannelOption] - ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O, E]])] = + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O]])] = for { dispatcher <- Dispatcher[F] res <- Resource suspend { for { - clientConnections <- Queue.unbounded[F, Socket[F, I, O, E]] + clientConnections <- Queue.unbounded[F, Socket[F, I, O]] resolvedHost <- host.traverse(_.resolve[F]) @@ -132,7 +132,7 @@ final class Network[F[_]: Async] private ( ) // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? dispatcher.unsafeRunAndForget { - SocketHandler[F, I, O, E](dispatcher, ch) + SocketHandler[F, I, O](dispatcher, ch) .flatTap(h => Sync[F].delay(p.addLast(h))) .flatMap(clientConnections.offer) } @@ -181,7 +181,7 @@ final class Network[F[_]: Async] private ( private[this] def initializer( disp: Dispatcher[F])( - result: Socket[F, Byte, Byte, Nothing] => F[Unit]) + result: Socket[F, Byte, Byte] => F[Unit]) : ChannelInitializer[SocketChannel] = new ChannelInitializer[SocketChannel] { def initChannel(ch: SocketChannel) = { @@ -189,7 +189,7 @@ final class Network[F[_]: Async] private ( ch.config().setAutoRead(false) disp unsafeRunAndForget { - SocketHandler[F, Byte, Byte, Nothing](disp, ch) flatMap { s => + SocketHandler[F, Byte, Byte](disp, ch) flatMap { s => Sync[F].delay(p.addLast(s)) *> result(s) } } diff --git a/core/src/main/scala/fs2/netty/Socket.scala b/core/src/main/scala/fs2/netty/Socket.scala index eb6bc05..8529a56 100644 --- a/core/src/main/scala/fs2/netty/Socket.scala +++ b/core/src/main/scala/fs2/netty/Socket.scala @@ -25,7 +25,7 @@ import io.netty.channel.ChannelPipeline // and WS use cases this is completely ok. One alternative is scala reflections api, but will overhead be acceptable // along the critical code path (assuming high volume servers/clients)? // Think through variance of types. -trait Socket[F[_], I, O, +E] { +trait Socket[F[_], I, O] { // TODO: Temporarily disabling while making Socket generic enough to test with EmbeddedChannel. Furthermore, these // methods restrict Socket to be a InetChannel which isn't compatible with EmbeddedChannel. Netty also works with @@ -37,7 +37,21 @@ trait Socket[F[_], I, O, +E] { def reads: Stream[F, I] - def events: Stream[F, E] + /** + * Handlers may optionally generate events to communicate with downstream handlers. These include but not limited to + * signals about handshake complete, timeouts, and errors. 
+ * + * Some examples from Netty: + * - ChannelInputShutdownReadComplete + * - ChannelInputShutdownEvent + * - SslCompletionEvent + * - ProxyConnectionEvent + * - HandshakeComplete + * - Http2FrameStreamEvent + * - IdleStateEvent + * @return + */ + def events: Stream[F, AnyRef] def write(output: O): F[Unit] def writes: Pipe[F, O, INothing] @@ -48,9 +62,9 @@ trait Socket[F[_], I, O, +E] { def close(): F[Unit] - def mutatePipeline[I2: Socket.Decoder, O2, E2]( + def mutatePipeline[I2: Socket.Decoder, O2]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2, E2]] + ): F[Socket[F, I2, O2]] } object Socket { diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index fdb96f9..6bd6708 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -18,7 +18,7 @@ package fs2 package netty import cats.effect.std.{Dispatcher, Queue} -import cats.effect.{Async, Concurrent, Deferred, Poll, Sync} +import cats.effect._ import cats.syntax.all._ import cats.{Applicative, Functor} import io.netty.buffer.ByteBuf @@ -26,15 +26,15 @@ import io.netty.channel._ import io.netty.handler.flow.FlowControlHandler import io.netty.util.ReferenceCountUtil -private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( +private final class SocketHandler[F[_]: Async: Concurrent, I, O]( disp: Dispatcher[F], private var channel: Channel, readsQueue: Queue[F, Option[Either[Throwable, I]]], - eventsQueue: Queue[F, E], + eventsQueue: Queue[F, AnyRef], pipelineMutationSwitch: Deferred[F, Unit] )(implicit inboundDecoder: Socket.Decoder[I]) extends ChannelInboundHandlerAdapter - with Socket[F, I, O, E] { + with Socket[F, I, O] { // override val localAddress: F[SocketAddress[IpAddress]] = // Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.localAddress())) @@ -79,7 +79,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( ) } - override lazy val events: Stream[F, E] = + override lazy val events: Stream[F, AnyRef] = Stream .fromQueueUnterminated(eventsQueue) .interruptWhen(pipelineMutationSwitch.get.attempt) @@ -119,7 +119,6 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( case Right(i) => // TODO: what's the perf impact of unsafeRunSync-only vs. unsafeRunAndForget-&-FlowControlHandler? - // Review ordering for other ChannelHandler "callback" methods as well. disp.unsafeRunAndForget(readsQueue.offer(i.asRight[Exception].some)) } @@ -140,21 +139,25 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( override def channelInactive(ctx: ChannelHandlerContext) = try { + //TODO: Is ordering preserved? disp.unsafeRunAndForget(readsQueue.offer(None)) } catch { case _: IllegalStateException => () // sometimes we can see this due to race conditions in shutdown } - override def userEventTriggered(ctx: ChannelHandlerContext, evt: Any): Unit = - evt match { - case e: E => disp.unsafeRunAndForget(eventsQueue.offer(e)) - case _ => () // TODO: probably raise error on stream... - } + override def userEventTriggered( + ctx: ChannelHandlerContext, + evt: AnyRef + ): Unit = + //TODO: Is ordering preserved? Might indeed be best to not run this handler in a separate thread pool (unless + // netty manages ordering...which isn't likely as it should just hand off to ec) and call dispatcher manually + // where needed. This way we can keep a thread-unsafe mutable queue. 
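For a concrete example of the user events that end up in eventsQueue, here is a plain-Netty sketch, not fs2-netty API (class name and timeouts are made up): IdleStateHandler reports idleness through the same userEventTriggered hook that SocketHandler forwards into the queue.

import io.netty.channel.socket.SocketChannel
import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelInitializer}
import io.netty.handler.timeout.{IdleStateEvent, IdleStateHandler}

class IdleEventsInitializer extends ChannelInitializer[SocketChannel] {
  override def initChannel(ch: SocketChannel): Unit = {
    ch.pipeline()
      .addLast(new IdleStateHandler(60, 30, 0)) // reader idle 60s, writer idle 30s
      .addLast(new ChannelInboundHandlerAdapter {
        override def userEventTriggered(ctx: ChannelHandlerContext, evt: AnyRef): Unit =
          evt match {
            case e: IdleStateEvent =>
              println(s"idle: ${e.state()}") // the kind of value the events stream surfaces
            case other =>
              ctx.fireUserEventTriggered(other) // pass everything else along
          }
      })
    ()
  }
}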
+ disp.unsafeRunAndForget(eventsQueue.offer(evt)) - override def mutatePipeline[I2: Socket.Decoder, O2, E2]( + override def mutatePipeline[I2: Socket.Decoder, O2]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2, E2]] = + ): F[Socket[F, I2, O2]] = for { // TODO: Edge cases aren't fully tested _ <- pipelineMutationSwitch.complete( @@ -180,7 +183,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( pipeline. Maybe protocols need to inform this layer about when exactly to transition. */ _ <- mutator(oldChannel.pipeline()) - sh <- SocketHandler[F, I2, O2, E2](disp, oldChannel) + sh <- SocketHandler[F, I2, O2](disp, oldChannel) // TODO: pass a name for debugging purposes? _ <- Sync[F].delay( oldChannel.pipeline().addLast(new FlowControlHandler(false)) @@ -193,13 +196,13 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O, +E]( private object SocketHandler { - def apply[F[_]: Async: Concurrent, I: Socket.Decoder, O, E]( + def apply[F[_]: Async: Concurrent, I: Socket.Decoder, O]( disp: Dispatcher[F], channel: Channel - ): F[SocketHandler[F, I, O, E]] = + ): F[SocketHandler[F, I, O]] = for { readsQueue <- Queue.unbounded[F, Option[Either[Throwable, I]]] - eventsQueue <- Queue.unbounded[F, E] + eventsQueue <- Queue.unbounded[F, AnyRef] pipelineMutationSwitch <- Deferred[F, Unit] } yield new SocketHandler( disp, diff --git a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala index cf06357..8e31d0f 100644 --- a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala +++ b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala @@ -75,14 +75,14 @@ final case class Fs2NettyEmbeddedChannel[F[_]] private ( object Fs2NettyEmbeddedChannel { - def apply[F[_], I, O, E]( - initializer: NettyChannelInitializer[F, I, O, E] - )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, I, O, E])] = + def apply[F[_], I, O]( + initializer: NettyChannelInitializer[F, I, O] + )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, I, O])] = for { channel <- F.delay( new EmbeddedChannelWithAutoRead() - ) // With FlowControl/Dispatcher fixes, EmbeddedChannelWithAutoRead might not be needed after all. - socket <- F.async[Socket[F, I, O, E]] { cb => + ) // With FlowControl/Dispatcher fixes EmbeddedChannelWithAutoRead might not be needed after all. 
+ socket <- F.async[Socket[F, I, O]] { cb => initializer .toChannelInitializer[EmbeddedChannel] { socket => F.delay(cb(socket.asRight[Throwable])) diff --git a/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala index 06cdef7..64ca2f5 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/ExampleHttpServer.scala @@ -53,7 +53,7 @@ object ExampleHttpServer extends IOApp { } private[this] val ChatRooms = - scala.collection.mutable.Map.empty[String, List[WebSocket[IO, Nothing]]] + scala.collection.mutable.Map.empty[String, List[WebSocket[IO]]] private[this] val GenericWebSocketConfig = WebSocketConfig( maxFramePayloadLength = 65536, diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala index afcf109..2d309e8 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala @@ -38,8 +38,7 @@ class HttpClientConnection[F[_]: Sync]( clientSocket: Socket[ F, FullHttpRequest, - FullHttpResponse, - Nothing + FullHttpResponse ] ) { @@ -106,7 +105,7 @@ class HttpClientConnection[F[_]: Sync]( cb ) => clientSocket - .mutatePipeline[WebSocketFrame, WebSocketFrame, HandshakeComplete]( + .mutatePipeline[WebSocketFrame, WebSocketFrame]( installWebSocketHandlersAndContinueWebSocketUpgrade( request, wsConfigs @@ -114,16 +113,19 @@ class HttpClientConnection[F[_]: Sync]( ) .flatMap { connection => connection.events - .find(_ => true) // only take 1st event since Netty will only first once + // only take 1st event since Netty will only first once + .collectFirst { case hc: HandshakeComplete => hc } .evalTap(handshakeComplete => connection // TODO: maybe like a covary method? 
- .mutatePipeline[WebSocketFrame, WebSocketFrame, Nothing](_ => Applicative[F].unit) + .mutatePipeline[WebSocketFrame, WebSocketFrame](_ => + Applicative[F].unit + ) .map(wsConn => cb( ( handshakeComplete, - new WebSocket[F, Nothing](underlying = wsConn) + new WebSocket[F](underlying = wsConn) ).asRight[Throwable] ) ) @@ -132,7 +134,7 @@ class HttpClientConnection[F[_]: Sync]( .drain } .onError { case e => - cb(e.asLeft[(HandshakeComplete, WebSocket[F, Nothing])]) + cb(e.asLeft[(HandshakeComplete, WebSocket[F])]) } .void @@ -216,7 +218,7 @@ object HttpClientConnection { // One of throwable could be WebSocketHandshakeException final case class SwitchToWebSocketProtocol[F[_]]( wsConfigs: WebSocketConfig, - cb: Either[Throwable, (HandshakeComplete, WebSocket[F, Nothing])] => F[ + cb: Either[Throwable, (HandshakeComplete, WebSocket[F])] => F[ Unit ] ) extends WebSocketResponse[F] diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala index 76c0076..060be62 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -42,7 +42,7 @@ object HttpServer { network <- Network[F] rawHttpClientConnection <- network - .serverResource[FullHttpRequest, FullHttpResponse, Nothing]( + .serverResource[FullHttpRequest, FullHttpResponse]( host = None, port = None, handlers = NonEmptyList.of( diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala index 5152c73..d19fca6 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -21,14 +21,13 @@ import fs2.{INothing, Pipe, Stream} import io.netty.channel.ChannelPipeline import io.netty.handler.codec.http.websocketx.WebSocketFrame -class WebSocket[F[_], U]( +class WebSocket[F[_]]( underlying: Socket[ F, WebSocketFrame, - WebSocketFrame, - Nothing + WebSocketFrame ] -) extends Socket[F, WebSocketFrame, WebSocketFrame, U] { +) extends Socket[F, WebSocketFrame, WebSocketFrame] { // override def localAddress: F[SocketAddress[IpAddress]] = underlying.localAddress // @@ -42,7 +41,7 @@ class WebSocket[F[_], U]( override def writes: Pipe[F, WebSocketFrame, INothing] = underlying.writes - override def events: Stream[F, Nothing] = underlying.events + override def events: Stream[F, AnyRef] = underlying.events override def isOpen: F[Boolean] = underlying.isOpen @@ -52,8 +51,8 @@ class WebSocket[F[_], U]( override def close(): F[Unit] = underlying.close() - override def mutatePipeline[I2: Socket.Decoder, O2, E2]( + override def mutatePipeline[I2: Socket.Decoder, O2]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2, E2]] = + ): F[Socket[F, I2, O2]] = underlying.mutatePipeline(mutator) } diff --git a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala index 9c17a0e..ade3300 100644 --- a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala @@ -14,7 +14,8 @@ * limitations under the License. 
*/ -package fs2.netty.pipeline +package fs2 +package netty.pipeline import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} @@ -28,11 +29,11 @@ import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} // This class and BytePipeline highlight the different way to create // sockets, i.e. rely on Netty handlers or encode transforms in fs2. class AlternativeBytePipeline[F[_]: Async]( - byteBufPipeline: NettyPipeline[F, ByteBuf, ByteBuf, Nothing] -) extends NettyChannelInitializer[F, Byte, Chunk[Byte], Nothing] { + byteBufPipeline: NettyPipeline[F, ByteBuf, ByteBuf] +) extends NettyChannelInitializer[F, Byte, Chunk[Byte]] { override def toChannelInitializer[C <: Channel]( - cb: Socket[F, Byte, Chunk[Byte], Nothing] => F[Unit] + cb: Socket[F, Byte, Chunk[Byte]] => F[Unit] ): F[ChannelInitializer[C]] = byteBufPipeline .toChannelInitializer { byteBufSocket => @@ -52,17 +53,17 @@ object AlternativeBytePipeline { } yield new AlternativeBytePipeline(byteBufPipeline) private class ByteBufToByteChunkSocket[F[_]: Async]( - socket: Socket[F, ByteBuf, ByteBuf, Nothing] - ) extends Socket[F, Byte, Chunk[Byte], Nothing] { + socket: Socket[F, ByteBuf, ByteBuf] + ) extends Socket[F, Byte, Chunk[Byte]] { - override lazy val reads: fs2.Stream[F, Byte] = + override lazy val reads: Stream[F, Byte] = socket.reads .evalMap(bb => Sync[F].delay(ByteBufUtil.getBytes(bb)).map(Chunk.array(_)) ) .flatMap(Stream.chunk) - override lazy val events: fs2.Stream[F, Nothing] = socket.events + override lazy val events: Stream[F, AnyRef] = socket.events override def write(output: Chunk[Byte]): F[Unit] = socket.write(toByteBuf(output)) @@ -78,9 +79,9 @@ object AlternativeBytePipeline { override def close(): F[Unit] = socket.close() - override def mutatePipeline[I2: Socket.Decoder, O2, E2]( + override def mutatePipeline[I2: Socket.Decoder, O2]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2, E2]] = socket.mutatePipeline[I2, O2, E2](mutator) + ): F[Socket[F, I2, O2]] = socket.mutatePipeline[I2, O2](mutator) private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = chunk match { diff --git a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala index 8198cf1..a914899 100644 --- a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala @@ -28,11 +28,11 @@ import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} import io.netty.handler.codec.bytes.ByteArrayDecoder class BytePipeline[F[_]: Async]( - byteArrayPipeline: NettyPipeline[F, Array[Byte], ByteBuf, Nothing] -) extends NettyChannelInitializer[F, Byte, Chunk[Byte], Nothing] { + byteArrayPipeline: NettyPipeline[F, Array[Byte], ByteBuf] +) extends NettyChannelInitializer[F, Byte, Chunk[Byte]] { override def toChannelInitializer[C <: Channel]( - cb: Socket[F, Byte, Chunk[Byte], Nothing] => F[Unit] + cb: Socket[F, Byte, Chunk[Byte]] => F[Unit] ): F[ChannelInitializer[C]] = byteArrayPipeline .toChannelInitializer { byteArraySocket => @@ -44,7 +44,7 @@ object BytePipeline { def apply[F[_]: Async](dispatcher: Dispatcher[F]): F[BytePipeline[F]] = for { - pipeline <- NettyPipeline[F, Array[Byte], ByteBuf, Nothing]( + pipeline <- NettyPipeline[F, Array[Byte], ByteBuf]( dispatcher, handlers = List( Eval.always(new ByteArrayDecoder) @@ -59,13 +59,13 @@ object BytePipeline { } private class ChunkingByteSocket[F[_]: Async]( - socket: Socket[F, Array[Byte], ByteBuf, Nothing] - ) extends Socket[F, Byte, Chunk[Byte], 
Nothing] { + socket: Socket[F, Array[Byte], ByteBuf] + ) extends Socket[F, Byte, Chunk[Byte]] { - override lazy val reads: fs2.Stream[F, Byte] = + override lazy val reads: Stream[F, Byte] = socket.reads.map(Chunk.array(_)).flatMap(Stream.chunk) - override lazy val events: fs2.Stream[F, Nothing] = socket.events + override lazy val events: Stream[F, AnyRef] = socket.events override def write(output: Chunk[Byte]): F[Unit] = socket.write(toByteBuf(output)) @@ -81,9 +81,9 @@ object BytePipeline { override def close(): F[Unit] = socket.close() - override def mutatePipeline[I2: Socket.Decoder, O2, E2]( + override def mutatePipeline[I2: Socket.Decoder, O2]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2, E2]] = socket.mutatePipeline[I2, O2, E2](mutator) + ): F[Socket[F, I2, O2]] = socket.mutatePipeline[I2, O2](mutator) // TODO: alloc over unpooled? private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala index b5d5da1..0d0cff7 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala @@ -12,6 +12,7 @@ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.ChannelHandler.Sharable +import io.netty.channel.socket.ChannelInputShutdownReadComplete import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandler} import io.netty.handler.codec.MessageToMessageDecoder import io.netty.handler.codec.bytes.{ByteArrayDecoder, ByteArrayEncoder} @@ -34,7 +35,7 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf]( pipeline ).map(_._2) @@ -49,11 +50,11 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf]( pipeline ).map(_._2) - events: List[Nothing] <- socket.events + events: List[AnyRef] <- socket.events .interruptAfter(1.second) .compile .toList @@ -65,7 +66,7 @@ class NettyPipelineSpec for { // Given a socket and embedded channel from the default Netty Pipeline pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) (channel, socket) = x // Then configs should be setup, like autoread should be false...maybe move to top test? 
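A brief aside on the sampling pattern these specs rely on: socket.reads and socket.events do not terminate while the channel stays open, so tests either take a fixed number of elements or cut the stream off after a deadline before compiling it. The same idiom in isolation, on an illustrative endless stream (plain fs2, nothing fs2-netty specific):

import scala.concurrent.duration._
import cats.effect.IO
import fs2.Stream

object SamplingIdiom {
  val endless: Stream[IO, Int] =
    Stream.iterate(0)(_ + 1).covary[IO].metered(100.millis)

  val boundedByCount: IO[List[Int]] = endless.take(3).compile.toList
  val boundedByTime: IO[List[Int]]  = endless.interruptAfter(1.second).compile.toList
}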
@@ -101,7 +102,7 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) (channel, socket) = x encoder = implicitly[Encoder[Byte]] @@ -127,7 +128,7 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) (channel, socket) = x encoder = implicitly[Encoder[Byte]] @@ -159,7 +160,7 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) (channel, socket) = x // Netty sanity check @@ -180,7 +181,7 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) (channel, socket) = x _ <- socket.close() @@ -195,7 +196,7 @@ class NettyPipelineSpec dispatcher => for { pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) (channel, socket) = x _ <- IO( @@ -212,13 +213,30 @@ class NettyPipelineSpec } yield errMsg shouldEqual "unit test error".some } + "pipeline events appear in fs2-netty as events stream" in withResource { + dispatcher => + for { + pipeline <- NettyPipeline[IO](dispatcher) + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) + (channel, socket) = x + + _ <- IO( + channel.underlying + .pipeline() + .fireUserEventTriggered(ChannelInputShutdownReadComplete.INSTANCE) + ) + + event <- socket.events.take(1).compile.last + } yield event should_=== Some(ChannelInputShutdownReadComplete.INSTANCE) + } + "mutations" should { "no-op mutation creates a Socket with same behavior as original, while original Socket is unregistered from pipeline and channel" in withResource { dispatcher => for { // Given a channel and socket for the default pipeline pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf]( pipeline ) (channel, socket) = x @@ -227,7 +245,7 @@ class NettyPipelineSpec _ <- socket.isDetached.map(_ should beFalse) // When performing a no-op socket pipeline mutation - newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf, Nothing](_ => + newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf](_ => IO.unit ) @@ -279,7 +297,7 @@ class NettyPipelineSpec for { // Given a channel and socket for the default pipeline pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf, Nothing]( + x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf]( pipeline ) (channel, socket) = x @@ -293,7 +311,7 @@ class NettyPipelineSpec } } byteSocket <- socket - .mutatePipeline[Array[Byte], Array[Byte], Nothing] { pipeline => + .mutatePipeline[Array[Byte], Array[Byte]] { pipeline => for { _ <- IO(pipeline.addLast(new ByteArrayDecoder)) _ <- IO(pipeline.addLast(new ByteArrayEncoder)) @@ -338,11 +356,11 @@ class NettyPipelineSpec "custom handlers can change the types of reads and writes " in withResource { dispatcher => for { - pipeline <- NettyPipeline[IO, String, String, Nothing]( + pipeline <- 
NettyPipeline[IO, String, String]( dispatcher, handlers = List(Eval.now(new StringDecoder)) ) - x <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, String, String](pipeline) (channel, socket) = x _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks( @@ -367,14 +385,14 @@ class NettyPipelineSpec "non sharable handlers must be always evaluated per channel" in withResource { dispatcher => for { - pipeline <- NettyPipeline[IO, String, String, Nothing]( + pipeline <- NettyPipeline[IO, String, String]( dispatcher, handlers = List(Eval.always(new StatefulMessageToReadCountChannelHandler)) ) - x <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, String, String](pipeline) (channelOne, socketOne) = x - y <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + y <- Fs2NettyEmbeddedChannel[IO, String, String](pipeline) (channelTwo, socketTwo) = y inputs = List("a", "b", "c") @@ -397,7 +415,7 @@ class NettyPipelineSpec "sharable handlers are memoized per channel regardless of the eval policy" in withResource { dispatcher => for { - pipeline <- NettyPipeline[IO, String, String, Nothing]( + pipeline <- NettyPipeline[IO, String, String]( dispatcher, handlers = List( Eval.always( @@ -411,9 +429,9 @@ class NettyPipelineSpec ) ) ) - x <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, String, String](pipeline) (channelOne, socketOne) = x - y <- Fs2NettyEmbeddedChannel[IO, String, String, Nothing](pipeline) + y <- Fs2NettyEmbeddedChannel[IO, String, String](pipeline) (channelTwo, socketTwo) = y inputs = List("a", "b", "c") diff --git a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala index f5eb802..279c85d 100644 --- a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala @@ -19,7 +19,7 @@ class BytePipelineSpec "can echo back what is written" in withResource { dispatcher => for { pipeline <- BytePipeline(dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte], Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte]](pipeline) (channel, socket) = x _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks("hello world") @@ -43,7 +43,7 @@ class BytePipelineSpec "alternative can echo back what is written" in withResource { dispatcher => for { pipeline <- AlternativeBytePipeline(dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte], Nothing](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte]](pipeline) (channel, socket) = x _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks("hello world") From 92bc54cf6fa70b029f26c4ae7a6c4d301038eebf Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sat, 13 Mar 2021 04:36:12 -0500 Subject: [PATCH 17/23] Create PorFunctor instance for Socket --- .../fs2/netty/NettyChannelInitializer.scala | 6 +- .../main/scala/fs2/netty/NettyPipeline.scala | 14 ++-- core/src/main/scala/fs2/netty/Network.scala | 19 +++--- core/src/main/scala/fs2/netty/Socket.scala | 66 ++++++++++++++----- .../main/scala/fs2/netty/SocketHandler.scala | 14 ++-- .../embedded/Fs2NettyEmbeddedChannel.scala | 8 +-- .../incudator/http/HttpClientConnection.scala | 4 +- .../fs2/netty/incudator/http/HttpServer.scala | 2 +- .../fs2/netty/incudator/http/WebSocket.scala | 4 +- .../pipeline/AlternativeBytePipeline.scala | 10 +-- 
.../fs2/netty/pipeline/BytePipeline.scala | 16 ++--- .../fs2/netty/pipeline/BytePipelineSpec.scala | 4 +- 12 files changed, 101 insertions(+), 66 deletions(-) diff --git a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala index 6c8bf4f..83ef0e0 100644 --- a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala +++ b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala @@ -19,14 +19,14 @@ package fs2.netty import io.netty.channel.{Channel, ChannelInitializer} import io.netty.channel.socket.SocketChannel -trait NettyChannelInitializer[F[_], I, O] { +trait NettyChannelInitializer[F[_], O, I] { def toSocketChannelInitializer( - cb: Socket[F, I, O] => F[Unit] + cb: Socket[F, O, I] => F[Unit] ): F[ChannelInitializer[SocketChannel]] = toChannelInitializer[SocketChannel](cb) def toChannelInitializer[C <: Channel]( - cb: Socket[F, I, O] => F[Unit] + cb: Socket[F, O, I] => F[Unit] ): F[ChannelInitializer[C]] } diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/NettyPipeline.scala index 7065b09..036276e 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/NettyPipeline.scala @@ -24,16 +24,16 @@ import io.netty.buffer.ByteBuf import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerAdapter, ChannelInitializer} import io.netty.handler.flow.FlowControlHandler -class NettyPipeline[F[_]: Async, I: Socket.Decoder, O] private ( +class NettyPipeline[F[_]: Async, O, I: Socket.Decoder] private ( handlers: List[Eval[ChannelHandler]] )( dispatcher: Dispatcher[F] -) extends NettyChannelInitializer[F, I, O] { +) extends NettyChannelInitializer[F, O, I] { // TODO: there are other interesting type of channels // TODO: Remember ChannelInitializer is Sharable! override def toChannelInitializer[C <: Channel]( - cb: Socket[F, I, O] => F[Unit] + cb: Socket[F, O, I] => F[Unit] ): F[ChannelInitializer[C]] = Sync[F].delay { (ch: C) => { val p = ch.pipeline() @@ -53,7 +53,7 @@ class NettyPipeline[F[_]: Async, I: Socket.Decoder, O] private ( dispatcher.unsafeRunAndForget { // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? Also is cats.effect.Effect removed in CE3? - SocketHandler[F, I, O](dispatcher, ch) + SocketHandler[F, O, I](dispatcher, ch) .flatTap(h => Sync[F].delay(p.addLast(h)) ) // TODO: pass EventExecutorGroup @@ -74,12 +74,12 @@ object NettyPipeline { ): F[NettyPipeline[F, ByteBuf, ByteBuf]] = apply(dispatcher, handlers = Nil) - def apply[F[_]: Async, I: Socket.Decoder, O]( + def apply[F[_]: Async, O, I: Socket.Decoder]( dispatcher: Dispatcher[F], handlers: List[Eval[ChannelHandler]] - ): F[NettyPipeline[F, I, O]] = + ): F[NettyPipeline[F, O, I]] = Sync[F].delay( - new NettyPipeline[F, I, O]( + new NettyPipeline[F, O, I]( memoizeSharableHandlers(handlers) )(dispatcher) ) diff --git a/core/src/main/scala/fs2/netty/Network.scala b/core/src/main/scala/fs2/netty/Network.scala index 91909bd..ac6b6d1 100644 --- a/core/src/main/scala/fs2/netty/Network.scala +++ b/core/src/main/scala/fs2/netty/Network.scala @@ -74,13 +74,14 @@ final class Network[F[_]: Async] private ( : Stream[F, Socket[F, Byte, Byte]] = Stream.resource(serverResource(host, Some(port), options)).flatMap(_._2) - def server[I: Socket.Decoder, O]( + // TODO: maybe here it's nicer to have the I first then O?, or will that be confusing if Socket has reversed order? 
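The I-versus-O ordering question in the comment above is what the Profunctor work settles: dimap treats the first type hole as the contravariant side (what is fed in, i.e. writes) and the second as the covariant side (what comes out, i.e. reads), so putting O first lets this patch define Profunctor[Socket[F, *, *]] in the Socket companion below without a type lambda. A self-contained analogue with a made-up Conn type:

import cats.arrow.Profunctor

final case class Conn[O, I](send: O => Unit, receive: () => I)

object Conn {
  implicit val profunctor: Profunctor[Conn] = new Profunctor[Conn] {
    // f pre-processes what goes in (writes), g post-processes what comes out (reads)
    def dimap[A, B, C, D](fab: Conn[A, B])(f: C => A)(g: B => D): Conn[C, D] =
      Conn(c => fab.send(f(c)), () => g(fab.receive()))
  }
}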
+ def server[O, I: Socket.Decoder]( host: Option[Host], port: Port, handlers: NonEmptyList[ChannelHandler], options: List[ChannelOption]) - : Stream[F, Socket[F, I, O]] = - Stream.resource(serverResource[I, O](host, Some(port),handlers, options)).flatMap(_._2) + : Stream[F, Socket[F, O, I]] = + Stream.resource(serverResource[O, I](host, Some(port),handlers, options)).flatMap(_._2) def serverResource( host: Option[Host], @@ -89,26 +90,26 @@ final class Network[F[_]: Async] private ( : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte]])] = serverResource(host, port, handlers = Nil,options) - def serverResource[I: Socket.Decoder, O]( + def serverResource[O, I: Socket.Decoder]( host: Option[Host], port: Option[Port], handlers: NonEmptyList[ChannelHandler], options: List[ChannelOption] - ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O]])] = + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, O, I]])] = serverResource(host, port, handlers.toList, options) - private def serverResource[I: Socket.Decoder, O]( + private def serverResource[O, I: Socket.Decoder]( host: Option[Host], port: Option[Port], handlers: List[ChannelHandler], options: List[ChannelOption] - ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, I, O]])] = + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, O, I]])] = for { dispatcher <- Dispatcher[F] res <- Resource suspend { for { - clientConnections <- Queue.unbounded[F, Socket[F, I, O]] + clientConnections <- Queue.unbounded[F, Socket[F, O, I]] resolvedHost <- host.traverse(_.resolve[F]) @@ -132,7 +133,7 @@ final class Network[F[_]: Async] private ( ) // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? dispatcher.unsafeRunAndForget { - SocketHandler[F, I, O](dispatcher, ch) + SocketHandler[F, O, I](dispatcher, ch) .flatTap(h => Sync[F].delay(p.addLast(h))) .flatMap(clientConnections.offer) } diff --git a/core/src/main/scala/fs2/netty/Socket.scala b/core/src/main/scala/fs2/netty/Socket.scala index 8529a56..d61bcec 100644 --- a/core/src/main/scala/fs2/netty/Socket.scala +++ b/core/src/main/scala/fs2/netty/Socket.scala @@ -17,6 +17,7 @@ package fs2 package netty +import cats.arrow.Profunctor import cats.syntax.all._ import io.netty.buffer.ByteBuf import io.netty.channel.ChannelPipeline @@ -25,7 +26,7 @@ import io.netty.channel.ChannelPipeline // and WS use cases this is completely ok. One alternative is scala reflections api, but will overhead be acceptable // along the critical code path (assuming high volume servers/clients)? // Think through variance of types. -trait Socket[F[_], I, O] { +trait Socket[F[_], O, I] { // TODO: Temporarily disabling while making Socket generic enough to test with EmbeddedChannel. Furthermore, these // methods restrict Socket to be a InetChannel which isn't compatible with EmbeddedChannel. Netty also works with @@ -38,19 +39,19 @@ trait Socket[F[_], I, O] { def reads: Stream[F, I] /** - * Handlers may optionally generate events to communicate with downstream handlers. These include but not limited to - * signals about handshake complete, timeouts, and errors. - * - * Some examples from Netty: - * - ChannelInputShutdownReadComplete - * - ChannelInputShutdownEvent - * - SslCompletionEvent - * - ProxyConnectionEvent - * - HandshakeComplete - * - Http2FrameStreamEvent - * - IdleStateEvent - * @return - */ + * Handlers may optionally generate events to communicate with downstream handlers. 
These include but not limited to + * signals about handshake complete, timeouts, and errors. + * + * Some examples from Netty: + * - ChannelInputShutdownReadComplete + * - ChannelInputShutdownEvent + * - SslCompletionEvent + * - ProxyConnectionEvent + * - HandshakeComplete + * - Http2FrameStreamEvent + * - IdleStateEvent + * @return + */ def events: Stream[F, AnyRef] def write(output: O): F[Unit] @@ -62,9 +63,9 @@ trait Socket[F[_], I, O] { def close(): F[Unit] - def mutatePipeline[I2: Socket.Decoder, O2]( + def mutatePipeline[O2, I2: Socket.Decoder]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2]] + ): F[Socket[F, O2, I2]] } object Socket { @@ -81,4 +82,37 @@ object Socket { s"pipeline error, expected $ByteBufClassName, but got ${x.getClass.getName}" .asLeft[ByteBuf] } + + //todo Do we then define an IO instance of this? + // Maybe we need to have a custom typeclass that also accounts for pipeline handling type C? Although contravariance + // should handle that? + implicit def ProfunctorInstance[F[_]]: Profunctor[Socket[F, *, *]] = + new Profunctor[Socket[F, *, *]] { + + override def dimap[A, B, C, D]( + fab: Socket[F, A, B] + )(f: C => A)(g: B => D): Socket[F, C, D] = + new Socket[F, C, D] { + override def reads: Stream[F, D] = fab.reads.map(g) + + override def events: Stream[F, AnyRef] = fab.events + + override def write(output: C): F[Unit] = fab.write(f(output)) + + override def writes: Pipe[F, C, INothing] = + _.map(f).through(fab.writes) + + override def isOpen: F[Boolean] = fab.isOpen + + override def isClosed: F[Boolean] = fab.isClosed + + override def isDetached: F[Boolean] = fab.isDetached + + override def close(): F[Unit] = fab.close() + + override def mutatePipeline[O2, I2: Decoder]( + mutator: ChannelPipeline => F[Unit] + ): F[Socket[F, O2, I2]] = fab.mutatePipeline(mutator) + } + } } diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/SocketHandler.scala index 6bd6708..8463fcd 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/SocketHandler.scala @@ -26,7 +26,7 @@ import io.netty.channel._ import io.netty.handler.flow.FlowControlHandler import io.netty.util.ReferenceCountUtil -private final class SocketHandler[F[_]: Async: Concurrent, I, O]( +private final class SocketHandler[F[_]: Async: Concurrent, O, I]( disp: Dispatcher[F], private var channel: Channel, readsQueue: Queue[F, Option[Either[Throwable, I]]], @@ -34,7 +34,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O]( pipelineMutationSwitch: Deferred[F, Unit] )(implicit inboundDecoder: Socket.Decoder[I]) extends ChannelInboundHandlerAdapter - with Socket[F, I, O] { + with Socket[F, O, I] { // override val localAddress: F[SocketAddress[IpAddress]] = // Sync[F].delay(SocketAddress.fromInetSocketAddress(channel.localAddress())) @@ -155,9 +155,9 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O]( // where needed. This way we can keep a thread-unsafe mutable queue. disp.unsafeRunAndForget(eventsQueue.offer(evt)) - override def mutatePipeline[I2: Socket.Decoder, O2]( + override def mutatePipeline[O2,I2: Socket.Decoder]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2]] = + ): F[Socket[F, O2, I2]] = for { // TODO: Edge cases aren't fully tested _ <- pipelineMutationSwitch.complete( @@ -183,7 +183,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O]( pipeline. Maybe protocols need to inform this layer about when exactly to transition. 
*/ _ <- mutator(oldChannel.pipeline()) - sh <- SocketHandler[F, I2, O2](disp, oldChannel) + sh <- SocketHandler[F, O2, I2](disp, oldChannel) // TODO: pass a name for debugging purposes? _ <- Sync[F].delay( oldChannel.pipeline().addLast(new FlowControlHandler(false)) @@ -196,10 +196,10 @@ private final class SocketHandler[F[_]: Async: Concurrent, I, O]( private object SocketHandler { - def apply[F[_]: Async: Concurrent, I: Socket.Decoder, O]( + def apply[F[_]: Async: Concurrent, O, I: Socket.Decoder]( disp: Dispatcher[F], channel: Channel - ): F[SocketHandler[F, I, O]] = + ): F[SocketHandler[F, O, I]] = for { readsQueue <- Queue.unbounded[F, Option[Either[Throwable, I]]] eventsQueue <- Queue.unbounded[F, AnyRef] diff --git a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala index 8e31d0f..93f218e 100644 --- a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala +++ b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala @@ -75,14 +75,14 @@ final case class Fs2NettyEmbeddedChannel[F[_]] private ( object Fs2NettyEmbeddedChannel { - def apply[F[_], I, O]( - initializer: NettyChannelInitializer[F, I, O] - )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, I, O])] = + def apply[F[_], O, I]( + initializer: NettyChannelInitializer[F, O, I] + )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, O, I])] = for { channel <- F.delay( new EmbeddedChannelWithAutoRead() ) // With FlowControl/Dispatcher fixes EmbeddedChannelWithAutoRead might not be needed after all. - socket <- F.async[Socket[F, I, O]] { cb => + socket <- F.async[Socket[F, O, I]] { cb => initializer .toChannelInitializer[EmbeddedChannel] { socket => F.delay(cb(socket.asRight[Throwable])) diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala index 2d309e8..b0e2ff3 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala @@ -37,8 +37,8 @@ import io.netty.handler.codec.http.websocketx.{WebSocketFrame, WebSocketServerPr class HttpClientConnection[F[_]: Sync]( clientSocket: Socket[ F, - FullHttpRequest, - FullHttpResponse + FullHttpResponse, + FullHttpRequest ] ) { diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala index 060be62..298e9a4 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -42,7 +42,7 @@ object HttpServer { network <- Network[F] rawHttpClientConnection <- network - .serverResource[FullHttpRequest, FullHttpResponse]( + .serverResource[FullHttpResponse, FullHttpRequest]( host = None, port = None, handlers = NonEmptyList.of( diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala index d19fca6..1530aa3 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -51,8 +51,8 @@ class WebSocket[F[_]]( override def close(): F[Unit] = underlying.close() - override def mutatePipeline[I2: Socket.Decoder, O2]( + override def mutatePipeline[O2, I2: Socket.Decoder]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2]] = + ): F[Socket[F, 
O2, I2]] = underlying.mutatePipeline(mutator) } diff --git a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala index ade3300..363e38c 100644 --- a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala @@ -30,10 +30,10 @@ import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} // sockets, i.e. rely on Netty handlers or encode transforms in fs2. class AlternativeBytePipeline[F[_]: Async]( byteBufPipeline: NettyPipeline[F, ByteBuf, ByteBuf] -) extends NettyChannelInitializer[F, Byte, Chunk[Byte]] { +) extends NettyChannelInitializer[F, Chunk[Byte], Byte] { override def toChannelInitializer[C <: Channel]( - cb: Socket[F, Byte, Chunk[Byte]] => F[Unit] + cb: Socket[F, Chunk[Byte], Byte] => F[Unit] ): F[ChannelInitializer[C]] = byteBufPipeline .toChannelInitializer { byteBufSocket => @@ -54,7 +54,7 @@ object AlternativeBytePipeline { private class ByteBufToByteChunkSocket[F[_]: Async]( socket: Socket[F, ByteBuf, ByteBuf] - ) extends Socket[F, Byte, Chunk[Byte]] { + ) extends Socket[F, Chunk[Byte], Byte] { override lazy val reads: Stream[F, Byte] = socket.reads @@ -79,9 +79,9 @@ object AlternativeBytePipeline { override def close(): F[Unit] = socket.close() - override def mutatePipeline[I2: Socket.Decoder, O2]( + override def mutatePipeline[O2, I2: Socket.Decoder]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, O2]] = socket.mutatePipeline[I2, O2](mutator) + ): F[Socket[F, O2, I2]] = socket.mutatePipeline[O2, I2](mutator) private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = chunk match { diff --git a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala index a914899..cf7aa9e 100644 --- a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala @@ -28,11 +28,11 @@ import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} import io.netty.handler.codec.bytes.ByteArrayDecoder class BytePipeline[F[_]: Async]( - byteArrayPipeline: NettyPipeline[F, Array[Byte], ByteBuf] -) extends NettyChannelInitializer[F, Byte, Chunk[Byte]] { + byteArrayPipeline: NettyPipeline[F, ByteBuf, Array[Byte]] +) extends NettyChannelInitializer[F, Chunk[Byte], Byte] { override def toChannelInitializer[C <: Channel]( - cb: Socket[F, Byte, Chunk[Byte]] => F[Unit] + cb: Socket[F, Chunk[Byte], Byte] => F[Unit] ): F[ChannelInitializer[C]] = byteArrayPipeline .toChannelInitializer { byteArraySocket => @@ -44,7 +44,7 @@ object BytePipeline { def apply[F[_]: Async](dispatcher: Dispatcher[F]): F[BytePipeline[F]] = for { - pipeline <- NettyPipeline[F, Array[Byte], ByteBuf]( + pipeline <- NettyPipeline[F, ByteBuf, Array[Byte]]( dispatcher, handlers = List( Eval.always(new ByteArrayDecoder) @@ -59,8 +59,8 @@ object BytePipeline { } private class ChunkingByteSocket[F[_]: Async]( - socket: Socket[F, Array[Byte], ByteBuf] - ) extends Socket[F, Byte, Chunk[Byte]] { + socket: Socket[F, ByteBuf, Array[Byte]] + ) extends Socket[F, Chunk[Byte], Byte] { override lazy val reads: Stream[F, Byte] = socket.reads.map(Chunk.array(_)).flatMap(Stream.chunk) @@ -81,9 +81,9 @@ object BytePipeline { override def close(): F[Unit] = socket.close() - override def mutatePipeline[I2: Socket.Decoder, O2]( + override def mutatePipeline[O2, I2: Socket.Decoder]( mutator: ChannelPipeline => F[Unit] - ): F[Socket[F, I2, 
O2]] = socket.mutatePipeline[I2, O2](mutator) + ): F[Socket[F, O2, I2]] = socket.mutatePipeline[O2, I2](mutator) // TODO: alloc over unpooled? private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = diff --git a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala index 279c85d..14dbfb8 100644 --- a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala @@ -19,7 +19,7 @@ class BytePipelineSpec "can echo back what is written" in withResource { dispatcher => for { pipeline <- BytePipeline(dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte]](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, Chunk[Byte], Byte](pipeline) (channel, socket) = x _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks("hello world") @@ -43,7 +43,7 @@ class BytePipelineSpec "alternative can echo back what is written" in withResource { dispatcher => for { pipeline <- AlternativeBytePipeline(dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, Byte, Chunk[Byte]](pipeline) + x <- Fs2NettyEmbeddedChannel[IO, Chunk[Byte], Byte](pipeline) (channel, socket) = x _ <- channel.writeAllInboundThenFlushThenRunAllPendingTasks("hello world") From 19992feddc8bfb5a38f43f56160a67992f0d8ab8 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sat, 13 Mar 2021 04:48:23 -0500 Subject: [PATCH 18/23] Document trying to use profunctor --- .../fs2/netty/pipeline/BytePipeline.scala | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala index cf7aa9e..8b56a1d 100644 --- a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala @@ -36,8 +36,13 @@ class BytePipeline[F[_]: Async]( ): F[ChannelInitializer[C]] = byteArrayPipeline .toChannelInitializer { byteArraySocket => + /* + TODO: Can't do this b/c ProFunctor isn't Chunk aware + Sync[F].delay(byteArraySocket.dimap[Chunk[Byte], Byte](toByteBuf)(Chunk.array(_))) + */ Sync[F].delay(new ChunkingByteSocket[F](byteArraySocket)).flatMap(cb) } + } object BytePipeline { @@ -85,17 +90,19 @@ object BytePipeline { mutator: ChannelPipeline => F[Unit] ): F[Socket[F, O2, I2]] = socket.mutatePipeline[O2, I2](mutator) - // TODO: alloc over unpooled? - private[this] def toByteBuf(chunk: Chunk[Byte]): ByteBuf = - chunk match { - case Chunk.ArraySlice(arr, off, len) => - Unpooled.wrappedBuffer(arr, off, len) + } - case c: Chunk.ByteBuffer => - Unpooled.wrappedBuffer(c.toByteBuffer) + // TODO: alloc over unpooled? 
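On the "alloc over unpooled?" question above: the trade-off is between wrapping the caller's array as-is and copying into an allocator-managed buffer. A small plain-Netty illustration (helper names are made up; in a real pipeline the allocator would come from channel.alloc()):

import io.netty.buffer.{ByteBuf, ByteBufAllocator, Unpooled}

object BufferChoices {

  // Zero-copy: the ByteBuf is an unpooled view over the caller's array, so later
  // mutation of the array is visible through the buffer.
  def wrap(bytes: Array[Byte]): ByteBuf =
    Unpooled.wrappedBuffer(bytes)

  // Copies into an allocator-managed buffer, which may be pooled and/or direct,
  // at the cost of one copy.
  def copyViaAllocator(alloc: ByteBufAllocator, bytes: Array[Byte]): ByteBuf = {
    val buf = alloc.buffer(bytes.length)
    buf.writeBytes(bytes)
    buf
  }
}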
+ private def toByteBuf(chunk: Chunk[Byte]): ByteBuf = + chunk match { + case Chunk.ArraySlice(arr, off, len) => + Unpooled.wrappedBuffer(arr, off, len) + + case c: Chunk.ByteBuffer => + Unpooled.wrappedBuffer(c.toByteBuffer) + + case c => + Unpooled.wrappedBuffer(c.toArray) + } - case c => - Unpooled.wrappedBuffer(c.toArray) - } - } } From c9661f63aa96e1d56ed9e3b9715b22352235dde4 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sun, 14 Mar 2021 22:50:03 -0400 Subject: [PATCH 19/23] Reorg packages --- .../fs2/netty/NettyChannelInitializer.scala | 3 +- core/src/main/scala/fs2/netty/Network.scala | 166 +++++++++++------- .../embedded/Fs2NettyEmbeddedChannel.scala | 3 +- .../incudator/http/HttpClientConnection.scala | 11 +- .../fs2/netty/incudator/http/HttpServer.scala | 8 +- .../fs2/netty/incudator/http/WebSocket.scala | 2 +- .../netty/{ => pipeline}/NettyPipeline.scala | 4 +- .../AlternativeBytePipeline.scala | 9 +- .../{ => prebuilt}/BytePipeline.scala | 10 +- .../{ => pipeline/socket}/NoopChannel.scala | 2 +- .../netty/{ => pipeline/socket}/Socket.scala | 2 +- .../{ => pipeline/socket}/SocketHandler.scala | 11 +- .../{ => pipeline}/NettyPipelineSpec.scala | 11 +- .../{ => prebuilt}/BytePipelineSpec.scala | 2 +- 14 files changed, 151 insertions(+), 93 deletions(-) rename core/src/main/scala/fs2/netty/{ => pipeline}/NettyPipeline.scala (96%) rename core/src/main/scala/fs2/netty/pipeline/{ => prebuilt}/AlternativeBytePipeline.scala (93%) rename core/src/main/scala/fs2/netty/pipeline/{ => prebuilt}/BytePipeline.scala (92%) rename core/src/main/scala/fs2/netty/{ => pipeline/socket}/NoopChannel.scala (99%) rename core/src/main/scala/fs2/netty/{ => pipeline/socket}/Socket.scala (99%) rename core/src/main/scala/fs2/netty/{ => pipeline/socket}/SocketHandler.scala (97%) rename core/src/test/scala/fs2/netty/{ => pipeline}/NettyPipelineSpec.scala (98%) rename core/src/test/scala/fs2/netty/pipeline/{ => prebuilt}/BytePipelineSpec.scala (98%) diff --git a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala index 83ef0e0..f080980 100644 --- a/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala +++ b/core/src/main/scala/fs2/netty/NettyChannelInitializer.scala @@ -16,8 +16,9 @@ package fs2.netty -import io.netty.channel.{Channel, ChannelInitializer} +import fs2.netty.pipeline.socket.Socket import io.netty.channel.socket.SocketChannel +import io.netty.channel.{Channel, ChannelInitializer} trait NettyChannelInitializer[F[_], O, I] { diff --git a/core/src/main/scala/fs2/netty/Network.scala b/core/src/main/scala/fs2/netty/Network.scala index ac6b6d1..570c3a8 100644 --- a/core/src/main/scala/fs2/netty/Network.scala +++ b/core/src/main/scala/fs2/netty/Network.scala @@ -18,13 +18,14 @@ package fs2 package netty import cats.data.NonEmptyList -import cats.effect.{Async, Concurrent, Resource, Sync} import cats.effect.std.{Dispatcher, Queue} +import cats.effect.{Async, Concurrent, Resource, Sync} import cats.syntax.all._ import com.comcast.ip4s.{Host, IpAddress, Port, SocketAddress} +import fs2.netty.pipeline.socket.{Socket, SocketHandler} import io.netty.bootstrap.{Bootstrap, ServerBootstrap} -import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} import io.netty.channel.socket.SocketChannel +import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} import java.net.InetSocketAddress 
import java.util.concurrent.ThreadFactory @@ -32,34 +33,44 @@ import java.util.concurrent.atomic.AtomicInteger // TODO: Do we need to distinguish between TCP (connection based network) and UDP (connection-less network)? final class Network[F[_]: Async] private ( - parent: EventLoopGroup, // TODO: custom value class? - child: EventLoopGroup, - clientChannelClazz: Class[_ <: Channel], - serverChannelClazz: Class[_ <: ServerChannel]) { + parent: EventLoopGroup, // TODO: custom value class? + child: EventLoopGroup, + clientChannelClazz: Class[_ <: Channel], + serverChannelClazz: Class[_ <: ServerChannel] +) { def client( - addr: SocketAddress[Host], - options: List[ChannelOption] = Nil) - : Resource[F, Socket[F, Byte, Byte]] = + addr: SocketAddress[Host], + options: List[ChannelOption] = Nil + ): Resource[F, Socket[F, Byte, Byte]] = Dispatcher[F] flatMap { disp => Resource suspend { Concurrent[F].deferred[Socket[F, Byte, Byte]] flatMap { d => addr.host.resolve[F] flatMap { resolved => Sync[F] delay { val bootstrap = new Bootstrap - bootstrap.group(child) + bootstrap + .group(child) .channel(clientChannelClazz) - .option(JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], false) // backpressure + .option( + JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], + false + ) // backpressure .handler(initializer(disp)(d.complete(_).void)) options.foreach(opt => bootstrap.option(opt.key, opt.value)) val connectChannel = Sync[F] defer { - val cf = bootstrap.connect(resolved.toInetAddress, addr.port.value) + val cf = + bootstrap.connect(resolved.toInetAddress, addr.port.value) fromNettyFuture[F](cf.pure[F]).as(cf.channel()) } - Resource.make(connectChannel <* d.get)(ch => fromNettyFuture(Sync[F].delay(ch.close())).void).evalMap(_ => d.get) + Resource + .make(connectChannel <* d.get)(ch => + fromNettyFuture(Sync[F].delay(ch.close())).void + ) + .evalMap(_ => d.get) } } } @@ -68,27 +79,29 @@ final class Network[F[_]: Async] private ( //TODO: Add back default args for opts, removed to fix compilation error for overloaded method def server( - host: Option[Host], - port: Port, - options: List[ChannelOption]) - : Stream[F, Socket[F, Byte, Byte]] = + host: Option[Host], + port: Port, + options: List[ChannelOption] + ): Stream[F, Socket[F, Byte, Byte]] = Stream.resource(serverResource(host, Some(port), options)).flatMap(_._2) // TODO: maybe here it's nicer to have the I first then O?, or will that be confusing if Socket has reversed order? 
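// Side note on the ordering question in the TODO above, based on how Socket is used
// elsewhere in this series (ChunkingByteSocket is a Socket[F, Chunk[Byte], Byte] that
// writes chunks and reads bytes; the http incubator builds a
// NettyPipeline[F, FullHttpResponse, FullHttpRequest]): the first type parameter is
// what gets written to the wire, the second is what reads produce. `respondToAll`
// and `respond` are hypothetical names used only for illustration.
import fs2.Stream
import fs2.netty.pipeline.socket.Socket
import io.netty.handler.codec.http.{FullHttpRequest, FullHttpResponse}

object ReadWriteOrientation {

  // O = what the server writes, I = what reads produce.
  def respondToAll[F[_]](
      socket: Socket[F, FullHttpResponse, FullHttpRequest],
      respond: FullHttpRequest => FullHttpResponse
  ): Stream[F, Unit] =
    socket.reads.evalMap(request => socket.write(respond(request)))
}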
def server[O, I: Socket.Decoder]( - host: Option[Host], - port: Port, - handlers: NonEmptyList[ChannelHandler], - options: List[ChannelOption]) - : Stream[F, Socket[F, O, I]] = - Stream.resource(serverResource[O, I](host, Some(port),handlers, options)).flatMap(_._2) + host: Option[Host], + port: Port, + handlers: NonEmptyList[ChannelHandler], + options: List[ChannelOption] + ): Stream[F, Socket[F, O, I]] = + Stream + .resource(serverResource[O, I](host, Some(port), handlers, options)) + .flatMap(_._2) def serverResource( - host: Option[Host], - port: Option[Port], - options: List[ChannelOption]) - : Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte]])] = - serverResource(host, port, handlers = Nil,options) + host: Option[Host], + port: Option[Port], + options: List[ChannelOption] + ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte]])] = + serverResource(host, port, handlers = Nil, options) def serverResource[O, I: Socket.Decoder]( host: Option[Host], @@ -175,16 +188,15 @@ final class Network[F[_]: Async] private ( } } yield res - - implicit val decoder: Socket.Decoder[Byte] = new Socket.Decoder[Byte] { + implicit val decoder: Socket.Decoder[Byte] = new Socket.Decoder[Byte] { override def decode(x: AnyRef): Either[String, Byte] = ??? } - private[this] def initializer( - disp: Dispatcher[F])( - result: Socket[F, Byte, Byte] => F[Unit]) - : ChannelInitializer[SocketChannel] = + private[this] def initializer(disp: Dispatcher[F])( + result: Socket[F, Byte, Byte] => F[Unit] + ): ChannelInitializer[SocketChannel] = new ChannelInitializer[SocketChannel] { + def initChannel(ch: SocketChannel) = { val p = ch.pipeline() ch.config().setAutoRead(false) @@ -203,23 +215,33 @@ object Network { private[this] val (eventLoopClazz, serverChannelClazz, clientChannelClazz) = { val (e, s, c) = uring().orElse(epoll()).orElse(kqueue()).getOrElse(nio()) - (e, s.asInstanceOf[Class[_ <: ServerChannel]], c.asInstanceOf[Class[_ <: Channel]]) + ( + e, + s.asInstanceOf[Class[_ <: ServerChannel]], + c.asInstanceOf[Class[_ <: Channel]] + ) } def apply[F[_]: Async]: Resource[F, Network[F]] = { // TODO configure threads def instantiate(name: String) = Sync[F] delay { - val constr = eventLoopClazz.getDeclaredConstructor(classOf[Int], classOf[ThreadFactory]) - val result = constr.newInstance(new Integer(1), new ThreadFactory { - private val ctr = new AtomicInteger(0) - def newThread(r: Runnable): Thread = { - val t = new Thread(r) - t.setDaemon(true) - t.setName(s"fs2-netty-$name-io-worker-${ctr.getAndIncrement()}") - t.setPriority(Thread.MAX_PRIORITY) - t + val constr = eventLoopClazz.getDeclaredConstructor( + classOf[Int], + classOf[ThreadFactory] + ) + val result = constr.newInstance( + new Integer(1), + new ThreadFactory { + private val ctr = new AtomicInteger(0) + def newThread(r: Runnable): Thread = { + val t = new Thread(r) + t.setDaemon(true) + t.setName(s"fs2-netty-$name-io-worker-${ctr.getAndIncrement()}") + t.setPriority(Thread.MAX_PRIORITY) + t + } } - }) + ) result.asInstanceOf[EventLoopGroup] } @@ -232,7 +254,10 @@ object Network { (instantiateR("server"), instantiateR("client")) mapN { (server, client) => try { val meth = eventLoopClazz.getDeclaredMethod("setIoRatio", classOf[Int]) - meth.invoke(server, new Integer(90)) // TODO tweak this a bit more; 100 was worse than 50 and 90 was a dramatic step up from both + meth.invoke( + server, + new Integer(90) + ) // TODO tweak this a bit more; 100 was worse than 50 and 90 was a dramatic step up from both meth.invoke(client, 
new Integer(90)) } catch { case _: Exception => () @@ -244,13 +269,27 @@ object Network { private[this] def uring() = try { - if (sys.props.get("fs2.netty.use.io_uring").map(_.toBoolean).getOrElse(false)) { + if ( + sys.props + .get("fs2.netty.use.io_uring") + .map(_.toBoolean) + .getOrElse(false) + ) { Class.forName("io.netty.incubator.channel.uring.IOUringEventLoop") - Some(( - Class.forName("io.netty.incubator.channel.uring.IOUringEventLoopGroup"), - Class.forName("io.netty.incubator.channel.uring.IOUringServerSocketChannel"), - Class.forName("io.netty.incubator.channel.uring.IOUringSocketChannel"))) + Some( + ( + Class.forName( + "io.netty.incubator.channel.uring.IOUringEventLoopGroup" + ), + Class.forName( + "io.netty.incubator.channel.uring.IOUringServerSocketChannel" + ), + Class.forName( + "io.netty.incubator.channel.uring.IOUringSocketChannel" + ) + ) + ) } else { None } @@ -262,10 +301,13 @@ object Network { try { Class.forName("io.netty.channel.epoll.EpollEventLoop") - Some(( - Class.forName("io.netty.channel.epoll.EpollEventLoopGroup"), - Class.forName("io.netty.channel.epoll.EpollServerSocketChannel"), - Class.forName("io.netty.channel.epoll.EpollSocketChannel"))) + Some( + ( + Class.forName("io.netty.channel.epoll.EpollEventLoopGroup"), + Class.forName("io.netty.channel.epoll.EpollServerSocketChannel"), + Class.forName("io.netty.channel.epoll.EpollSocketChannel") + ) + ) } catch { case _: Throwable => None } @@ -274,10 +316,13 @@ object Network { try { Class.forName("io.netty.channel.kqueue.KQueueEventLoop") - Some(( - Class.forName("io.netty.channel.kqueue.KQueueEventLoopGroup"), - Class.forName("io.netty.channel.kqueue.KQueueServerSocketChannel"), - Class.forName("io.netty.channel.kqueue.KQueueSocketChannel"))) + Some( + ( + Class.forName("io.netty.channel.kqueue.KQueueEventLoopGroup"), + Class.forName("io.netty.channel.kqueue.KQueueServerSocketChannel"), + Class.forName("io.netty.channel.kqueue.KQueueSocketChannel") + ) + ) } catch { case _: Throwable => None } @@ -286,5 +331,6 @@ object Network { ( Class.forName("io.netty.channel.nio.NioEventLoopGroup"), Class.forName("io.netty.channel.socket.nio.NioServerSocketChannel"), - Class.forName("io.netty.channel.socket.nio.NioSocketChannel")) + Class.forName("io.netty.channel.socket.nio.NioSocketChannel") + ) } diff --git a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala index 93f218e..8a9e3f4 100644 --- a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala +++ b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala @@ -20,7 +20,8 @@ package netty.embedded import cats.effect.{Async, Sync} import cats.implicits._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder -import fs2.netty.{NettyChannelInitializer, Socket} +import fs2.netty.NettyChannelInitializer +import fs2.netty.pipeline.socket.Socket import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.embedded.EmbeddedChannel diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala index b0e2ff3..08d3b3a 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpClientConnection.scala @@ -21,8 +21,8 @@ import cats.data.Kleisli import cats.effect.Sync import cats.syntax.all._ import fs2.Stream -import fs2.netty.Socket import 
fs2.netty.incudator.http.HttpClientConnection._ +import fs2.netty.pipeline.socket.Socket import io.netty.buffer.Unpooled import io.netty.channel.{ChannelHandlerContext, ChannelPipeline} import io.netty.handler.codec.TooLongFrameException @@ -31,9 +31,6 @@ import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler.Han import io.netty.handler.codec.http.websocketx.{WebSocketFrame, WebSocketServerProtocolHandler} // TODO: this is just a fancy function over Socket, so maybe just make this an object and a function? -// U could be io.netty.handler.timeout.IdleStateEvent if we wanted to handle connection closure, but in this -// context we want to close the channel anyway and just be notified why it was closed. However, we should likely -// send HttpResponseStatus.REQUEST_TIMEOUT for cleaner close. So change U type and handle at FS2 layer. class HttpClientConnection[F[_]: Sync]( clientSocket: Socket[ F, @@ -42,6 +39,12 @@ class HttpClientConnection[F[_]: Sync]( ] ) { + // TODO: Add idle state handler and handle io.netty.handler.timeout.IdleStateEvent from read side with + // HttpResponseStatus.REQUEST_TIMEOUT for a clean close in case of race condition where client is in the process of + // sending a request. However, need to track weather a request is inFlight, so as not to close connection just + // because server is taking long to respond, triggering a Idle Event from read side. Client is just waiting so it + // cannot send another request. + def successfullyDecodedReads( httpRouter: Kleisli[F, FullHttpRequest, FullHttpResponse], webSocketRouter: Kleisli[F, FullHttpRequest, WebSocketResponse[F]] diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala index 298e9a4..89fe362 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -18,9 +18,10 @@ package fs2.netty.incudator.http import cats.data.NonEmptyList import cats.effect.{Async, Resource} -import fs2.Stream import cats.syntax.all._ -import fs2.netty.{Network, Socket} +import fs2.Stream +import fs2.netty.Network +import fs2.netty.pipeline.socket.Socket import io.netty.handler.codec.http._ import io.netty.handler.timeout.ReadTimeoutHandler @@ -28,7 +29,8 @@ import scala.concurrent.duration.FiniteDuration object HttpServer { - implicit val decoder = new Socket.Decoder[FullHttpRequest] { + implicit val decoder = new Socket.Decoder[FullHttpRequest] { + override def decode(x: AnyRef): Either[String, FullHttpRequest] = x match { case req: FullHttpRequest => req.asRight[String] case _ => "non http message, pipeline error".asLeft[FullHttpRequest] diff --git a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala index 1530aa3..50385f3 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/WebSocket.scala @@ -16,7 +16,7 @@ package fs2.netty.incudator.http -import fs2.netty.Socket +import fs2.netty.pipeline.socket.Socket import fs2.{INothing, Pipe, Stream} import io.netty.channel.ChannelPipeline import io.netty.handler.codec.http.websocketx.WebSocketFrame diff --git a/core/src/main/scala/fs2/netty/NettyPipeline.scala b/core/src/main/scala/fs2/netty/pipeline/NettyPipeline.scala similarity index 96% rename from core/src/main/scala/fs2/netty/NettyPipeline.scala rename to core/src/main/scala/fs2/netty/pipeline/NettyPipeline.scala 
index 036276e..ab2ad83 100644 --- a/core/src/main/scala/fs2/netty/NettyPipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/NettyPipeline.scala @@ -14,12 +14,14 @@ * limitations under the License. */ -package fs2.netty +package fs2.netty.pipeline import cats.Eval import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ +import fs2.netty.NettyChannelInitializer +import fs2.netty.pipeline.socket.{Socket, SocketHandler} import io.netty.buffer.ByteBuf import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerAdapter, ChannelInitializer} import io.netty.handler.flow.FlowControlHandler diff --git a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/prebuilt/AlternativeBytePipeline.scala similarity index 93% rename from core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala rename to core/src/main/scala/fs2/netty/pipeline/prebuilt/AlternativeBytePipeline.scala index 363e38c..a127cbb 100644 --- a/core/src/main/scala/fs2/netty/pipeline/AlternativeBytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/prebuilt/AlternativeBytePipeline.scala @@ -14,14 +14,15 @@ * limitations under the License. */ -package fs2 -package netty.pipeline +package fs2.netty.pipeline.prebuilt import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ -import fs2.netty.pipeline.AlternativeBytePipeline.ByteBufToByteChunkSocket -import fs2.netty.{NettyChannelInitializer, NettyPipeline, Socket} +import fs2.netty.pipeline.NettyPipeline +import fs2.netty.pipeline.prebuilt.AlternativeBytePipeline._ +import fs2.netty.NettyChannelInitializer +import fs2.netty.pipeline.socket.Socket import fs2.{Chunk, INothing, Pipe, Stream} import io.netty.buffer.{ByteBuf, ByteBufUtil, Unpooled} import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} diff --git a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala b/core/src/main/scala/fs2/netty/pipeline/prebuilt/BytePipeline.scala similarity index 92% rename from core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala rename to core/src/main/scala/fs2/netty/pipeline/prebuilt/BytePipeline.scala index 8b56a1d..c4ea3d3 100644 --- a/core/src/main/scala/fs2/netty/pipeline/BytePipeline.scala +++ b/core/src/main/scala/fs2/netty/pipeline/prebuilt/BytePipeline.scala @@ -14,15 +14,17 @@ * limitations under the License. 
*/ -package fs2 -package netty.pipeline +package fs2.netty.pipeline.prebuilt import cats.Eval import cats.effect.std.Dispatcher import cats.effect.{Async, Sync} import cats.syntax.all._ -import fs2.netty.pipeline.BytePipeline._ -import fs2.netty.{NettyChannelInitializer, NettyPipeline, Socket} +import fs2.netty.pipeline.NettyPipeline +import fs2.netty.pipeline.prebuilt.BytePipeline._ +import fs2.netty.NettyChannelInitializer +import fs2.netty.pipeline.socket.Socket +import fs2.{Chunk, INothing, Pipe, Stream} import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.{Channel, ChannelInitializer, ChannelPipeline} import io.netty.handler.codec.bytes.ByteArrayDecoder diff --git a/core/src/main/scala/fs2/netty/NoopChannel.scala b/core/src/main/scala/fs2/netty/pipeline/socket/NoopChannel.scala similarity index 99% rename from core/src/main/scala/fs2/netty/NoopChannel.scala rename to core/src/main/scala/fs2/netty/pipeline/socket/NoopChannel.scala index 3828fb9..ae2ba7e 100644 --- a/core/src/main/scala/fs2/netty/NoopChannel.scala +++ b/core/src/main/scala/fs2/netty/pipeline/socket/NoopChannel.scala @@ -14,7 +14,7 @@ * limitations under the License. */ -package fs2.netty +package fs2.netty.pipeline.socket import io.netty.buffer.ByteBufAllocator import io.netty.channel._ diff --git a/core/src/main/scala/fs2/netty/Socket.scala b/core/src/main/scala/fs2/netty/pipeline/socket/Socket.scala similarity index 99% rename from core/src/main/scala/fs2/netty/Socket.scala rename to core/src/main/scala/fs2/netty/pipeline/socket/Socket.scala index d61bcec..caee684 100644 --- a/core/src/main/scala/fs2/netty/Socket.scala +++ b/core/src/main/scala/fs2/netty/pipeline/socket/Socket.scala @@ -15,7 +15,7 @@ */ package fs2 -package netty +package netty.pipeline.socket import cats.arrow.Profunctor import cats.syntax.all._ diff --git a/core/src/main/scala/fs2/netty/SocketHandler.scala b/core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala similarity index 97% rename from core/src/main/scala/fs2/netty/SocketHandler.scala rename to core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala index 8463fcd..9f805cf 100644 --- a/core/src/main/scala/fs2/netty/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala @@ -15,18 +15,19 @@ */ package fs2 -package netty +package netty.pipeline.socket -import cats.effect.std.{Dispatcher, Queue} import cats.effect._ +import cats.effect.std.{Dispatcher, Queue} import cats.syntax.all._ import cats.{Applicative, Functor} +import fs2.netty.fromNettyFuture import io.netty.buffer.ByteBuf import io.netty.channel._ import io.netty.handler.flow.FlowControlHandler import io.netty.util.ReferenceCountUtil -private final class SocketHandler[F[_]: Async: Concurrent, O, I]( +final class SocketHandler[F[_]: Async: Concurrent, O, I] private( disp: Dispatcher[F], private var channel: Channel, readsQueue: Queue[F, Option[Either[Throwable, I]]], @@ -155,7 +156,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, O, I]( // where needed. This way we can keep a thread-unsafe mutable queue. 
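// Side note, illustrative names only: the pattern on the next line hands a value from
// a callback-style API (here, a Netty event loop thread) to a cats-effect Queue
// through a Dispatcher, because the callback itself cannot run an F[_].
import cats.effect.IO
import cats.effect.std.{Dispatcher, Queue}

final class CallbackBridge(dispatcher: Dispatcher[IO], events: Queue[IO, AnyRef]) {

  // Invoked from a non-cats-effect thread; fire and forget the enqueue.
  def onUserEvent(evt: AnyRef): Unit =
    dispatcher.unsafeRunAndForget(events.offer(evt))
}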
disp.unsafeRunAndForget(eventsQueue.offer(evt)) - override def mutatePipeline[O2,I2: Socket.Decoder]( + override def mutatePipeline[O2, I2: Socket.Decoder]( mutator: ChannelPipeline => F[Unit] ): F[Socket[F, O2, I2]] = for { @@ -194,7 +195,7 @@ private final class SocketHandler[F[_]: Async: Concurrent, O, I]( // not to self: if we want to schedule an action to be done when channel is closed, can also do `ctx.channel.closeFuture.addListener` } -private object SocketHandler { +object SocketHandler { def apply[F[_]: Async: Concurrent, O, I: Socket.Decoder]( disp: Dispatcher[F], diff --git a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala similarity index 98% rename from core/src/test/scala/fs2/netty/NettyPipelineSpec.scala rename to core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala index 0d0cff7..5658df5 100644 --- a/core/src/test/scala/fs2/netty/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala @@ -1,15 +1,16 @@ -package fs2 -package netty +package fs2.netty.pipeline import cats.Eval import cats.effect.std.Dispatcher import cats.effect.testing.specs2.CatsResource import cats.effect.{IO, Resource} import cats.syntax.all._ -import fs2.netty.NettyPipelineSpec.{SharableStatefulByteBufToReadCountChannelHandler, SharableStatefulStringToReadCountChannelHandler, StatefulMessageToReadCountChannelHandler} +import fs2.Stream import fs2.netty.embedded.Fs2NettyEmbeddedChannel import fs2.netty.embedded.Fs2NettyEmbeddedChannel.CommonEncoders._ import fs2.netty.embedded.Fs2NettyEmbeddedChannel.Encoder +import fs2.netty.pipeline.NettyPipelineSpec._ +import fs2.netty.pipeline.socket.Socket import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.ChannelHandler.Sharable import io.netty.channel.socket.ChannelInputShutdownReadComplete @@ -245,9 +246,7 @@ class NettyPipelineSpec _ <- socket.isDetached.map(_ should beFalse) // When performing a no-op socket pipeline mutation - newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf](_ => - IO.unit - ) + newSocket <- socket.mutatePipeline[ByteBuf, ByteBuf](_ => IO.unit) // Then new socket should be able to receive and write ByteBuf's encoder = implicitly[Encoder[Byte]] diff --git a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/prebuilt/BytePipelineSpec.scala similarity index 98% rename from core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala rename to core/src/test/scala/fs2/netty/pipeline/prebuilt/BytePipelineSpec.scala index 14dbfb8..970a29f 100644 --- a/core/src/test/scala/fs2/netty/pipeline/BytePipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/pipeline/prebuilt/BytePipelineSpec.scala @@ -1,4 +1,4 @@ -package fs2.netty.pipeline +package fs2.netty.pipeline.prebuilt import cats.effect.std.Dispatcher import cats.effect.testing.specs2.CatsResource From d25eb8aa19d92b2e468f76e232637d4e3cef3532 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Fri, 19 Mar 2021 09:20:26 -0400 Subject: [PATCH 20/23] WIP: Start using NettyPipeline --- core/src/main/scala/fs2/netty/Network.scala | 250 +++++++++--------- .../fs2/netty/incudator/http/HttpServer.scala | 52 ++-- .../netty/pipeline/socket/SocketHandler.scala | 7 +- .../test/scala/fs2/netty/NetworkSpec.scala | 27 +- 4 files changed, 178 insertions(+), 158 deletions(-) diff --git a/core/src/main/scala/fs2/netty/Network.scala b/core/src/main/scala/fs2/netty/Network.scala index 570c3a8..8d7c635 100644 --- 
a/core/src/main/scala/fs2/netty/Network.scala +++ b/core/src/main/scala/fs2/netty/Network.scala @@ -17,15 +17,15 @@ package fs2 package netty -import cats.data.NonEmptyList import cats.effect.std.{Dispatcher, Queue} import cats.effect.{Async, Concurrent, Resource, Sync} import cats.syntax.all._ import com.comcast.ip4s.{Host, IpAddress, Port, SocketAddress} -import fs2.netty.pipeline.socket.{Socket, SocketHandler} +import fs2.netty.pipeline.NettyPipeline +import fs2.netty.pipeline.socket.Socket import io.netty.bootstrap.{Bootstrap, ServerBootstrap} -import io.netty.channel.socket.SocketChannel -import io.netty.channel.{Channel, ChannelHandler, ChannelInitializer, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} +import io.netty.buffer.ByteBuf +import io.netty.channel.{Channel, EventLoopGroup, ServerChannel, ChannelOption => JChannelOption} import java.net.InetSocketAddress import java.util.concurrent.ThreadFactory @@ -41,40 +41,57 @@ final class Network[F[_]: Async] private ( def client( addr: SocketAddress[Host], - options: List[ChannelOption] = Nil - ): Resource[F, Socket[F, Byte, Byte]] = - Dispatcher[F] flatMap { disp => - Resource suspend { - Concurrent[F].deferred[Socket[F, Byte, Byte]] flatMap { d => - addr.host.resolve[F] flatMap { resolved => - Sync[F] delay { - val bootstrap = new Bootstrap - bootstrap - .group(child) - .channel(clientChannelClazz) - .option( - JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], - false - ) // backpressure - .handler(initializer(disp)(d.complete(_).void)) - - options.foreach(opt => bootstrap.option(opt.key, opt.value)) - - val connectChannel = Sync[F] defer { - val cf = - bootstrap.connect(resolved.toInetAddress, addr.port.value) - fromNettyFuture[F](cf.pure[F]).as(cf.channel()) - } - - Resource - .make(connectChannel <* d.get)(ch => - fromNettyFuture(Sync[F].delay(ch.close())).void - ) - .evalMap(_ => d.get) - } - } + options: List[ChannelOption] + ): Resource[F, Socket[F, ByteBuf, ByteBuf]] = + for { + disp <- Dispatcher[F] + pipeline <- Resource.eval(NettyPipeline(disp)) + c <- client(addr, pipeline, options) + } yield c + + def client[O, I]( + addr: SocketAddress[Host], + pipelineInitializer: NettyChannelInitializer[F, O, I], + options: List[ChannelOption] + ): Resource[F, Socket[F, O, I]] = + Resource.suspend { + for { + futureSocket <- Concurrent[F].deferred[Socket[F, O, I]] + + initializer <- pipelineInitializer.toSocketChannelInitializer( + futureSocket.complete(_).void + ) + + resolvedHost <- addr.host.resolve[F] + + bootstrap <- Sync[F].delay { + val bootstrap = new Bootstrap + bootstrap + .group(child) + .channel(clientChannelClazz) + .option( + JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], + false + ) // backpressure TODO: backpressure creating the connection or is this reads? + .handler(initializer) + + options.foreach(opt => bootstrap.option(opt.key, opt.value)) + bootstrap } - } + + // TODO: Log properly as info, debug, or trace. Or send as an event to another stream. Maybe the whole network could have an event stream. 
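// Usage sketch for the ByteBuf client introduced in this hunk; `connect` is a
// hypothetical helper and IO stands in for any Async effect.
import cats.effect.{IO, Resource}
import com.comcast.ip4s.{Host, SocketAddress}
import fs2.netty.Network
import fs2.netty.pipeline.socket.Socket
import io.netty.buffer.ByteBuf

object ClientSketch {

  def connect(
      addr: SocketAddress[Host]
  ): Resource[IO, Socket[IO, ByteBuf, ByteBuf]] =
    Network[IO].flatMap(net => net.client(addr, options = Nil))
}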
+ _ <- Sync[F].delay(println(bootstrap.config())) + + connectChannel = Sync[F] defer { + val cf = + bootstrap.connect(resolvedHost.toInetAddress, addr.port.value) + fromNettyFuture[F](cf.pure[F]).as(cf.channel()) + } + } yield Resource + .make(connectChannel <* futureSocket.get)(ch => + fromNettyFuture(Sync[F].delay(ch.close())).void + ) + .evalMap(_ => futureSocket.get) } //TODO: Add back default args for opts, removed to fix compilation error for overloaded method @@ -82,132 +99,101 @@ final class Network[F[_]: Async] private ( host: Option[Host], port: Port, options: List[ChannelOption] - ): Stream[F, Socket[F, Byte, Byte]] = + ): Stream[F, Socket[F, ByteBuf, ByteBuf]] = Stream.resource(serverResource(host, Some(port), options)).flatMap(_._2) // TODO: maybe here it's nicer to have the I first then O?, or will that be confusing if Socket has reversed order? def server[O, I: Socket.Decoder]( host: Option[Host], port: Port, - handlers: NonEmptyList[ChannelHandler], + pipelineInitializer: NettyChannelInitializer[F, O, I], options: List[ChannelOption] ): Stream[F, Socket[F, O, I]] = Stream - .resource(serverResource[O, I](host, Some(port), handlers, options)) + .resource( + serverResource[O, I](host, Some(port), pipelineInitializer, options) + ) .flatMap(_._2) def serverResource( host: Option[Host], port: Option[Port], options: List[ChannelOption] - ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, Byte, Byte]])] = - serverResource(host, port, handlers = Nil, options) + ): Resource[ + F, + (SocketAddress[IpAddress], Stream[F, Socket[F, ByteBuf, ByteBuf]]) + ] = + for { + dispatcher <- Dispatcher[F] + pipeline <- Resource.eval(NettyPipeline[F](dispatcher)) + sr <- serverResource(host, port, pipeline, options) + } yield sr def serverResource[O, I: Socket.Decoder]( host: Option[Host], port: Option[Port], - handlers: NonEmptyList[ChannelHandler], + pipelineInitializer: NettyChannelInitializer[F, O, I], options: List[ChannelOption] ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, O, I]])] = - serverResource(host, port, handlers.toList, options) + Resource suspend { + for { + clientConnections <- Queue.unbounded[F, Socket[F, O, I]] - private def serverResource[O, I: Socket.Decoder]( - host: Option[Host], - port: Option[Port], - handlers: List[ChannelHandler], - options: List[ChannelOption] - ): Resource[F, (SocketAddress[IpAddress], Stream[F, Socket[F, O, I]])] = - for { - dispatcher <- Dispatcher[F] + resolvedHost <- host.traverse(_.resolve[F]) - res <- Resource suspend { - for { - clientConnections <- Queue.unbounded[F, Socket[F, O, I]] - - resolvedHost <- host.traverse(_.resolve[F]) - - bootstrap <- Sync[F] delay { - val bootstrap = new ServerBootstrap - bootstrap - .group(parent, child) - .option( - JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], - false - ) // backpressure for accepting connections, not reads on any individual connection - //.childOption() TODO: Any useful ones? - .channel(serverChannelClazz) - .childHandler(new ChannelInitializer[SocketChannel] { - override def initChannel(ch: SocketChannel): Unit = { - val p = ch.pipeline() - ch.config().setAutoRead(false) - - handlers.foldLeft(p)((pipeline, handler) => - pipeline.addLast(handler) - ) - // TODO: read up on CE3 Dispatcher, how is it different than Context Switch? Is this taking place async? 
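// Usage sketch for the ByteBuf serverResource above: an echo server that writes every
// read back to its sender. `echoServer` is a hypothetical helper; the retain() mirrors
// the echo test later in this series, since the read side releases buffers after they
// are delivered.
import cats.effect.IO
import com.comcast.ip4s.Port
import fs2.Stream
import fs2.netty.Network

object EchoServerSketch {

  def echoServer(net: Network[IO], port: Option[Port]): Stream[IO, Unit] =
    Stream
      .resource(net.serverResource(host = None, port = port, options = Nil))
      .flatMap { case (_, connections) =>
        connections.flatMap { socket =>
          socket.reads
            .evalTap(bb => IO(bb.retain()))
            .through(socket.writes)
        }
      }
}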
- dispatcher.unsafeRunAndForget { - SocketHandler[F, O, I](dispatcher, ch) - .flatTap(h => Sync[F].delay(p.addLast(h))) - .flatMap(clientConnections.offer) - } - } - }) - - options.foreach(opt => bootstrap.option(opt.key, opt.value)) - bootstrap - } + socketInitializer <- pipelineInitializer.toSocketChannelInitializer( + clientConnections.offer + ) - // TODO: Log properly as info, debug, or trace - _ <- Sync[F].delay(println(bootstrap.config())) + bootstrap <- Sync[F] delay { + val bootstrap = new ServerBootstrap + bootstrap + .group(parent, child) + .option( + JChannelOption.AUTO_READ.asInstanceOf[JChannelOption[Any]], + false + ) // backpressure for accepting connections, not reads on any individual connection + //.childOption() TODO: Any useful ones? + .channel(serverChannelClazz) + .childHandler(socketInitializer) + + options.foreach(opt => bootstrap.option(opt.key, opt.value)) + bootstrap + } - // TODO: is the right name? Bind uses the parent ELG that calla TCP accept which yields a connection to child ELG? - tcpAcceptChannel = Sync[F] defer { - val cf = bootstrap.bind( - resolvedHost.map(_.toInetAddress).orNull, - port.map(_.value).getOrElse(0) - ) - fromNettyFuture[F](cf.pure[F]).as(cf.channel()) - } - } yield Resource - .make(tcpAcceptChannel) { ch => - fromNettyFuture[F](Sync[F].delay(ch.close())).void - } - .evalMap { ch => - Sync[F] - .delay( - SocketAddress.fromInetSocketAddress( - ch.localAddress().asInstanceOf[InetSocketAddress] - ) + // TODO: Log properly as info, debug, or trace. Also can print localAddress + _ <- Sync[F].delay(println(bootstrap.config())) + + // TODO: is the right name? Bind uses the parent ELG that calla TCP accept which yields a connection to child ELG? + tcpAcceptChannel = Sync[F] defer { + val cf = bootstrap.bind( + resolvedHost.map(_.toInetAddress).orNull, + port.map(_.value).getOrElse(0) + ) + fromNettyFuture[F](cf.pure[F]).as(cf.channel()) + } + } yield Resource + .make(tcpAcceptChannel) { ch => + fromNettyFuture[F](Sync[F].delay(ch.close())).void + } + .evalMap { ch => + Sync[F] + .delay( + SocketAddress.fromInetSocketAddress( + ch.localAddress().asInstanceOf[InetSocketAddress] ) - .tupleRight( - Stream.repeatEval( - Sync[F].delay(ch.read()) *> clientConnections.take - ) + ) + .tupleRight( + Stream.repeatEval( + Sync[F].delay(ch.read()) *> clientConnections.take ) - } - } - } yield res + ) + } + } implicit val decoder: Socket.Decoder[Byte] = new Socket.Decoder[Byte] { override def decode(x: AnyRef): Either[String, Byte] = ??? 
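// Side note, illustration only: a concrete Socket.Decoder instance (a hypothetical
// String decoder mirroring the FullHttpRequest decoder in HttpServer). This is the
// evidence required by the `I: Socket.Decoder` context bounds on `server`,
// `serverResource` and `mutatePipeline`.
import cats.syntax.all._
import fs2.netty.pipeline.socket.Socket

object StringDecoderSketch {

  implicit val stringDecoder: Socket.Decoder[String] =
    new Socket.Decoder[String] {

      override def decode(x: AnyRef): Either[String, String] = x match {
        case s: String => s.asRight[String]
        case _ => "pipeline did not emit a String".asLeft[String]
      }
    }
}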
} - - private[this] def initializer(disp: Dispatcher[F])( - result: Socket[F, Byte, Byte] => F[Unit] - ): ChannelInitializer[SocketChannel] = - new ChannelInitializer[SocketChannel] { - - def initChannel(ch: SocketChannel) = { - val p = ch.pipeline() - ch.config().setAutoRead(false) - - disp unsafeRunAndForget { - SocketHandler[F, Byte, Byte](disp, ch) flatMap { s => - Sync[F].delay(p.addLast(s)) *> result(s) - } - } - } - } } object Network { diff --git a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala index 89fe362..31886ee 100644 --- a/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala +++ b/core/src/main/scala/fs2/netty/incudator/http/HttpServer.scala @@ -16,11 +16,13 @@ package fs2.netty.incudator.http -import cats.data.NonEmptyList +import cats.Eval +import cats.effect.std.Dispatcher import cats.effect.{Async, Resource} import cats.syntax.all._ import fs2.Stream import fs2.netty.Network +import fs2.netty.pipeline.NettyPipeline import fs2.netty.pipeline.socket.Socket import io.netty.handler.codec.http._ import io.netty.handler.timeout.ReadTimeoutHandler @@ -43,26 +45,42 @@ object HttpServer { for { network <- Network[F] - rawHttpClientConnection <- network - .serverResource[FullHttpResponse, FullHttpRequest]( - host = None, - port = None, - handlers = NonEmptyList.of( - new HttpServerCodec( - httpConfigs.parsing.maxInitialLineLength, - httpConfigs.parsing.maxHeaderSize, - httpConfigs.parsing.maxChunkSize + dispatcher <- Dispatcher[F] + + pipeline <- Resource.eval( + NettyPipeline[F, FullHttpResponse, FullHttpRequest]( + dispatcher, + List( + Eval.always( + new HttpServerCodec( + httpConfigs.parsing.maxInitialLineLength, + httpConfigs.parsing.maxHeaderSize, + httpConfigs.parsing.maxChunkSize + ) ), - new HttpServerKeepAliveHandler, - new HttpObjectAggregator( - httpConfigs.parsing.maxHttpContentLength + Eval.always(new HttpServerKeepAliveHandler), + Eval.always( + new HttpObjectAggregator( + httpConfigs.parsing.maxHttpContentLength + ) ), - new ReadTimeoutHandler( // TODO: this also closes channel when exception is fired, should HttpClientConnection just handle that Idle Events? - httpConfigs.requestTimeoutPeriod.length, - httpConfigs.requestTimeoutPeriod.unit + // TODO: this also closes channel when exception is fired, should HttpClientConnection just handle that Idle Events? 
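// Side note on the Eval.always wrapping used in this handler list (general Netty
// behaviour, not specific to this repo): handlers that keep per-connection state are
// not @Sharable, so each channel needs a fresh instance, and Eval.always re-runs its
// body on every .value call (unlike Eval.now and Eval.later, which evaluate at most
// once). `EvalAlwaysSketch` and its members are illustrative names.
import cats.Eval
import io.netty.handler.codec.http.HttpObjectAggregator

object EvalAlwaysSketch {

  val freshAggregator: Eval[HttpObjectAggregator] =
    Eval.always(new HttpObjectAggregator(65536))

  // Two materialisations yield two distinct handler instances; returns true.
  def distinctInstances: Boolean = freshAggregator.value ne freshAggregator.value
}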
+ Eval.always( + new ReadTimeoutHandler( + httpConfigs.requestTimeoutPeriod.length, + httpConfigs.requestTimeoutPeriod.unit + ) ) // new HttpPipeliningBlockerHandler - ), + ) + ) + ) + + rawHttpClientConnection <- network + .serverResource( + host = None, + port = None, + pipeline, options = Nil ) .map(_._2) diff --git a/core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala b/core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala index 9f805cf..e03afb2 100644 --- a/core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala +++ b/core/src/main/scala/fs2/netty/pipeline/socket/SocketHandler.scala @@ -27,7 +27,7 @@ import io.netty.channel._ import io.netty.handler.flow.FlowControlHandler import io.netty.util.ReferenceCountUtil -final class SocketHandler[F[_]: Async: Concurrent, O, I] private( +final class SocketHandler[F[_]: Async: Concurrent, O, I] private ( disp: Dispatcher[F], private var channel: Channel, readsQueue: Queue[F, Option[Either[Throwable, I]]], @@ -87,7 +87,7 @@ final class SocketHandler[F[_]: Async: Concurrent, O, I] private( override def write(output: O): F[Unit] = fromNettyFuture[F]( - /* Sync[F].delay(println(debug(output))) *> */ Sync[F].delay( + /*Sync[F].delay(println(s"Write ${debug(output)}")) *>*/ Sync[F].delay( channel.writeAndFlush(output) ) ).void @@ -120,6 +120,7 @@ final class SocketHandler[F[_]: Async: Concurrent, O, I] private( case Right(i) => // TODO: what's the perf impact of unsafeRunSync-only vs. unsafeRunAndForget-&-FlowControlHandler? +// println(s"READ ${debug(msg)}") disp.unsafeRunAndForget(readsQueue.offer(i.asRight[Exception].some)) } @@ -132,7 +133,7 @@ final class SocketHandler[F[_]: Async: Concurrent, O, I] private( new String(arr) case _ => - "" + "blah" } override def exceptionCaught(ctx: ChannelHandlerContext, t: Throwable) = diff --git a/core/src/test/scala/fs2/netty/NetworkSpec.scala b/core/src/test/scala/fs2/netty/NetworkSpec.scala index 26ba7c5..fa1b029 100644 --- a/core/src/test/scala/fs2/netty/NetworkSpec.scala +++ b/core/src/test/scala/fs2/netty/NetworkSpec.scala @@ -19,7 +19,6 @@ package netty import cats.effect.IO import cats.effect.testing.specs2.CatsResource - import org.specs2.mutable.SpecificationLike class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { @@ -31,8 +30,21 @@ class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { Network[IO].use_.as(ok) } + // TODO: this is a tricky test to pass. It passes the same ByteBuf from client to server. This is problematic b/c + // SocketHandler releases after reads, so server releases then client. But Netty throws exception b/c it's + // already been released. Another issue is comparing ByteBuf results, the equal method fails on ByteBuf's with + // refcount == 0 as there's no data to read (Netty throws exception). + // The expectation for typical server/client IO, is that Netty releases the ByteBuf's after writing. This test also + // passed before, so perhaps a new bug. Old client would have created a BteBuf from bytes, old server SocketHandler + // would have copied out the bytes, released ByteBuf, then when writing, it would create a new ByteBuf which client + // would read/release in it's SocketHandler. So there should have been 2 ByteBuf's...need to validate that. + // Also what to do when there's just one?! Is this just something in tests or localhost? Maybe SocketHandler + // shouldn't release??? 
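// For reference while reading the comment above (standard Netty reference counting,
// not specific to this repo): a ByteBuf starts at refCnt == 1, retain() increments,
// release() decrements, and once the count hits 0 the buffer is considered
// deallocated and further reads throw IllegalReferenceCountException.
import io.netty.buffer.Unpooled

object RefCountSketch {

  def demo(): Unit = {
    val buf = Unpooled.wrappedBuffer("echo".getBytes)
    assert(buf.refCnt() == 1)
    buf.retain() // now safe to hand to a second consumer
    assert(buf.refCnt() == 2)
    buf.release() // first consumer done
    buf.release() // second consumer done; buffer is released
    assert(buf.refCnt() == 0)
  }
}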
// "support a simple echo use-case" in withResource { net => -// val data = List[Byte](1, 2, 3, 4, 5, 6, 7) +//// val data = List[Byte](1, 2, 3, 4, 5, 6, 7).map(byte => { +// val data = List[String]("G").map(str => { +// Unpooled.wrappedBuffer(str.getBytes()) +// }) // // val rsrc = net.serverResource(None, None, Nil) flatMap { // case (isa, incoming) => @@ -43,17 +55,20 @@ class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { // for { // _ <- handler.compile.drain.background // -// results <- net.client(isa) flatMap { socket => -// Stream.emits(data) +// results <- net.client(isa, options = Nil) flatMap { socket => +// Stream +// .emits(data) // .through(socket.writes) // .merge(socket.reads) // .take(data.length.toLong) -// .compile.resource.toList +// .compile +// .resource +// .toList // } // } yield results // } // -// rsrc.use(IO.pure(_)) flatMap { results => +// rsrc.use(IO.pure) flatMap { results => // IO { // results mustEqual data // } From b7e8f1047563262d3300b769ff350d77deb68ded Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sat, 1 May 2021 22:33:18 -0400 Subject: [PATCH 21/23] WIP: Pass echo test --- .../test/scala/fs2/netty/NetworkSpec.scala | 89 ++++++++++--------- 1 file changed, 45 insertions(+), 44 deletions(-) diff --git a/core/src/test/scala/fs2/netty/NetworkSpec.scala b/core/src/test/scala/fs2/netty/NetworkSpec.scala index fa1b029..f963abc 100644 --- a/core/src/test/scala/fs2/netty/NetworkSpec.scala +++ b/core/src/test/scala/fs2/netty/NetworkSpec.scala @@ -19,8 +19,11 @@ package netty import cats.effect.IO import cats.effect.testing.specs2.CatsResource +import io.netty.buffer.Unpooled import org.specs2.mutable.SpecificationLike +import java.nio.charset.Charset + class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { val resource = Network[IO] @@ -30,49 +33,47 @@ class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { Network[IO].use_.as(ok) } - // TODO: this is a tricky test to pass. It passes the same ByteBuf from client to server. This is problematic b/c - // SocketHandler releases after reads, so server releases then client. But Netty throws exception b/c it's - // already been released. Another issue is comparing ByteBuf results, the equal method fails on ByteBuf's with - // refcount == 0 as there's no data to read (Netty throws exception). - // The expectation for typical server/client IO, is that Netty releases the ByteBuf's after writing. This test also - // passed before, so perhaps a new bug. Old client would have created a BteBuf from bytes, old server SocketHandler - // would have copied out the bytes, released ByteBuf, then when writing, it would create a new ByteBuf which client - // would read/release in it's SocketHandler. So there should have been 2 ByteBuf's...need to validate that. - // Also what to do when there's just one?! Is this just something in tests or localhost? Maybe SocketHandler - // shouldn't release??? 
-// "support a simple echo use-case" in withResource { net => -//// val data = List[Byte](1, 2, 3, 4, 5, 6, 7).map(byte => { -// val data = List[String]("G").map(str => { -// Unpooled.wrappedBuffer(str.getBytes()) -// }) -// -// val rsrc = net.serverResource(None, None, Nil) flatMap { -// case (isa, incoming) => -// val handler = incoming flatMap { socket => -// socket.reads.through(socket.writes) -// } -// -// for { -// _ <- handler.compile.drain.background -// -// results <- net.client(isa, options = Nil) flatMap { socket => -// Stream -// .emits(data) -// .through(socket.writes) -// .merge(socket.reads) -// .take(data.length.toLong) -// .compile -// .resource -// .toList -// } -// } yield results -// } -// -// rsrc.use(IO.pure) flatMap { results => -// IO { -// results mustEqual data -// } -// } -// } + "support a simple echo use-case" in withResource { net => + val data = (1 to 2) // TODO: this breaks with 3; client writes the last element, but server never reads. + .map(_.toString) + .toList + .map(str => { + (Unpooled.wrappedBuffer(str.getBytes()), str) + }) + + val rsrc = net.serverResource(None, None, Nil) flatMap { + case (isa, incoming) => + val handler = incoming flatMap { socket => + socket.reads + // Without retain, the client seemingly gets the exact same ByteBuf that server processes. This results + // in exceptions as both client and server release ByteBuf. The root cause is unclear. + .evalTap(bb => IO(bb.retain())) + .through(socket.writes) + } + + for { + _ <- handler.compile.drain.background + + results <- net.client(isa, options = Nil) flatMap { cSocket => + Stream + .emits(data) + .map(_._1) + .through(cSocket.writes) + .merge(cSocket.reads) + .take(data.length.toLong) + .evalMap(bb => IO(bb.toString(Charset.defaultCharset()))) + .compile + .resource + .toList + } + } yield results + } + + rsrc.use(IO.pure) flatMap { results => + IO { + results mustEqual data.map(_._2) + } + } + } } } From a3e408fa41c6ab372bab3b5c1d900f4472074b87 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Sat, 6 Nov 2021 22:41:00 -0400 Subject: [PATCH 22/23] Fix test --- .../test/scala/fs2/netty/NetworkSpec.scala | 54 ++++++++++++------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/core/src/test/scala/fs2/netty/NetworkSpec.scala b/core/src/test/scala/fs2/netty/NetworkSpec.scala index f963abc..6079a3a 100644 --- a/core/src/test/scala/fs2/netty/NetworkSpec.scala +++ b/core/src/test/scala/fs2/netty/NetworkSpec.scala @@ -17,16 +17,17 @@ package fs2 package netty -import cats.effect.IO import cats.effect.testing.specs2.CatsResource -import io.netty.buffer.Unpooled +import cats.effect.{IO, Resource} +import io.netty.buffer.{ByteBuf, Unpooled} import org.specs2.mutable.SpecificationLike import java.nio.charset.Charset +import scala.collection.mutable.ListBuffer class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { - val resource = Network[IO] + override val resource: Resource[IO, Network[IO]] = Network[IO] "network tcp sockets" should { "create a network instance" in { @@ -34,19 +35,12 @@ class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { } "support a simple echo use-case" in withResource { net => - val data = (1 to 2) // TODO: this breaks with 3; client writes the last element, but server never reads. 
- .map(_.toString) - .toList - .map(str => { - (Unpooled.wrappedBuffer(str.getBytes()), str) - }) + val msg = "Echo me" val rsrc = net.serverResource(None, None, Nil) flatMap { - case (isa, incoming) => + case (ip, incoming) => val handler = incoming flatMap { socket => socket.reads - // Without retain, the client seemingly gets the exact same ByteBuf that server processes. This results - // in exceptions as both client and server release ByteBuf. The root cause is unclear. .evalTap(bb => IO(bb.retain())) .through(socket.writes) } @@ -54,26 +48,50 @@ class NetworkSpec extends CatsResource[IO, Network[IO]] with SpecificationLike { for { _ <- handler.compile.drain.background - results <- net.client(isa, options = Nil) flatMap { cSocket => + results <- net.client(ip, options = Nil) flatMap { cSocket => Stream - .emits(data) - .map(_._1) + // Send individual bytes as the simplest use case + .emits(msg.getBytes) + .evalMap(byteToByteBuf) .through(cSocket.writes) .merge(cSocket.reads) - .take(data.length.toLong) - .evalMap(bb => IO(bb.toString(Charset.defaultCharset()))) + .flatMap(byteBufToStream) + .take(msg.length.toLong) + .map(byteToString) .compile .resource .toList + .map(_.mkString) } } yield results } rsrc.use(IO.pure) flatMap { results => IO { - results mustEqual data.map(_._2) + results mustEqual msg } } } } + + private def byteToByteBuf(byte: Byte): IO[ByteBuf] = IO { + val arr = new Array[Byte](1) + arr(0) = byte + Unpooled.wrappedBuffer(new String(arr).getBytes()) + } + + private def byteBufToStream(bb: ByteBuf): Stream[IO, Byte] = { + val buffer = new ListBuffer[Byte] + bb.forEachByte((value: Byte) => { + val _ = buffer.addOne(value) + true + }) + Stream.fromIterator[IO].apply[Byte](buffer.iterator, 1) + } + + private def byteToString(b: Byte): String = { + val arr = new Array[Byte](1) + arr(0) = b + new String(arr, Charset.defaultCharset()) + } } From 7ba73c2031d262ff606eed77942a82d96195f995 Mon Sep 17 00:00:00 2001 From: Michael Mienko Date: Thu, 11 Nov 2021 21:50:00 -0500 Subject: [PATCH 23/23] Clean up tests around basic pipeline --- .../EmbeddedChannelWithAutoRead.scala | 14 +- .../embedded/Fs2NettyEmbeddedChannel.scala | 17 + .../netty/pipeline/NettyPipelineSpec.scala | 315 ++++++++++++------ 3 files changed, 234 insertions(+), 112 deletions(-) diff --git a/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala b/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala index 07121c1..fa67f68 100644 --- a/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala +++ b/core/src/main/scala/fs2/netty/embedded/EmbeddedChannelWithAutoRead.scala @@ -34,6 +34,8 @@ class EmbeddedChannelWithAutoRead extends EmbeddedChannel { private lazy val tempInboundMessages = new util.ArrayDeque[util.AbstractMap.SimpleEntry[Any, ChannelPromise]]() + def areInboundMessagesBuffered: Boolean = !tempInboundMessages.isEmpty + def writeInboundFixed(msgs: Any*): Boolean = { ensureOpen() if (msgs.isEmpty) @@ -137,16 +139,4 @@ class EmbeddedChannelWithAutoRead extends EmbeddedChannel { } } - - private def debug(x: Any) = x match { - case bb: ByteBuf => - val b = bb.readByte() - bb.resetReaderIndex() - val arr = Array[Byte](1) - arr(0) = b - new String(arr) - - case _ => - "" - } } diff --git a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala index 8a9e3f4..314e25f 100644 --- a/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala +++ 
b/core/src/main/scala/fs2/netty/embedded/Fs2NettyEmbeddedChannel.scala @@ -25,6 +25,9 @@ import fs2.netty.pipeline.socket.Socket import io.netty.buffer.{ByteBuf, Unpooled} import io.netty.channel.embedded.EmbeddedChannel +import java.util +import java.util.Queue + /** * Better, safer, and clearer api for testing channels * For use in tests only. @@ -67,6 +70,18 @@ final case class Fs2NettyEmbeddedChannel[F[_]] private ( def flushInbound(): F[Unit] = F.delay(underlying.flushInbound()).void + def flushOutbound(): F[Unit] = F.delay(underlying.flushOutbound()).void + + def inboundMessages: F[util.Queue[AnyRef]] = + F.delay(underlying.inboundMessages()) + + def outboundMessages: F[util.Queue[AnyRef]] = + F.delay(underlying.outboundMessages()) + + def runScheduledPendingTasks: F[Long] = F.delay { + underlying.runScheduledPendingTasks() + } + def isOpen: F[Boolean] = F.pure(underlying.isOpen) def isClosed: F[Boolean] = F.pure(!underlying.isOpen) @@ -76,6 +91,8 @@ final case class Fs2NettyEmbeddedChannel[F[_]] private ( object Fs2NettyEmbeddedChannel { + val NoTasksToRun: Long = -1L + def apply[F[_], O, I]( initializer: NettyChannelInitializer[F, O, I] )(implicit F: Async[F]): F[(Fs2NettyEmbeddedChannel[F], Socket[F, O, I])] = diff --git a/core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala b/core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala index 5658df5..38267e7 100644 --- a/core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala +++ b/core/src/test/scala/fs2/netty/pipeline/NettyPipelineSpec.scala @@ -1,7 +1,7 @@ package fs2.netty.pipeline import cats.Eval -import cats.effect.std.Dispatcher +import cats.effect.std.{Dispatcher, Queue} import cats.effect.testing.specs2.CatsResource import cats.effect.{IO, Resource} import cats.syntax.all._ @@ -21,157 +21,230 @@ import io.netty.handler.codec.string.StringDecoder import io.netty.util.ReferenceCountUtil import org.specs2.mutable.SpecificationLike +import java.nio.channels.ClosedChannelException import java.util +import scala.collection.mutable.ListBuffer import scala.concurrent.duration._ class NettyPipelineSpec extends CatsResource[IO, Dispatcher[IO]] with SpecificationLike { - // TODO: where does 10s timeout come from? override val resource: Resource[IO, Dispatcher[IO]] = Dispatcher[IO] - "default pipeline, i.e. no extra Channel handlers" should { - "zero reads in Netty corresponds to an empty fs2-netty ByteBuf reads stream" in withResource { - dispatcher => + "default pipeline, i.e. no extra Channel handlers and reads and writes are on ByteBuf's" should { + "no activity on Netty channel should correspond to no activity on socket and vice-versa" in withResource { + implicit dispatcher => for { - pipeline <- NettyPipeline[IO](dispatcher) - socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf]( - pipeline - ).map(_._2) + // Given a socket and embedded channel from the default Netty Pipeline + x <- NettyEmbeddedChannelWithByteBufPipeline + (channel, socket) = x + + // Then configs should be setup for backpressure + _ <- IO(channel.underlying.config().isAutoRead should beFalse) + _ <- IO(channel.underlying.config().isAutoClose should beTrue) + _ <- IO( + channel.underlying + .config() + .getWriteBufferLowWaterMark shouldEqual 32 * 1024 + ) + _ <- IO( + channel.underlying + .config() + .getWriteBufferHighWaterMark shouldEqual 64 * 1024 + ) + // When flushing inbound events, i.e. 
calling read complete, on an empty channel + _ <- channel.flushInbound() + + // Then there are no reads on socket reads stream + // TODO: what's the canonical way to check for empty stream? reads <- socket.reads - .interruptAfter(1.second) + .interruptAfter(Duration.Zero) .compile - .toList // TODO: what's the proper way to check for empty stream? - } yield reads should beEmpty - } + .toList + _ <- IO(reads should beEmpty) - "zero events in Netty pipeline corresponds to an empty fs2-netty events stream" in withResource { - dispatcher => - for { - pipeline <- NettyPipeline[IO](dispatcher) - socket <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf]( - pipeline - ).map(_._2) + // When trigger Netty to run events when there aren't any to run + nextTaskTime <- channel.runScheduledPendingTasks + _ <- IO(nextTaskTime shouldEqual Fs2NettyEmbeddedChannel.NoTasksToRun) + // Then there should be no events on the socket events stream events: List[AnyRef] <- socket.events - .interruptAfter(1.second) + .interruptAfter(Duration.Zero) .compile .toList - } yield events should beEmpty + _ <- IO(events should beEmpty) + + // When there's no activity on Netty channel, but channel is still active + _ <- IO(channel.isOpen) + + // Then there should not be any exceptions + isOpen <- socket.isOpen + _ <- IO(isOpen shouldEqual true) + isClosed <- socket.isClosed + _ <- IO(isClosed shouldEqual false) + + // When there's no activity on socket writes + _ <- channel.flushOutbound() + + // Then there's no message on Netty channel outbound queue + writes <- channel.outboundMessages + _ <- IO(writes.isEmpty shouldEqual true) + + // And finally, no exceptions on the Netty channel + _ <- IO(channel.underlying.checkException()) + } yield ok } - "reads from Netty appear in fs2-netty as reads stream as ByteBuf objects" in withResource { - dispatcher => + "reading on socket without backpressure results in from Netty reading onto its channel" in withResource { + implicit dispatcher => for { // Given a socket and embedded channel from the default Netty Pipeline - pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) + x <- NettyEmbeddedChannelWithByteBufPipeline (channel, socket) = x - // Then configs should be setup, like autoread should be false...maybe move to top test? - _ <- IO(channel.underlying.config().isAutoRead should beFalse) - _ <- IO(channel.underlying.config().isAutoClose should beTrue) + // And list of ByteBuf's + byteBufs <- IO( + List( + Unpooled.wrappedBuffer("hello".getBytes), + Unpooled.buffer(1, 1).writeByte(' '), + Unpooled.copiedBuffer("world".getBytes) + ) + ) - // And list of single byte ByteBuf's - encoder = implicitly[Encoder[Byte]] - byteBufs = "hello world".getBytes().map(encoder.encode) + // And a socket that doesn't backpressure reads, i.e. 
always accepts elements from stream + queue <- Queue.unbounded[IO, String] + _ <- socket.reads + .flatMap(byteBufToByteStream) + .map(byteToString) + .evalMap(queue.offer) + .compile + .drain + .background + .use { _ => + for { + // When writing each ByteBuf individually to the channel + _ <- channel + .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) + + // Then messages should be consumed from Netty + _ <- IO.sleep(200.millis) + _ <- IO( + channel.underlying.areInboundMessagesBuffered shouldEqual false + ) + + // And reads on socket yield the original message sent on channel + str <- (0 until "hello world".length).toList + .traverseFilter(_ => queue.tryTake) + .map(_.mkString) + _ <- IO(str shouldEqual "hello world") + + // And ByteBuf's should be released + _ <- IO(byteBufs.map(_.refCnt()) shouldEqual List.fill(3)(0)) + } yield () + } + } yield ok + } - // When writing each ByteBuf individually to the channel + "backpressure on socket reads results in Netty NOT reading onto its channel" in withResource { + implicit dispatcher => + for { + // Given a socket and embedded channel from the default Netty Pipeline + x <- NettyEmbeddedChannelWithByteBufPipeline + (channel, socket) = x + + // And list of ByteBuf's + byteBufs <- IO( + List( + Unpooled.wrappedBuffer("hello".getBytes), + Unpooled.buffer(1, 1).writeByte(' '), + Unpooled.copiedBuffer("world".getBytes) + ) + ) + + // And a socket with backpressure, i.e. socket reads aren't being consumed + + // When writing each ByteBuf to the channel areMsgsAdded <- channel .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*) - // Then messages aren't added to the inbound buffer because autoread should be off + // Then messages are NOT added onto the Netty channel + _ <- IO.sleep( + 200.millis + ) // give fs2-Netty chance to read like in non-backpressure test _ <- IO(areMsgsAdded should beFalse) + _ <- IO( + channel.underlying.areInboundMessagesBuffered shouldEqual true + ) // And reads on socket yield the original message sent on channel str <- socket.reads - .map(_.readByte()) + .flatMap(byteBufToByteStream) .take(11) - .foldMap(byteToString) + .map(byteToString) .compile - .last - _ <- IO(str shouldEqual "hello world".some) + .toList + .map(_.mkString) + _ <- IO(str shouldEqual "hello world") // And ByteBuf's should be released - _ <- IO(byteBufs.map(_.refCnt()) shouldEqual Array.fill(11)(0)) + _ <- IO(byteBufs.map(_.refCnt()) shouldEqual List.fill(3)(0)) } yield ok } - "writing ByteBuf's onto fs2-netty socket appear on Netty's channel" in withResource { - dispatcher => + "writing onto fs2-netty socket appear on Netty's channel" in withResource { + implicit dispatcher => for { - pipeline <- NettyPipeline[IO](dispatcher) - x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline) + // Given a socket and embedded channel from the default Netty Pipeline + x <- NettyEmbeddedChannelWithByteBufPipeline (channel, socket) = x - encoder = implicitly[Encoder[Byte]] - byteBufs = "hello world".getBytes().map(encoder.encode).toList - // TODO: make resource? 
-    "writing ByteBuf's onto fs2-netty socket appear on Netty's channel" in withResource {
-      dispatcher =>
+    "writing onto fs2-netty socket appears on Netty's channel" in withResource {
+      implicit dispatcher =>
         for {
-          pipeline <- NettyPipeline[IO](dispatcher)
-          x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
+          // Given a socket and embedded channel from the default Netty Pipeline
+          x <- NettyEmbeddedChannelWithByteBufPipeline
           (channel, socket) = x
 
-          encoder = implicitly[Encoder[Byte]]
-          byteBufs = "hello world".getBytes().map(encoder.encode).toList
-          // TODO: make resource?
-//          _ <- IO.unit.guarantee(IO(byteBufs.foreach(ReferenceCountUtil.release)))
+          // And list of ByteBuf's
+          byteBufs <- IO(
+            List(
+              Unpooled.wrappedBuffer("hello".getBytes),
+              Unpooled.buffer(1, 1).writeByte(' '),
+              Unpooled.copiedBuffer("world".getBytes)
+            )
+          )
 
+          // When writing each ByteBuf to the socket
           _ <- byteBufs.traverse(socket.write)
 
-          str <- (0 until 11).toList
-            .traverse { _ =>
-              IO(channel.underlying.readOutbound[ByteBuf]())
-                .flatMap(bb => IO(bb.readByte()))
-            }
-            .map(_.toArray)
-            .map(new String(_))
-
-          _ <- IO(str shouldEqual "hello world")
-        } yield ok
-    }
-
-    "piping any reads to writes just echos back ByteBuf's written onto Netty's channel" in withResource {
-      dispatcher =>
-        for {
-          pipeline <- NettyPipeline[IO](dispatcher)
-          x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
-          (channel, socket) = x
-
-          encoder = implicitly[Encoder[Byte]]
-          byteBufs = "hello world".getBytes().map(encoder.encode).toList
-
-          _ <- channel
-            .writeAllInboundThenFlushThenRunAllPendingTasks(byteBufs: _*)
-          _ <- socket.reads
-            // fs2-netty automatically releases
-            .evalMap(bb => IO(bb.retain()))
+          // Then Netty channel has messages in its outbound queue
+          str <- Stream
+            .fromIterator[IO]((0 until 3).iterator, chunkSize = 100)
+            .evalMap(_ => IO(channel.underlying.readOutbound[ByteBuf]()))
+            .flatMap(byteBufToByteStream)
             .take(11)
-            .through(socket.writes)
+            .map(byteToString)
             .compile
-            .drain
-
-          str <- (0 until 11).toList
-            .traverse { _ =>
-              IO(channel.underlying.readOutbound[ByteBuf]())
-                .flatMap(bb => IO(bb.readByte()))
-            }
-            .map(_.toArray)
-            .map(new String(_))
-
+            .toList
+            .map(_.mkString)
           _ <- IO(str shouldEqual "hello world")
+
+          // And ByteBuf's are not released. Embedded channel doesn't release, but a real channel should.
+          _ <- IO(byteBufs.map(_.refCnt()) shouldEqual List.fill(3)(1))
+          _ <- IO.unit.guarantee(
+            IO(byteBufs.foreach(ReferenceCountUtil.release))
+          )
         } yield ok
     }
 
     "closed connection in Netty appears as closed streams in fs2-netty" in withResource {
-      dispatcher =>
+      implicit dispatcher =>
         for {
-          pipeline <- NettyPipeline[IO](dispatcher)
-          x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
+          x <- NettyEmbeddedChannelWithByteBufPipeline
           (channel, socket) = x
 
           // Netty sanity check
           _ <- channel.isOpen.flatMap(isOpen => IO(isOpen should beTrue))
           _ <- socket.isOpen.flatMap(isOpen => IO(isOpen should beTrue))
 
-          // TODO: wrapper methods for underlying
           _ <- channel.close()
 
-          // Netty sanity check, maybe move these to their own test file for Embedded Channel
+          // Netty sanity check
           _ <- channel.isClosed.flatMap(isClosed => IO(isClosed should beTrue))
           _ <- socket.isOpen.flatMap(isOpen => IO(isOpen should beFalse))
           _ <- socket.isClosed.flatMap(isClosed => IO(isClosed should beTrue))
@@ -179,10 +252,9 @@ class NettyPipelineSpec
     }
 
     "closing connection in fs2-netty closes underlying Netty channel" in withResource {
-      dispatcher =>
+      implicit dispatcher =>
         for {
-          pipeline <- NettyPipeline[IO](dispatcher)
-          x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
+          x <- NettyEmbeddedChannelWithByteBufPipeline
           (channel, socket) = x
 
           _ <- socket.close()
@@ -193,11 +265,34 @@ class NettyPipelineSpec
       } yield ok
     }
 
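The spec that follows asserts that writing to a closed socket fails with ClosedChannelException and leaves Netty's outbound queue empty. An equivalent check phrased with attempt is sketched here; it only assumes the write action has type IO[Unit], which this patch does not itself pin down:

    import java.nio.channels.ClosedChannelException
    import cats.effect.IO

    def failedBecauseChannelWasClosed(write: IO[Unit]): IO[Boolean] =
      write.attempt.map {
        case Left(_: ClosedChannelException) => true // expected failure on a closed channel
        case _                               => false
      }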
+    "writing onto a closed socket is a no-op and throws an exception" in withResource {
+      implicit dispatcher =>
+        for {
+          x <- NettyEmbeddedChannelWithByteBufPipeline
+          (channel, socket) = x
+          _ <- channel.close()
+
+          byteBuf <- IO(Unpooled.wrappedBuffer("hi".getBytes))
+          caughtClosedChannelException <- socket
+            .write(byteBuf)
+            .as(false)
+            .handleErrorWith {
+              case _: ClosedChannelException => true.pure[IO]
+              case _ => false.pure[IO]
+            }
+
+          _ <- IO(caughtClosedChannelException shouldEqual true)
+
+          _ <- channel.outboundMessages.flatMap(out =>
+            IO(out.isEmpty shouldEqual true)
+          )
+        } yield ok
+    }
+
     "exceptions in Netty pipeline raises an exception on the reads stream" in withResource {
-      dispatcher =>
+      implicit dispatcher =>
         for {
-          pipeline <- NettyPipeline[IO](dispatcher)
-          x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
+          x <- NettyEmbeddedChannelWithByteBufPipeline
           (channel, socket) = x
 
           _ <- IO(
@@ -211,14 +306,17 @@ class NettyPipelineSpec
             .handleErrorWith(t => Stream.emit(t.getMessage))
             .compile
             .last
-        } yield errMsg shouldEqual "unit test error".some
+          _ <- IO(errMsg shouldEqual "unit test error".some)
+
+          _ <- channel.isOpen.flatMap(isOpen => IO(isOpen shouldEqual true))
+          _ <- socket.isOpen.flatMap(isOpen => IO(isOpen shouldEqual true))
+        } yield ok
     }
 
     "pipeline events appear in fs2-netty as events stream" in withResource {
-      dispatcher =>
+      implicit dispatcher =>
         for {
-          pipeline <- NettyPipeline[IO](dispatcher)
-          x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
+          x <- NettyEmbeddedChannelWithByteBufPipeline
           (channel, socket) = x
 
           _ <- IO(
@@ -459,6 +557,23 @@ class NettyPipelineSpec
 
 object NettyPipelineSpec {
 
+  private def NettyEmbeddedChannelWithByteBufPipeline(implicit
+    dispatcher: Dispatcher[IO]
+  ) =
+    for {
+      pipeline <- NettyPipeline[IO](dispatcher)
+      x <- Fs2NettyEmbeddedChannel[IO, ByteBuf, ByteBuf](pipeline)
+    } yield x
+
+  private def byteBufToByteStream(bb: ByteBuf): Stream[IO, Byte] = {
+    val buffer = new ListBuffer[Byte]
+    bb.forEachByte((value: Byte) => {
+      val _ = buffer.addOne(value)
+      true
+    })
+    Stream.fromIterator[IO](buffer.iterator, 1)
+  }
+
   /**
    * Does not use MessageToMessageDecoder, SimpleChannelInboundHandler, or anything that extends ChannelHandlerAdapter.
    * Netty tracks whether a ChannelHandlerAdapter annotated with @Sharable is added. Netty will throw an exception if such a