Netty新增執行緒池實現非同步處理
tomcat 非同步執行緒模型大概可以理解為:acceptor負責接受新來的連線,然後把連線初始化後丟給poller來做io,然後又交給處理業務的exec執行緒池非同步處理業務邏輯。
所以如果IO執行緒和業務handler 執行在同一個執行緒裡面,一旦handler 執行某個比較耗時的邏輯(比如查資料庫、服務間通訊等),就會嚴重影響整個netty的效能,這時候就需要考慮將耗時操作非同步處理。
netty 中加入執行緒池有兩種方式:
第一種是handler 中加入執行緒池
第二種是Context 中加入執行緒池
1. handler 加入執行緒池
核心程式碼如下:
1. 服務端相關程式碼
EchoServer
package cn.xm.netty.example.echo; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.*; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.logging.LogLevel;import io.netty.handler.logging.LoggingHandler; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.util.SelfSignedCertificate; public final class EchoServer { static final boolean SSL = System.getProperty("ssl") != null; staticfinal int PORT = Integer.parseInt(System.getProperty("port", "8007")); public static void main(String[] args) throws Exception { final SslContext sslCtx; if (SSL) { SelfSignedCertificate ssc = new SelfSignedCertificate(); sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); } else { sslCtx = null; } EventLoopGroup bossGroup = new NioEventLoopGroup(1); EventLoopGroup workerGroup = new NioEventLoopGroup(); final EchoServerHandler serverHandler = new EchoServerHandler(); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, 100) .handler(new LoggingHandler(LogLevel.INFO)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); if (sslCtx != null) { p.addLast(sslCtx.newHandler(ch.alloc())); } p.addLast(new EchoServerHandler2()); p.addLast(serverHandler); } }); ChannelFuture f = b.bind(PORT).sync(); f.channel().closeFuture().sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } } }
EchoServerHandler
package cn.xm.netty.example.echo; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.DefaultEventLoopGroup; import io.netty.util.CharsetUtil; public class EchoServerHandler extends ChannelInboundHandlerAdapter { private static final DefaultEventLoopGroup eventExecutors = new DefaultEventLoopGroup(16); @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { System.out.println("cn.xm.netty.example.echo.EchoServerHandler.channelRead thread: " + Thread.currentThread().getName()); // 強轉為netty的ByteBuffer(實際就是包裝的ByteBuffer) ByteBuf byteBuf = (ByteBuf) msg; System.out.println("客戶端傳送的訊息是:" + byteBuf.toString(CharsetUtil.UTF_8)); System.out.println("客戶端地址:" + ctx.channel().remoteAddress()); ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 客戶端!0!", CharsetUtil.UTF_8)); // ctx.channel().eventLoop().execute(new Runnable() { eventExecutors.execute(new Runnable() { @Override public void run() { // 比如這裡我們將一個特別耗時的任務轉為非同步執行(也就是任務提交到NioEventLoop的taskQueue中) System.out.println("java.lang.Runnable.run thread: " + Thread.currentThread().getName()); try { Thread.sleep(10 * 1000); } catch (InterruptedException e) { e.printStackTrace(); } ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 客戶端!1!", CharsetUtil.UTF_8)); } }); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // Close the connection when an exception is raised. cause.printStackTrace(); ctx.close(); } }
EchoServerHandler2
package cn.xm.netty.example.echo; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelPromise; import io.netty.channel.DefaultEventLoopGroup; public class EchoServerHandler2 extends ChannelOutboundHandlerAdapter { private static final DefaultEventLoopGroup eventExecutors = new DefaultEventLoopGroup(16); @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { super.write(ctx, msg, promise); System.out.println("cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: " + Thread.currentThread().getName()); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // Close the connection when an exception is raised. cause.printStackTrace(); ctx.close(); } }
2. client 程式碼
EchoClient
package cn.xm.netty.example.echo; import io.netty.bootstrap.Bootstrap; import io.netty.channel.*; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.util.InsecureTrustManagerFactory; public final class EchoClient { static final boolean SSL = System.getProperty("ssl") != null; static final String HOST = System.getProperty("host", "127.0.0.1"); static final int PORT = Integer.parseInt(System.getProperty("port", "8007")); public static void main(String[] args) throws Exception { final SslContext sslCtx; if (SSL) { sslCtx = SslContextBuilder.forClient() .trustManager(InsecureTrustManagerFactory.INSTANCE).build(); } else { sslCtx = null; } // Configure the client. EventLoopGroup group = new NioEventLoopGroup(); try { Bootstrap b = new Bootstrap(); b.group(group) .channel(NioSocketChannel.class) .option(ChannelOption.TCP_NODELAY, true) .handler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); if (sslCtx != null) { p.addLast(sslCtx.newHandler(ch.alloc(), HOST, PORT)); } p.addLast(new EchoClientHandler()); } }); // Start the client. ChannelFuture f = b.connect(HOST, PORT).sync(); // Wait until the connection is closed. f.channel().closeFuture().sync(); } finally { // Shut down the event loop to terminate all threads. group.shutdownGracefully(); } } }
EchoClientHandler
package cn.xm.netty.example.echo; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.util.CharsetUtil; public class EchoClientHandler extends ChannelInboundHandlerAdapter { @Override public void channelActive(ChannelHandlerContext ctx) { System.out.println("ClientHandler ctx: " + ctx); ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 伺服器!", CharsetUtil.UTF_8)); } @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { // 強轉為netty的ByteBuffer(實際就是包裝的ByteBuffer) ByteBuf byteBuf = (ByteBuf) msg; System.out.println("伺服器會送的訊息是:" + byteBuf.toString(CharsetUtil.UTF_8)); System.out.println("伺服器地址:" + ctx.channel().remoteAddress()); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { cause.printStackTrace(); ctx.close(); } }
3. 測試
先啟動服務端,然後啟動客戶端,然後檢視服務端控制檯如下:
cn.xm.netty.example.echo.EchoServerHandler.channelRead thread: nioEventLoopGroup-3-1 客戶端傳送的訊息是:hello, 伺服器! 客戶端地址:/127.0.0.1:54247 cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: nioEventLoopGroup-3-1 java.lang.Runnable.run thread: defaultEventLoopGroup-4-1 cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: nioEventLoopGroup-3-1
4. 分析
可以看到上面的邏輯是:
(1) 當IO執行緒輪詢到一個socket 事件後,IO執行緒開始處理,當走到EchoServerHandler 比較耗時的操作之後,將耗時任務交給執行緒池。
(2) 當耗時任務執行完畢再執行ctx.writeAndFlush 時,會將這個任務再交給IO執行緒,過程如下(也就是最終的寫操作都會交給IO執行緒):
1》io.netty.channel.AbstractChannelHandlerContext#write(java.lang.Object, boolean, io.netty.channel.ChannelPromise)
// Quoted from Netty: io.netty.channel.AbstractChannelHandlerContext#write.
// Decides whether the write can run inline (caller is already on the next
// context's executor) or must be wrapped in a task and handed to it.
private void write(Object msg, boolean flush, ChannelPromise promise) {
    AbstractChannelHandlerContext next = findContextOutbound();
    // Touch the message for leak tracking before it crosses handler contexts.
    final Object m = pipeline.touch(msg, next);
    EventExecutor executor = next.executor();
    if (executor.inEventLoop()) {
        // Caller already runs on the target executor: invoke directly.
        if (flush) {
            next.invokeWriteAndFlush(m, promise);
        } else {
            next.invokeWrite(m, promise);
        }
    } else {
        // Caller is on a foreign thread (e.g. a business pool): package the
        // write as a task and submit it to the executor that owns the context.
        AbstractWriteTask task;
        if (flush) {
            task = WriteAndFlushTask.newInstance(next, m, promise);
        } else {
            task = WriteTask.newInstance(next, m, promise);
        }
        safeExecute(executor, task, promise, m);
    }
}
這裡走的是else 程式碼塊,因為當前執行緒(業務執行緒池的執行緒)不是該channel 繫結的IO執行緒。else 程式碼塊的邏輯是建立一個寫Task,然後呼叫io.netty.channel.AbstractChannelHandlerContext#safeExecute:
// Quoted from Netty: submits the write task to the target executor. If the
// executor rejects it, the promise is failed and the message released so the
// reference count does not leak.
private static void safeExecute(EventExecutor executor, Runnable runnable, ChannelPromise promise, Object msg) {
    try {
        executor.execute(runnable);
    } catch (Throwable cause) {
        try {
            promise.setFailure(cause);
        } finally {
            if (msg != null) {
                ReferenceCountUtil.release(msg);
            }
        }
    }
}
可以看到是呼叫executor.execute 方法把任務加入自己的任務佇列裡面。io.netty.util.concurrent.SingleThreadEventExecutor#execute
// Quoted from Netty: io.netty.util.concurrent.SingleThreadEventExecutor#execute.
// Enqueues the task; lazily starts the backing thread on the first external
// submission and wakes the executor if it is blocked waiting for work.
public void execute(Runnable task) {
    if (task == null) {
        throw new NullPointerException("task");
    }
    boolean inEventLoop = inEventLoop();
    if (inEventLoop) {
        // Already on the executor thread: just enqueue.
        addTask(task);
    } else {
        // External thread: make sure the backing thread is running, then enqueue.
        startThread();
        addTask(task);
        // If the executor shut down concurrently, roll the task back and reject.
        if (isShutdown() && removeTask(task)) {
            reject();
        }
    }
    if (!addTaskWakesUp && wakesUpForTask(task)) {
        wakeup(inEventLoop);
    }
}
補充:Handler 中加非同步還有一種方式就是建立一個任務,加入到自己的任務佇列,這個實際也佔用的是IO執行緒
package cn.xm.netty.example.echo; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.util.CharsetUtil; public class EchoServerHandler extends ChannelInboundHandlerAdapter { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { System.out.println("cn.xm.netty.example.echo.EchoServerHandler.channelRead thread: " + Thread.currentThread().getName()); // 強轉為netty的ByteBuffer(實際就是包裝的ByteBuffer) ByteBuf byteBuf = (ByteBuf) msg; System.out.println("客戶端傳送的訊息是:" + byteBuf.toString(CharsetUtil.UTF_8)); System.out.println("客戶端地址:" + ctx.channel().remoteAddress()); ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 客戶端!0!", CharsetUtil.UTF_8)); ctx.channel().eventLoop().execute(new Runnable() { @Override public void run() { // 比如這裡我們將一個特別耗時的任務轉為非同步執行(也就是任務提交到NioEventLoop的taskQueue中) System.out.println("java.lang.Runnable.run thread: " + Thread.currentThread().getName()); try { Thread.sleep(10 * 1000); } catch (InterruptedException e) { e.printStackTrace(); } ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 客戶端!1!", CharsetUtil.UTF_8)); } }); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // Close the connection when an exception is raised. cause.printStackTrace(); ctx.close(); } }
測試: 可以看出非同步也用的是當前的IO執行緒
cn.xm.netty.example.echo.EchoServerHandler.channelRead thread: nioEventLoopGroup-3-1 客戶端傳送的訊息是:hello, 伺服器! 客戶端地址:/127.0.0.1:53721 cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: nioEventLoopGroup-3-1 java.lang.Runnable.run thread: nioEventLoopGroup-3-1 cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: nioEventLoopGroup-3-1
2. Context 中增加非同步執行緒池
1. 程式碼改造
EchoServer 程式碼改造
package cn.xm.netty.example.echo; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.*; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.util.SelfSignedCertificate; public final class EchoServer { static final boolean SSL = System.getProperty("ssl") != null; static final int PORT = Integer.parseInt(System.getProperty("port", "8007")); public static void main(String[] args) throws Exception { final SslContext sslCtx; if (SSL) { SelfSignedCertificate ssc = new SelfSignedCertificate(); sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); } else { sslCtx = null; } EventLoopGroup bossGroup = new NioEventLoopGroup(1); EventLoopGroup workerGroup = new NioEventLoopGroup(); DefaultEventLoopGroup group = new DefaultEventLoopGroup(16); final EchoServerHandler serverHandler = new EchoServerHandler(); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, 100) .handler(new LoggingHandler(LogLevel.INFO)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); if (sslCtx != null) { p.addLast(sslCtx.newHandler(ch.alloc())); } p.addLast(group, new EchoServerHandler2()); p.addLast(group, serverHandler); } }); ChannelFuture f = b.bind(PORT).sync(); f.channel().closeFuture().sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } } }
呼叫p.addLast 的時候指定使用的執行緒組。 如果不指定,預設使用的是IO執行緒組。 如果指定了就使用指定的執行緒組。 這樣就類似於Tomcat8 的執行緒模型。接收請求-》IO-》處理 分別在不同的執行緒裡面。
EchoServerHandler程式碼改造: 正常處理,無需非同步開執行緒
package cn.xm.netty.example.echo; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.util.CharsetUtil; public class EchoServerHandler extends ChannelInboundHandlerAdapter { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { System.out.println("cn.xm.netty.example.echo.EchoServerHandler.channelRead thread: " + Thread.currentThread().getName()); // 強轉為netty的ByteBuffer(實際就是包裝的ByteBuffer) ByteBuf byteBuf = (ByteBuf) msg; System.out.println("客戶端傳送的訊息是:" + byteBuf.toString(CharsetUtil.UTF_8)); System.out.println("客戶端地址:" + ctx.channel().remoteAddress()); ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 客戶端!0!", CharsetUtil.UTF_8)); // 比如這裡我們將一個特別耗時的任務轉為非同步執行(也就是任務提交到NioEventLoop的taskQueue中) System.out.println("java.lang.Runnable.run thread: " + Thread.currentThread().getName()); try { Thread.sleep(10 * 1000); } catch (InterruptedException e) { e.printStackTrace(); } ctx.writeAndFlush(Unpooled.copiedBuffer("hello, 客戶端!1!", CharsetUtil.UTF_8)); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // Close the connection when an exception is raised. cause.printStackTrace(); ctx.close(); } }
2. 測試結果:
cn.xm.netty.example.echo.EchoServerHandler.channelRead thread: defaultEventLoopGroup-4-1 客戶端傳送的訊息是:hello, 伺服器! 客戶端地址:/127.0.0.1:52966 cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: defaultEventLoopGroup-4-1 java.lang.Runnable.run thread: defaultEventLoopGroup-4-1 cn.xm.netty.example.echo.EchoServerHandler2.write called, threadName: defaultEventLoopGroup-4-1
可以看到都是在自己開的執行緒組裡面完成的任務。
3. 程式碼檢視
(1)從之前的原始碼查閱到, context 封裝了handler、pipeline、executor 等資訊。 在p.addLast 的時候我們指定了自己的執行緒組,檢視原始碼
io.netty.channel.DefaultChannelPipeline#addLast(io.netty.util.concurrent.EventExecutorGroup, io.netty.channel.ChannelHandler...)
// Quoted from Netty: DefaultChannelPipeline#addLast overloads that accept an
// EventExecutorGroup, letting a handler run on an executor other than the
// channel's I/O event loop.
@Override
public final ChannelPipeline addLast(EventExecutorGroup executor, ChannelHandler... handlers) {
    if (handlers == null) {
        throw new NullPointerException("handlers");
    }
    for (ChannelHandler h: handlers) {
        if (h == null) {
            break;
        }
        addLast(executor, null, h);
    }
    return this;
}

@Override
public final ChannelPipeline addLast(EventExecutorGroup group, String name, ChannelHandler handler) {
    final AbstractChannelHandlerContext newCtx;
    synchronized (this) {
        // Rejects re-adding a non-@Sharable handler instance.
        checkMultiplicity(handler);

        // The supplied group (may be null) is captured in the new context.
        newCtx = newContext(group, filterName(name, handler), handler);

        addLast0(newCtx);

        // If the registered is false it means that the channel was not registered on an eventloop yet.
        // In this case we add the context to the pipeline and add a task that will call
        // ChannelHandler.handlerAdded(...) once the channel is registered.
        if (!registered) {
            newCtx.setAddPending();
            callHandlerCallbackLater(newCtx, true);
            return this;
        }

        EventExecutor executor = newCtx.executor();
        if (!executor.inEventLoop()) {
            // handlerAdded must fire on the context's own executor.
            newCtx.setAddPending();
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    callHandlerAdded0(newCtx);
                }
            });
            return this;
        }
    }
    callHandlerAdded0(newCtx);
    return this;
}
io.netty.channel.DefaultChannelPipeline#newContext
// Quoted from Netty: the (possibly null) EventExecutorGroup handed to addLast
// is resolved via childExecutor(group) and stored on the new handler context.
private AbstractChannelHandlerContext newContext(EventExecutorGroup group, String name, ChannelHandler handler) {
    return new DefaultChannelHandlerContext(this, childExecutor(group), name, handler);
}
可以看到使用了自定義的執行緒組。並且記錄到了DefaultChannelHandlerContext 屬性裡。
(2) 不指定執行緒組,預設使用的是null
io.netty.channel.DefaultChannelPipeline#addLast(io.netty.channel.ChannelHandler...)
// Quoted from Netty: the no-group overload delegates with a null group, so
// the handler falls back to the channel's I/O event loop.
public final ChannelPipeline addLast(ChannelHandler... handlers) {
    return addLast(null, handlers);
}
(3) io.netty.channel.AbstractChannelHandlerContext#invokeChannelRead(io.netty.channel.AbstractChannelHandlerContext, java.lang.Object)
// Quoted from Netty: inbound event dispatch. If the target context has its
// own executor (set via addLast(group, ...)), the read is submitted to that
// executor instead of running on the calling (I/O) thread.
static void invokeChannelRead(final AbstractChannelHandlerContext next, Object msg) {
    final Object m = next.pipeline.touch(ObjectUtil.checkNotNull(msg, "msg"), next);
    EventExecutor executor = next.executor();
    if (executor.inEventLoop()) {
        next.invokeChannelRead(m);
    } else {
        // Hop to the handler's dedicated executor.
        executor.execute(new Runnable() {
            @Override
            public void run() {
                next.invokeChannelRead(m);
            }
        });
    }
}
我們檢視next屬性如下:
1》io.netty.channel.AbstractChannelHandlerContext#executor 獲取executor 方法如下:
// Quoted from Netty: a context with no dedicated executor falls back to the
// channel's event loop (the I/O thread).
@Override
public EventExecutor executor() {
    if (executor == null) {
        return channel().eventLoop();
    } else {
        return executor;
    }
}
可以看到,如果指定了就返回指定的,未指定返回channel 的executor, 也就是IO執行緒。
2》接下來executor.inEventLoop() 為false, 所以走else 程式碼塊的非同步邏輯。
總結:
第一種在handler中新增非同步,比較靈活,可以只將耗時的程式碼塊加入非同步。非同步也會延長介面響應時間,因為需要先加入佇列。
第二種方式是netty的標準方式,相當於整個handler 都非同步操作。不論耗時不耗時,都加入佇列非同步進行處理。這樣理解清晰,但可能不夠靈活。
【當你用心寫完每一篇部落格之後,你會發現它比你用程式碼實現功能更有成就感!】