(12) Nepxion-Thunder Distributed RPC Integration Framework - Configuration Tuning


Nepxion-Thunder (QQ group 471164539) is published at https://github.com/Nepxion/

 

1. Local Configuration

To start with local configuration, set config="local" on the registry element in the Spring XML:

<thunder:registry id="registry" type="zookeeper" address="localhost:2181" config="local"/>
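For reference, the registry definition is picked up when the Spring context that contains it is started. The bootstrap sketch below is only illustrative; the file name applicationContext.xml is an assumed example, not something mandated by Thunder.

import org.springframework.context.support.ClassPathXmlApplicationContext;

public class LocalConfigBootstrap {
    public static void main(String[] args) {
        // Assumed file name: point this at whichever Spring XML actually contains
        // the <thunder:registry ... config="local"/> element.
        ClassPathXmlApplicationContext context =
                new ClassPathXmlApplicationContext("applicationContext.xml");
        // With config="local", Thunder reads its settings from the local
        // thunder-ext.properties / thunder.properties files described below.
        context.registerShutdownHook();
    }
}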

 

When creating a new business project, create a thunder-ext.properties file on the relevant classpath. Configuration values are resolved in priority order thunder-ext.properties -> thunder.properties (inside thunder.jar); if the same key appears in both, the higher-priority value wins, as the sketch below illustrates.
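The override order can be pictured with plain java.util.Properties: load the packaged defaults first, then layer the project-level file on top. This is only a simplified sketch of the documented priority, not Thunder's actual loading code.

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public final class ThunderPropertiesSketch {
    // Simplified illustration of the documented priority:
    // thunder-ext.properties (project) overrides thunder.properties (inside thunder.jar).
    public static Properties load() throws IOException {
        Properties merged = new Properties();
        merged.putAll(loadFromClasspath("thunder.properties"));
        merged.putAll(loadFromClasspath("thunder-ext.properties")); // same key -> the ext value wins
        return merged;
    }

    private static Properties loadFromClasspath(String name) throws IOException {
        Properties properties = new Properties();
        try (InputStream in = ThunderPropertiesSketch.class.getClassLoader().getResourceAsStream(name)) {
            if (in != null) {
                properties.load(in);
            }
        }
        return properties;
    }
}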
It is an integrated configuration: for the communication middlewares, configure only the ones you actually use. The content is as follows:

# Only used for Hessian, i.e. the web module path for accessing the Hessian servlet
# Web module path, if that is provided as a web service
path = /thunder

# Default invocation mode when it is omitted in the Spring XML
# If async is not configured in the Spring XML, the default synchronous mode is used
# Method invoke with async false, that is a default value
async = false

# Default broadcast mode when it is omitted in the Spring XML
# If broadcast is not configured in the Spring XML, broadcasting is disabled by default
# Method invoke with broadcast false, that is a default value
broadcast = false

# Default sync timeout when it is omitted in the Spring XML
# If a sync method's timeout is not configured in the Spring XML, the default sync timeout of 30 seconds is used; if the sync method has not returned within 30 seconds, a timeout exception is thrown
# Sync method timeout, that is a default value
syncTimeout = 30 * 1000

# Default async timeout when it is omitted in the Spring XML
# If an async method's timeout is not configured in the Spring XML, the default async timeout of 5 minutes is used; if the async result has not been called back within 5 minutes, the async cache is cleared and an exception is thrown
# Async method timeout, the async cache will be removed on timeout, that is a default value
asyncTimeout = 5 * 60 * 1000

# Interval at which the async daemon thread scans for timed-out async results, 10 seconds by default; this value cannot be defined in the XML
# Async method scan interval, the thread clears the timed-out cache on every scan
asyncScan = 10 * 1000

# Whether to compress transport data; compression trades a small amount of CPU overhead for less network transfer
# Generally, compressing message bodies of 2KB-20KB can improve throughput (TPS) by 20%-30%, but large payloads increase CPU overhead
# If the Netty protocol is used, it has built-in compression codecs such as JdkZlib that can compress again
# Compress transport data, maybe it works well for 2KB-20KB
compress = false

# When an invocation fails for network reasons (e.g. all servers are down), Thunder automatically retries the call so that the request is not lost. Once a network-related failure is detected, the load balancer routes the call to another server.
# This mechanism is only supported for Netty and Hessian. When an MQ invocation fails for network reasons, the MQ client automatically reconnects and blocks the process, so this mechanism is not needed there.
# Load Balance Configuration (used for Netty and Hessian)
# Whether to retry: if true, the client blocks the thread until at least one server recovers; if false, an exception is thrown immediately
loadBalanceRetry = true
# Number of retries
loadBalanceRetryTimes = 10000
# Delay between retries
loadBalanceRetryDelay = 5 * 1000

# Whether to publish asynchronous exception events as notifications
# If event notification is true, all produce and consume failures are published via an async event, so the invocation can be retried
eventNotification = true

# Notify by mail when an exception occurs during invocation
# If event notification and SMTP mail notification are both true, all produce and consume failures are sent as a mail
# Mail notification is disabled here
smtpNotification = false

# Whether to print load balance information (it helps to tell which server is selected); enabling this may greatly increase the log volume
# Load balance log will help to indicate which server will be selected (used for Netty and Hessian)
loadBalanceLogPrint = true

# Whether to print transport information (it helps to tell which server a request is sent to and which server it is answered from)
# Transport log will help to indicate the transport information
transportLogPrint = true

# Whether to print heartbeat information (it helps to tell whether the Netty data channel is still alive); enabling this may greatly increase the log volume
# Heart beat log will help to indicate the data channel is alive or not (used for Netty)
heartBeatLogPrint = true

# Whether to print the compressed and uncompressed lengths of the serialized byte array (it helps to judge the size of business objects so that teams can decide whether to enable compression)
# Serializer log will help to indicate the byte array length
serializerLogPrint = false

# Exclusion list for exception notifications: once specified, the listed exceptions will not be sent by mail; separate entries with commas
# Failure Notification exclusion, use "," to separate
smtpNotificationExclusion = com.nepxion.thunder.security.SecurityException
# Whether to use SSL
smtpSsl = true
# SMTP server address
smtpHost = smtp.qq.com
# Sender account
smtpUser = *******@qq.com
# Sender account password
smtpPassword = 
# Sender mail address
smtpMailFrom = *******@qq.com
# Recipient mail addresses, separated by commas
smtpMailTo = *******@qq.com,*******@qq.com
# CC mail addresses, separated by commas
smtpMailCC = 
# BCC mail addresses, separated by commas
smtpMailBCC = 

# Thread pool
# Thread Pool Configuration
# Thread isolation: if true, each interface gets its own thread pool
# Multi thread pool
threadPoolMultiMode = false
# CPU unit
threadPoolServerCorePoolSize = 4
# CPU unit
threadPoolServerMaximumPoolSize = 8
threadPoolServerKeepAliveTime = 15 * 60 * 1000
threadPoolServerAllowCoreThreadTimeout = false
# CPU unit
threadPoolClientCorePoolSize = 4
# CPU unit
threadPoolClientMaximumPoolSize = 8
threadPoolClientKeepAliveTime = 15 * 60 * 1000
threadPoolClientAllowCoreThreadTimeout = false
# LinkedBlockingQueue, ArrayBlockingQueue, SynchronousQueue
threadPoolQueue = LinkedBlockingQueue
# CPU unit (Used for LinkedBlockingQueue or ArrayBlockingQueue)
threadPoolQueueCapacity = 1024
# BlockingPolicyWithReport, CallerRunsPolicyWithReport, AbortPolicyWithReport, RejectedPolicyWithReport, DiscardedPolicyWithReport
# BlockingPolicyWithReport: blocking-producer saturation policy; neither discards the task nor throws an exception, but calls BlockingQueue.put when the queue is full so that the producer blocks
# CallerRunsPolicyWithReport: caller-runs saturation policy; neither discards the task nor throws an exception, but hands the task back to the calling thread for execution
# AbortPolicyWithReport: when saturated, discards the task and throws an exception
# RejectedPolicyWithReport: if the task implements the RejectedRunnable interface, the rejection logic is delegated to the user; otherwise some existing tasks are discarded from the queue in FIFO order and the new task is added
# DiscardedPolicyWithReport: when saturated, discards some existing tasks from the queue in FIFO order and adds the new task
threadPoolRejectedPolicy = BlockingPolicyWithReport

# Object pool, mainly used by the FST fast-serialization implementation
# FST Object Pool Configuration
# CPU unit
fstObjectPoolMaxTotal = 64
# CPU unit
fstObjectPoolMaxIdle = 32
# CPU unit
fstObjectPoolMinIdle = 16
fstObjectPoolMaxWaitMillis = 3000
fstObjectPoolTimeBetweenEvictionRunsMillis = 1000 * 60 * 15
fstObjectPoolMinEvictableIdleTimeMillis = 1000 * 60 * 30
fstObjectPoolSoftMinEvictableIdleTimeMillis = 1000 * 60 * 30
fstObjectPoolBlockWhenExhausted = true
fstObjectPoolLifo = true
fstObjectPoolFairness = false

# Netty tuning
# Netty Configuration
nettySoBacklog = 128
nettySoSendBuffer = 64 * 1024
nettySoReceiveBuffer = 64 * 1024
nettyWriteBufferHighWaterMark = 32 * 1024
nettyWriteBufferLowWaterMark = 8 * 1024
nettyMaxMessageSize = 1 * 1024 * 1024
nettyWriteIdleTime = 90 * 1000
nettyReadIdleTime = 60 * 1000
nettyAllIdleTime = 45 * 1000
nettyWriteTimeout = 5 * 1000
nettyReadTimeout = 5 * 1000
nettyConnectTimeout = 10 * 1000
nettyReconnectDelay = 1000

# Hessian tuning
# Hessian Configuration
hessianReadTimeout = 30 * 1000
hessianConnectTimeout = 10 * 1000

# Redis tuning
# Redis Configuration
# CPU unit
redisObjectPoolMaxTotal = 30
# CPU unit
redisObjectPoolMaxIdle = 10
# CPU unit
redisObjectPoolMinIdle = 10
redisObjectPoolMaxWaitMillis = 1000 * 3
redisObjectPoolTimeBetweenEvictionRunsMillis = 1000 * 30
redisObjectPoolMinEvictableIdleTimeMillis = 1000 * 60
redisObjectPoolSoftMinEvictableIdleTimeMillis = 1000 * 60
redisObjectPoolBlockWhenExhausted = true
redisObjectPoolLifo = true
redisObjectPoolFairness = false
redisSoTimeout = 5 * 1000
redisConnectionTimeout = 5 * 1000
redisDataExpiration = 15 * 24 * 60 * 1000
# Sentinel parameters
redisReconnectionWait = 5
redisDatabase = 0
# Cluster parameters
redisMaxRedirections = 5

# Splunk tuning
# Splunk Configuration
splunkMaximumTime = 60

# Message queue tuning
# Kafka Configuration
kafka.producer.acks = all
kafka.producer.linger.ms = 0
kafka.producer.retries = 0
kafka.producer.buffer.memory = 33554432
kafka.producer.send.buffer.bytes = 131072
kafka.producer.receive.buffer.bytes = 32768
kafka.producer.max.request.size = 1048576
kafka.producer.batch.size = 16384
kafka.producer.timeout.ms = 30000
kafka.producer.request.timeout.ms = 30000
kafka.producer.connections.max.idle.ms = 540000
kafka.producer.reconnect.backoff.ms = 50
kafka.producer.retry.backoff.ms = 100
kafka.producer.max.block.ms = 60000
kafka.producer.max.in.flight.requests.per.connection = 5
kafka.producer.metadata.max.age.ms = 300000
kafka.producer.metadata.fetch.timeout.ms = 60000

kafka.consumer.server.poll.timeout.ms = 1000
kafka.consumer.client.poll.timeout.ms = 1000
kafka.consumer.enable.auto.commit = true
kafka.consumer.auto.offset.reset = latest
kafka.consumer.send.buffer.bytes = 131072
kafka.consumer.receive.buffer.bytes = 32768
kafka.consumer.max.partition.fetch.bytes = 1048576
kafka.consumer.fetch.min.bytes = 1024
kafka.consumer.fetch.max.wait.ms = 500
kafka.consumer.auto.commit.interval.ms = 10000
kafka.consumer.heartbeat.interval.ms = 3000
kafka.consumer.session.timeout.ms = 30000
kafka.consumer.request.timeout.ms = 40000
kafka.consumer.connections.max.idle.ms = 540000
kafka.consumer.reconnect.backoff.ms = 50
kafka.consumer.retry.backoff.ms = 100
kafka.consumer.metadata.max.age.ms = 300000

# MQ Common Configuration (used for ActiveMQ and Tibco)
mqRetryNotificationDelay = 5 * 1000
mqReconnectOnException = true

# CacheConnectionFactory Configuration (used for ActiveMQ and Tibco)
mqSessionCacheSize = 20
mqCacheConsumers = true
mqCacheProducers = true

# PoolConnectionFactory Configuration (used for ActiveMQ and Tibco)
mqMaxConnections = 20
mqMaximumActiveSessionPerConnection = 100
mqIdleTimeout = 30 * 1000
mqExpiryTimeout = 0
mqBlockIfSessionPoolIsFull = true
mqBlockIfSessionPoolIsFullTimeout = -1
mqTimeBetweenExpirationCheckMillis = -1
mqCreateConnectionOnStartup = true

# DefaultMessageListenerContainer Configuration (used for ActiveMQ and Tibco)
mqConcurrentConsumers = 10
mqMaxConcurrentConsumers = 1
mqReceiveTimeout = 1000
mqRecoveryInterval = 5000
mqIdleConsumerLimit = 1
mqIdleTaskExecutionLimit = 1
mqCacheLevel = 4
mqAcceptMessagesWhileStopping = false

# Apache HttpComponents tuning; an auxiliary component that supports both sync and async modes and can be called externally
# Apache Configuration
apacheBacklogSize = 128
apacheSendBufferSize = 64 * 1024
apacheReceiveBufferSize = 64 * 1024
apacheSoTimeout = 5 * 1000
apacheConnectTimeout = 5 * 1000
# CPU unit
apacheMaxTotal = 32

# Zookeeper tuning
# Zookeeper Configuration
zookeeperSessionTimeout = 15 * 1000
zookeeperConnectTimeout = 15 * 1000
zookeeperConnectWaitTime = 1000


# Zookeeper connection configuration, equivalent to <thunder:registry ... address="localhost:2181" .../> in the Spring XML
# Zookeeper Connection Configuration
zookeeperAddress = localhost:2181

# Redis Sentinel connection configuration, used for the Netty and Hessian publish/subscribe feature and for log caching
# Redis Sentinel Connection Configuration
redisSentinel = 192.168.126.151:16379;192.168.126.151:26379;192.168.126.151:36379
redisMasterName = mymaster
redisClientName =  
redisPassword = 

# Redis Cluster connection configuration, used for log caching; use either the Sentinel or the Cluster configuration, preferably Sentinel
# Redis Cluster Connection Configuration
# redisCluster = 192.168.126.131:7001;192.168.126.131:7002;192.168.126.131:7003;192.168.126.131:7004;192.168.126.131:7005;192.168.126.131:7006

# Splunk log server configuration
# Splunk Connection Configuration
splunkHost = localhost
splunkPort = 8089
splunkUserName = admin
splunkPassword = admin

# Kafka connection configuration
# Kafka Connection Configuration
kafka-1.kafka.producer.bootstrap.servers = localhost:9092
kafka-1.kafka.consumer.bootstrap.servers = localhost:9092

# ActiveMQ connection configuration
# ActiveMQ Connection Configuration
activeMQ-1.mqUrl = failover://(tcp://localhost:61616)?initialReconnectDelay=1000&jms.prefetchPolicy.all=5
# activeMQ-1.mqUrl = failover://(tcp://broker1:61616,tcp://broker2:61616)?randomize=true
# SingleConnectionFactory, CachingConnectionFactory, PooledConnectionFactory
activeMQ-1.mqConnectionFactoryType = CachingConnectionFactory
activeMQ-1.mqUserName = admin
activeMQ-1.mqPassword = admin
# activeMQ-1.mqJndiName = ConnectionFactory

# Tibco connection configuration
# Tibco Connection Configuration
tibco-1.mqUrl = tcp://localhost:7222
# SingleConnectionFactory, CachingConnectionFactory, PooledConnectionFactory
tibco-1.mqConnectionFactoryType = CachingConnectionFactory
tibco-1.mqUserName = admin
tibco-1.mqPassword =
tibco-1.mqJndiName = seashellConnectionFactory

tibco-2.mqUrl = tcp://localhost:7222
# SingleConnectionFactory, CachingConnectionFactory, PooledConnectionFactory
tibco-2.mqConnectionFactoryType = CachingConnectionFactory
tibco-2.mqUserName = admin
tibco-2.mqPassword =
tibco-2.mqJndiName = FTQueueConnectionFactory
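Note that many of the numeric values above are written as multiplication expressions such as 30 * 1000 or 15 * 60 * 1000. A plain java.util.Properties load returns these as raw strings, so something has to evaluate them; the small evaluator below is only an illustrative assumption, not Thunder's actual parsing code.

// Minimal evaluator for values written as multiplication chains, e.g. "15 * 60 * 1000".
// Illustrative assumption only; applies to numeric values, not string values such as "all".
public final class ExpressionValue {
    public static long evaluate(String value) {
        long result = 1L;
        for (String factor : value.split("\\*")) {
            result *= Long.parseLong(factor.trim());
        }
        return result;
    }

    public static void main(String[] args) {
        // syncTimeout = 30 * 1000 -> 30000 milliseconds
        System.out.println(ExpressionValue.evaluate("30 * 1000"));
        // asyncTimeout = 5 * 60 * 1000 -> 300000 milliseconds
        System.out.println(ExpressionValue.evaluate("5 * 60 * 1000"));
    }
}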


2. Remote Configuration

To start with remote configuration, set config="remote" on the registry element in the Spring XML:

<thunder:registry id="registry" type="zookeeper" address="localhost:2181" config="remote"/>
Configuration values are then resolved in priority order remote configuration -> thunder-ext.properties -> thunder.properties (inside thunder.jar).
For how to set up remote configuration, see (9) Thunder Distributed RPC Framework - Governance Center.
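Purely as an illustration of remote values taking the highest priority, the sketch below assumes the remote side exposes properties-format text on a ZooKeeper node; the node path /thunder/config is hypothetical, and the real layout is defined by the governance center.

import java.io.ByteArrayInputStream;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RemoteConfigSketch {
    // Hypothetical node path; the real layout is defined by the governance center.
    private static final String CONFIG_PATH = "/thunder/config";

    public static Properties load(Properties localMerged) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("localhost:2181", 15000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        // Assumes the node content is properties-format text.
        byte[] data = zk.getData(CONFIG_PATH, false, null);
        zk.close();

        Properties remote = new Properties();
        remote.load(new ByteArrayInputStream(data));

        // Remote values override thunder-ext.properties / thunder.properties.
        Properties merged = new Properties();
        merged.putAll(localMerged);
        merged.putAll(remote);
        return merged;
    }
}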
Comments
#4 nepxion 2016-01-14
Currently only Java is supported.
adanz wrote:
Nice stuff. Are clients in other languages supported?

#3 adanz 2016-01-13
Nice stuff. Are clients in other languages supported?
#2 nepxion 2015-12-02
Thanks for the support; it is still being improved.
#1 tairan_0729 2015-12-01
Very well written; I am reading through it.
