mirror of
https://gitee.com/dromara/RuoYi-Cloud-Plus.git
synced 2026-04-23 10:58:34 +08:00
update 将 nacos seata 的docker镜像替换为apache官方镜像 sky-agent插件改为由用户自行下载
This commit is contained in:
@@ -27,20 +27,23 @@ services:
|
||||
network_mode: "host"
|
||||
|
||||
nacos:
|
||||
image: ruoyi/ruoyi-nacos:2.5.3
|
||||
image: nacos/nacos-server:v3.1.1
|
||||
container_name: nacos
|
||||
ports:
|
||||
- "8848:8848"
|
||||
- "9848:9848"
|
||||
- "9849:9849"
|
||||
- "18080:18080"
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
JAVA_OPTS: "-Xms256m -Xmx512m"
|
||||
MODE: standalone
|
||||
PREFER_HOST_MODE: hostname
|
||||
JVM_XMS: 256m
|
||||
JVM_XMX: 512m
|
||||
volumes:
|
||||
# 日志目录 注意集群模式下 日志目录不能一致 需要区分例如 nacos1 nacos2
|
||||
- /docker/nacos/logs/:/root/nacos/logs
|
||||
# 集群配置文件 集群所有nacos都必须使用此文件
|
||||
- /docker/nacos/conf/cluster.conf:/root/nacos/conf/cluster.conf
|
||||
- /docker/nacos/logs/:/home/nacos/logs
|
||||
# 默认单机模式 使用 mysql 等按 nacos 文档在配置文件内修改 所有配置均可在配置文件修改
|
||||
- /docker/nacos/conf/application.properties:/home/nacos/conf/application.properties
|
||||
network_mode: "host"
|
||||
|
||||
redis:
|
||||
@@ -95,7 +98,7 @@ services:
|
||||
network_mode: "host"
|
||||
|
||||
seata-server:
|
||||
image: ruoyi/ruoyi-seata-server:2.5.3
|
||||
image: apache/seata-server:2.6.0
|
||||
container_name: seata-server
|
||||
ports:
|
||||
- "7091:7091"
|
||||
@@ -106,9 +109,8 @@ services:
|
||||
# SEATA_IP: 127.0.0.1
|
||||
SEATA_PORT: 8091
|
||||
volumes:
|
||||
- /docker/ruoyi-seata-server/logs/:/ruoyi/seata-server/logs
|
||||
# skywalking 探针
|
||||
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
|
||||
- /docker/seata/conf/application.yml:/seata-server/resources/application.yml
|
||||
- /docker/seata/logs/:/root/logs/seata
|
||||
privileged: true
|
||||
network_mode: "host"
|
||||
|
||||
|
||||
326
script/docker/nacos/conf/application.properties
Normal file
326
script/docker/nacos/conf/application.properties
Normal file
@@ -0,0 +1,326 @@
|
||||
#
|
||||
# Copyright 1999-2025 Alibaba Group Holding Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#--------------- Nacos Common Configurations ---------------#
|
||||
|
||||
#*************** Nacos port Related Configurations ***************#
|
||||
### Nacos Server Main port
|
||||
nacos.server.main.port=8848
|
||||
|
||||
#*************** Network Related Configurations ***************#
|
||||
### If prefer hostname over ip for Nacos server addresses in cluster.conf:
|
||||
# nacos.inetutils.prefer-hostname-over-ip=false
|
||||
|
||||
### Specify local server's IP:
|
||||
# nacos.inetutils.ip-address=
|
||||
|
||||
#*************** Datasource Related Configurations ***************#
|
||||
### nacos.plugin.datasource.log.enabled=true
|
||||
#spring.sql.init.platform=mysql
|
||||
### Count of DB:
|
||||
# db.num=1
|
||||
|
||||
### Connect URL of DB:
|
||||
# db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
|
||||
# db.user=nacos
|
||||
# db.password=nacos
|
||||
|
||||
#*************** Metrics Related Configurations ***************#
|
||||
### Metrics for prometheus
|
||||
#management.endpoints.web.exposure.include=prometheus
|
||||
|
||||
### Metrics for elastic search
|
||||
management.metrics.export.elastic.enabled=false
|
||||
#management.metrics.export.elastic.host=http://localhost:9200
|
||||
|
||||
### Metrics for influx
|
||||
management.metrics.export.influx.enabled=false
|
||||
#management.metrics.export.influx.db=springboot
|
||||
#management.metrics.export.influx.uri=http://localhost:8086
|
||||
#management.metrics.export.influx.auto-create-db=true
|
||||
#management.metrics.export.influx.consistency=one
|
||||
#management.metrics.export.influx.compressed=true
|
||||
|
||||
#*************** Console Related Configurations ***************#
|
||||
|
||||
### CORS (Cross-Origin Resource Sharing) configurations for console
|
||||
### Whether to allow credentials (cookies, authorization headers, TLS client certificates)
|
||||
# nacos.console.cors.allow-credentials=true
|
||||
|
||||
### Allowed headers, comma separated. Empty means allow all headers (*)
|
||||
# nacos.console.cors.allowed-headers=
|
||||
|
||||
### Maximum age (in seconds) of the CORS preflight request cache
|
||||
# nacos.console.cors.max-age=18000
|
||||
|
||||
### Allowed HTTP methods, comma separated. Empty means allow all methods (*)
|
||||
# nacos.console.cors.allowed-methods=
|
||||
|
||||
### Allowed origins, comma separated. Empty means allow all origin patterns (*)
|
||||
### Example: nacos.console.cors.allowed-origins=http://localhost:8080,https://example.com
|
||||
# nacos.console.cors.allowed-origins=
|
||||
|
||||
#*************** Core Related Configurations ***************#
|
||||
|
||||
### set the WorkerID manually
|
||||
# nacos.core.snowflake.worker-id=
|
||||
|
||||
### Member-MetaData
|
||||
# nacos.core.member.meta.site=
|
||||
# nacos.core.member.meta.adweight=
|
||||
# nacos.core.member.meta.weight=
|
||||
|
||||
### MemberLookup
|
||||
### Addressing pattern category, If set, the priority is highest
|
||||
# nacos.core.member.lookup.type=[file,address-server]
|
||||
|
||||
## Set the cluster list with a configuration file or command-line argument
|
||||
# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
|
||||
|
||||
## for AddressServerMemberLookup
|
||||
# Maximum number of retries to query the address server upon initialization
|
||||
# nacos.core.address-server.retry=5
|
||||
## Server domain name address of [address-server] mode
|
||||
# address.server.domain=jmenv.tbsite.net
|
||||
## Server port of [address-server] mode
|
||||
#address.server.port=8080
|
||||
## Request address of [address-server] mode
|
||||
# address.server.url=/nacos/serverlist
|
||||
|
||||
#*************** JRaft Related Configurations ***************#
|
||||
|
||||
### Sets the Raft cluster election timeout, default value is 5 second
|
||||
# nacos.core.protocol.raft.data.election_timeout_ms=5000
|
||||
### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
|
||||
# nacos.core.protocol.raft.data.snapshot_interval_secs=30
|
||||
### raft internal worker threads
|
||||
# nacos.core.protocol.raft.data.core_thread_num=8
|
||||
### Number of threads required for raft business request processing
|
||||
# nacos.core.protocol.raft.data.cli_service_thread_num=4
|
||||
### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
|
||||
# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
|
||||
### rpc request timeout, default 5 seconds
|
||||
# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
|
||||
### enable to support prometheus service discovery
|
||||
#nacos.prometheus.metrics.enabled=true
|
||||
|
||||
#*************** Distro Related Configurations ***************#
|
||||
|
||||
### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second.
|
||||
# nacos.core.protocol.distro.data.sync.delayMs=1000
|
||||
### Distro data sync timeout for one sync data, default 3 seconds.
|
||||
# nacos.core.protocol.distro.data.sync.timeoutMs=3000
|
||||
### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds.
|
||||
# nacos.core.protocol.distro.data.sync.retryDelayMs=3000
|
||||
### Distro data verify interval time, verify synced data whether expired for a interval. Default 5 seconds.
|
||||
# nacos.core.protocol.distro.data.verify.intervalMs=5000
|
||||
### Distro data verify timeout for one verify, default 3 seconds.
|
||||
# nacos.core.protocol.distro.data.verify.timeoutMs=3000
|
||||
### Distro data load retry delay when load snapshot data failed, default 30 seconds.
|
||||
# nacos.core.protocol.distro.data.load.retryDelayMs=30000
|
||||
### enable to support prometheus service discovery
|
||||
#nacos.prometheus.metrics.enabled=true
|
||||
|
||||
#*************** Grpc Configurations ***************#
|
||||
|
||||
### Sets the maximum message size allowed to be received on the server.
|
||||
#nacos.remote.server.grpc.sdk.max-inbound-message-size=10485760
|
||||
### Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours.
|
||||
#nacos.remote.server.grpc.sdk.keep-alive-time=7200000
|
||||
### Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds.
|
||||
#nacos.remote.server.grpc.sdk.keep-alive-timeout=20000
|
||||
### Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes
|
||||
#nacos.remote.server.grpc.sdk.permit-keep-alive-time=300000
|
||||
### cluster grpc(inside the nacos server) configuration
|
||||
#nacos.remote.server.grpc.cluster.max-inbound-message-size=10485760
|
||||
### Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours.
|
||||
#nacos.remote.server.grpc.cluster.keep-alive-time=7200000
|
||||
### Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds.
|
||||
#nacos.remote.server.grpc.cluster.keep-alive-timeout=20000
|
||||
### Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes
|
||||
#nacos.remote.server.grpc.cluster.permit-keep-alive-time=300000
|
||||
|
||||
#*************** Config Module Related Configurations ***************#
|
||||
|
||||
### the maximum retry times for push
|
||||
nacos.config.push.maxRetryTime=50
|
||||
|
||||
#*************** Naming Module Related Configurations ***************#
|
||||
### Data dispatch task execution period in milliseconds:
|
||||
|
||||
### If enable data warmup. If set to false, the server would accept request without local data preparation:
|
||||
# nacos.naming.data.warmup=true
|
||||
|
||||
### If enable the instance auto expiration, kind like of health check of instance:
|
||||
# nacos.naming.expireInstance=true
|
||||
|
||||
nacos.naming.empty-service.auto-clean=true
|
||||
nacos.naming.empty-service.clean.initial-delay-ms=50000
|
||||
nacos.naming.empty-service.clean.period-time-ms=30000
|
||||
|
||||
|
||||
#*************** AI Module Related Configurations ***************#
|
||||
|
||||
### Whether nacos ai module is enabled, default true. the ai module need both config module and naming module enabled.
|
||||
# nacos.extension.ai.enabled=true
|
||||
|
||||
### Whether nacos mcp registry is enabled, default is false.
|
||||
### When enabled=true, Nacos will start a `mcp registry` server with new port with `nacos.ai.mcp.registry.port`
|
||||
#nacos.ai.mcp.registry.enabled=false
|
||||
|
||||
### Nacos mcp registry port, default 9080:
|
||||
nacos.ai.mcp.registry.port=9080
|
||||
|
||||
#--------------- Nacos Web Server Configurations ---------------#
|
||||
|
||||
#*************** Nacos Web Server Related Configurations ***************#
|
||||
### Nacos Server Web context path:
|
||||
nacos.server.contextPath=/nacos
|
||||
|
||||
#*************** Access Log Related Configurations ***************#
|
||||
### If turn on the access log:
|
||||
server.tomcat.accesslog.enabled=false
|
||||
|
||||
### accesslog automatic cleaning time
|
||||
server.tomcat.accesslog.max-days=30
|
||||
|
||||
### The access log pattern:
|
||||
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
|
||||
|
||||
### The directory of access log:
|
||||
server.tomcat.basedir=file:.
|
||||
|
||||
#*************** API Related Configurations ***************#
|
||||
### Include message field
|
||||
server.error.include-message=ALWAYS
|
||||
|
||||
### Enabled for open API compatibility
|
||||
# nacos.core.api.compatibility.client.enabled=true
|
||||
### Enabled for admin API compatibility
|
||||
# nacos.core.api.compatibility.admin.enabled=false
|
||||
### Enabled for console API compatibility
|
||||
# nacos.core.api.compatibility.console.enabled=false
|
||||
|
||||
#--------------- Nacos Console Configurations ---------------#
|
||||
|
||||
#*************** Nacos Console Related Configurations ***************#
|
||||
### Nacos Console Main port
|
||||
nacos.console.port=18080
|
||||
### Nacos Server Web context path:
|
||||
nacos.console.contextPath=
|
||||
|
||||
### Nacos Server context path, which link to nacos server `nacos.server.contextPath`, works when deployment type is `console`
|
||||
nacos.console.remote.server.context-path=/nacos
|
||||
|
||||
#************** Console UI Configuration ***************#
|
||||
|
||||
### Turn on/off the nacos console ui.
|
||||
#nacos.console.ui.enabled=true
|
||||
|
||||
#--------------- Nacos Plugin Configurations ---------------#
|
||||
|
||||
#*************** CMDB Plugin Related Configurations ***************#
|
||||
### The interval to dump external CMDB in seconds:
|
||||
# nacos.cmdb.dumpTaskInterval=3600
|
||||
|
||||
### The interval of polling data change event in seconds:
|
||||
# nacos.cmdb.eventTaskInterval=10
|
||||
|
||||
### The interval of loading labels in seconds:
|
||||
# nacos.cmdb.labelTaskInterval=300
|
||||
|
||||
### If turn on data loading task:
|
||||
# nacos.cmdb.loadDataAtStart=false
|
||||
|
||||
#*************** Auth Plugin Related Configurations ***************#
|
||||
### The ignore urls of auth, will be deprecated in the future:
|
||||
nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
|
||||
|
||||
### The auth system to use, default 'nacos' and 'ldap' is supported, other type should be implemented by yourself:
|
||||
nacos.core.auth.system.type=nacos
|
||||
|
||||
### If turn on auth system:
|
||||
# Whether open nacos server API auth system
|
||||
nacos.core.auth.enabled=false
|
||||
# Whether open nacos admin API auth system
|
||||
nacos.core.auth.admin.enabled=true
|
||||
# Whether open nacos console API auth system
|
||||
nacos.core.auth.console.enabled=true
|
||||
|
||||
### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
|
||||
nacos.core.auth.caching.enabled=true
|
||||
|
||||
### worked when nacos.core.auth.enabled=true
|
||||
### The two properties is the white list for auth and used by identity the request from other server.
|
||||
nacos.core.auth.server.identity.key=ruoyi-vue-plus-key
|
||||
nacos.core.auth.server.identity.value=ruoyi-vue-plus-value
|
||||
|
||||
### worked when nacos.core.auth.system.type=nacos or nacos.core.auth.console.enabled=true
|
||||
### The token expiration in seconds:
|
||||
nacos.core.auth.plugin.nacos.token.cache.enable=false
|
||||
nacos.core.auth.plugin.nacos.token.expire.seconds=18000
|
||||
### The default token (Base64 string):
|
||||
#nacos.core.auth.plugin.nacos.token.secret.key=VGhpc0lzTXlDdXN0b21TZWNyZXRLZXkwMTIzNDU2Nzg=
|
||||
nacos.core.auth.plugin.nacos.token.secret.key=rE7bYayhpvduYwCxuhckybEPDXyna6xwm5m7MZjtjrdXjVxAbXAMccXHyaJvB346
|
||||
|
||||
### worked when nacos.core.auth.system.type=ldap,{0} is Placeholder,replace login username
|
||||
#nacos.core.auth.ldap.url=ldap://localhost:389
|
||||
#nacos.core.auth.ldap.basedc=dc=example,dc=org
|
||||
#nacos.core.auth.ldap.userDn=cn=admin,${nacos.core.auth.ldap.basedc}
|
||||
#nacos.core.auth.ldap.password=admin
|
||||
#nacos.core.auth.ldap.userdn=cn={0},dc=example,dc=org
|
||||
#nacos.core.auth.ldap.filter.prefix=uid
|
||||
#nacos.core.auth.ldap.case.sensitive=true
|
||||
#nacos.core.auth.ldap.ignore.partial.result.exception=false
|
||||
|
||||
#*************** Control Plugin Related Configurations ***************#
|
||||
# plugin type
|
||||
#nacos.plugin.control.manager.type=nacos
|
||||
|
||||
# local control rule storage dir, default ${nacos.home}/data/connection and ${nacos.home}/data/tps
|
||||
#nacos.plugin.control.rule.local.basedir=${nacos.home}
|
||||
|
||||
# external control rule storage type, if exist
|
||||
#nacos.plugin.control.rule.external.storage=
|
||||
|
||||
#*************** Config Change Plugin Related Configurations ***************#
|
||||
# webhook
|
||||
#nacos.core.config.plugin.webhook.enabled=false
|
||||
# It is recommended to use EB https://help.aliyun.com/document_detail/413974.html
|
||||
#nacos.core.config.plugin.webhook.url=http://localhost:8080/webhook/send?token=***
|
||||
# The content push max capacity ,byte
|
||||
#nacos.core.config.plugin.webhook.contentMaxCapacity=102400
|
||||
|
||||
# whitelist
|
||||
#nacos.core.config.plugin.whitelist.enabled=false
|
||||
# The import file suffixs
|
||||
#nacos.core.config.plugin.whitelist.suffixs=xml,text,properties,yaml,html
|
||||
# fileformatcheck,which validate the import file of type and content
|
||||
#nacos.core.config.plugin.fileformatcheck.enabled=false
|
||||
|
||||
#*************** Istio Plugin Related Configurations ***************#
|
||||
### If turn on the MCP server:
|
||||
nacos.istio.mcp.server.enabled=false
|
||||
|
||||
#--------------- Nacos Experimental Features Configurations ---------------#
|
||||
|
||||
#*************** K8s Related Configurations ***************#
|
||||
### If turn on the K8s sync:
|
||||
nacos.k8s.sync.enabled=false
|
||||
|
||||
### If use the Java API from an application outside a kubernetes cluster
|
||||
#nacos.k8s.sync.outsideCluster=false
|
||||
#nacos.k8s.sync.kubeConfig=/.kube/config
|
||||
@@ -1,4 +0,0 @@
|
||||
# 集群配置文件 ip+端口
|
||||
127.0.0.1:8848
|
||||
127.0.0.1:8848
|
||||
127.0.0.1:8848
|
||||
189
script/docker/seata/conf/application.yml
Normal file
189
script/docker/seata/conf/application.yml
Normal file
@@ -0,0 +1,189 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
server:
|
||||
port: 8091
|
||||
spring:
|
||||
application:
|
||||
name: seata-server
|
||||
main:
|
||||
web-application-type: none
|
||||
logging:
|
||||
config: classpath:logback-spring.xml
|
||||
file:
|
||||
path: ${log.home:${user.home}/logs/seata}
|
||||
extend:
|
||||
logstash-appender:
|
||||
# off by default
|
||||
enabled: false
|
||||
destination: 127.0.0.1:4560
|
||||
kafka-appender:
|
||||
# off by default
|
||||
enabled: false
|
||||
bootstrap-servers: 127.0.0.1:9092
|
||||
topic: logback_to_logstash
|
||||
producer:
|
||||
acks: 0
|
||||
linger-ms: 1000
|
||||
max-block-ms: 0
|
||||
metric-appender:
|
||||
# off by default
|
||||
enabled: false
|
||||
seata:
|
||||
config:
|
||||
# support: nacos 、 consul 、 apollo 、 zk 、 etcd3
|
||||
type: file
|
||||
nacos:
|
||||
server-addr: 127.0.0.1:8848
|
||||
namespace: public
|
||||
group: DEFAULT_GROUP
|
||||
context-path:
|
||||
##1.The following configuration is for the open source version of Nacos
|
||||
username: nacos
|
||||
password: nacos
|
||||
##2.The following configuration is for the MSE Nacos on aliyun
|
||||
#access-key:
|
||||
#secret-key:
|
||||
##3.The following configuration is used to deploy on Aliyun ECS or ACK without authentication
|
||||
#ram-role-name:
|
||||
data-id: seata-server.properties
|
||||
registry:
|
||||
# support: nacos 、 eureka 、 redis 、 zk 、 consul 、 etcd3 、 sofa 、 seata
|
||||
type: file
|
||||
# preferred-networks: 30.240.*
|
||||
metadata:
|
||||
weight: 100
|
||||
nacos:
|
||||
application: seata-server
|
||||
server-addr: 127.0.0.1:8848
|
||||
group: DEFAULT_GROUP
|
||||
namespace: public
|
||||
cluster: default
|
||||
context-path:
|
||||
##1.The following configuration is for the open source version of Nacos
|
||||
username: nacos
|
||||
password: nacos
|
||||
##2.The following configuration is for the MSE Nacos on aliyun
|
||||
#access-key:
|
||||
#secret-key:
|
||||
##3.The following configuration is used to deploy on Aliyun ECS or ACK without authentication
|
||||
#ram-role-name:
|
||||
|
||||
server:
|
||||
service-port: 8091 # If not configured, the default is '${server.port}'
|
||||
max-commit-retry-timeout: -1
|
||||
max-rollback-retry-timeout: -1
|
||||
rollback-failed-unlock-enable: false
|
||||
enable-check-auth: true
|
||||
enable-parallel-request-handle: true
|
||||
enable-parallel-handle-branch: false
|
||||
retry-dead-threshold: 70000
|
||||
xaer-nota-retry-timeout: 60000
|
||||
enableParallelRequestHandle: true
|
||||
applicationDataLimitCheck: true
|
||||
applicationDataLimit: 64000
|
||||
recovery:
|
||||
committing-retry-period: 1000
|
||||
async-committing-retry-period: 1000
|
||||
rollbacking-retry-period: 1000
|
||||
end-status-retry-period: 1000
|
||||
timeout-retry-period: 1000
|
||||
undo:
|
||||
log-save-days: 7
|
||||
log-delete-period: 86400000
|
||||
session:
|
||||
branch-async-queue-size: 5000 #branch async remove queue size
|
||||
enable-branch-async-remove: false #enable to asynchronous remove branchSession
|
||||
ratelimit:
|
||||
enable: false
|
||||
bucketTokenNumPerSecond: 999999
|
||||
bucketTokenMaxNum: 999999
|
||||
bucketTokenInitialNum: 999999
|
||||
http:
|
||||
filter:
|
||||
xss:
|
||||
keywords: ["<script>", "</script>", "javascript:", "vbscript:"]
|
||||
store:
|
||||
# support: file 、 db 、 redis 、 raft
|
||||
mode: file
|
||||
session:
|
||||
mode: file
|
||||
lock:
|
||||
mode: file
|
||||
file:
|
||||
dir: sessionStore
|
||||
max-branch-session-size: 16384
|
||||
max-global-session-size: 512
|
||||
file-write-buffer-cache-size: 16384
|
||||
session-reload-read-size: 100
|
||||
flush-disk-mode: async
|
||||
db:
|
||||
datasource: hikari
|
||||
db-type: mysql
|
||||
driver-class-name: com.mysql.jdbc.Driver
|
||||
url: jdbc:mysql://127.0.0.1:3306/seata?rewriteBatchedStatements=true
|
||||
user: root
|
||||
password: root
|
||||
min-conn: 10
|
||||
max-conn: 100
|
||||
global-table: global_table
|
||||
branch-table: branch_table
|
||||
lock-table: lock_table
|
||||
distributed-lock-table: distributed_lock
|
||||
vgroup-table: vgroup_table
|
||||
query-limit: 1000
|
||||
max-wait: 5000
|
||||
hikari:
|
||||
idle-timeout: 600000
|
||||
keepalive-time: 120000
|
||||
max-lifetime: 1800000
|
||||
validation-timeout: 5000
|
||||
redis:
|
||||
mode: single
|
||||
# support: lua 、 pipeline
|
||||
type: lua
|
||||
database: 0
|
||||
min-conn: 10
|
||||
max-conn: 100
|
||||
password:
|
||||
max-total: 100
|
||||
query-limit: 1000
|
||||
single:
|
||||
host: 127.0.0.1
|
||||
port: 6379
|
||||
sentinel:
|
||||
master-name:
|
||||
sentinel-hosts:
|
||||
sentinel-password:
|
||||
metrics:
|
||||
enabled: false
|
||||
registry-type: compact
|
||||
exporter-list: prometheus
|
||||
exporter-prometheus-port: 9898
|
||||
transport:
|
||||
rpc-tc-request-timeout: 15000
|
||||
enable-tc-server-batch-send-response: false
|
||||
# HTTP thread pool
|
||||
min-http-pool-size: 10
|
||||
max-http-pool-size: 100
|
||||
max-http-task-queue-size: 1000
|
||||
http-pool-keep-alive-time: 500
|
||||
shutdown:
|
||||
wait: 3
|
||||
thread-factory:
|
||||
boss-thread-prefix: NettyBoss
|
||||
worker-thread-prefix: NettyServerNIOWorker
|
||||
boss-thread-size: 1
|
||||
@@ -1,14 +0,0 @@
|
||||
# 说明
|
||||
项目使用 `skywalking` 官方 `agent` 探针做了精简和扩充
|
||||
<br>
|
||||
从官方自带的插件库 删除了项目中大概率不会用到的插件
|
||||
<br>
|
||||
保留了项目中可能会用到的插件
|
||||
<br>
|
||||
扩展了一些官方不支持的插件
|
||||
<br>
|
||||
插件过多会导致很严重的性能问题 建议不要用过多插件
|
||||
# 扩展
|
||||
项目开发中遇到一些插件包内没有的功能可以去 `skywalking` 官方下载
|
||||
<br>
|
||||
将下载好的插件放入 `plugins` 目录下即可
|
||||
@@ -1,234 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
=======================================================================
|
||||
Apache SkyWalking Subcomponents:
|
||||
|
||||
The Apache SkyWalking project contains subcomponents with separate copyright
|
||||
notices and license terms. Your use of the source code for the these
|
||||
subcomponents is subject to the terms and conditions of the following
|
||||
licenses.
|
||||
|
||||
========================================================================
|
||||
Apache 2.0 licenses
|
||||
========================================================================
|
||||
|
||||
The following components are provided under the Apache License. See project link for details.
|
||||
The text of each license is the standard Apache 2.0 license.
|
||||
|
||||
raphw (byte-buddy) 1.17.6: http://bytebuddy.net/ , Apache 2.0
|
||||
Google: grpc-java 1.74.0: https://github.com/grpc/grpc-java, Apache 2.0
|
||||
Google: gson 2.13.1: https://github.com/google/gson , Apache 2.0
|
||||
Google: proto-google-common-protos 2.59.2: https://github.com/googleapis/googleapis , Apache 2.0
|
||||
Google: jsr305 3.0.2: http://central.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.pom , Apache 2.0
|
||||
Google: guava 32.1.3: https://github.com/google/guava , Apache 2.0
|
||||
netty 4.1.124: https://github.com/netty/netty/blob/4.1/LICENSE.txt, Apache 2.0
|
||||
async-profiler 3.0: https://github.com/async-profiler/async-profiler/blob/v3.0/LICENSE, Apache 2.0
|
||||
|
||||
========================================================================
|
||||
BSD licenses
|
||||
========================================================================
|
||||
|
||||
The following components are provided under a BSD license. See project link for details.
|
||||
The text of each license is also included at licenses/LICENSE-[project].txt.
|
||||
|
||||
asm 9.2:https://gitlab.ow2.org , BSD-3-Clause
|
||||
@@ -1,299 +0,0 @@
|
||||
Apache SkyWalking
|
||||
Copyright 2017-2024 The Apache Software Foundation
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
||||
|
||||
========================================================================
|
||||
|
||||
grpc-java NOTICE
|
||||
|
||||
========================================================================
|
||||
Copyright 2014, gRPC Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
-----------------------------------------------------------------------
|
||||
|
||||
This product contains a modified portion of 'OkHttp', an open source
|
||||
HTTP & SPDY client for Android and Java applications, which can be obtained
|
||||
at:
|
||||
|
||||
* LICENSE:
|
||||
* okhttp/third_party/okhttp/LICENSE (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/square/okhttp
|
||||
* LOCATION_IN_GRPC:
|
||||
* okhttp/third_party/okhttp
|
||||
|
||||
This product contains a modified portion of 'Netty', an open source
|
||||
networking library, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* netty/third_party/netty/LICENSE.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://netty.io
|
||||
* LOCATION_IN_GRPC:
|
||||
* netty/third_party/netty
|
||||
|
||||
========================================================================
|
||||
|
||||
grpc NOTICE
|
||||
|
||||
========================================================================
|
||||
|
||||
Copyright 2014 gRPC authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
========================================================================
|
||||
|
||||
netty NOTICE
|
||||
|
||||
========================================================================
|
||||
|
||||
|
||||
The Netty Project
|
||||
=================
|
||||
|
||||
Please visit the Netty web site for more information:
|
||||
|
||||
* http://netty.io/
|
||||
|
||||
Copyright 2014 The Netty Project
|
||||
|
||||
The Netty Project licenses this file to you under the Apache License,
|
||||
version 2.0 (the "License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at:
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
Also, please refer to each LICENSE.<component>.txt file, which is located in
|
||||
the 'license' directory of the distribution file, for the license terms of the
|
||||
components that this product depends on.
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
This product contains the extensions to Java Collections Framework which has
|
||||
been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.jsr166y.txt (Public Domain)
|
||||
* HOMEPAGE:
|
||||
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
|
||||
* http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
|
||||
|
||||
This product contains a modified version of Robert Harder's Public Domain
|
||||
Base64 Encoder and Decoder, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.base64.txt (Public Domain)
|
||||
* HOMEPAGE:
|
||||
* http://iharder.sourceforge.net/current/java/base64/
|
||||
|
||||
This product contains a modified portion of 'Webbit', an event based
|
||||
WebSocket and HTTP server, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.webbit.txt (BSD License)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/joewalnes/webbit
|
||||
|
||||
This product contains a modified portion of 'SLF4J', a simple logging
|
||||
facade for Java, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.slf4j.txt (MIT License)
|
||||
* HOMEPAGE:
|
||||
* http://www.slf4j.org/
|
||||
|
||||
This product contains a modified portion of 'Apache Harmony', an open source
|
||||
Java SE, which can be obtained at:
|
||||
|
||||
* NOTICE:
|
||||
* license/NOTICE.harmony.txt
|
||||
* LICENSE:
|
||||
* license/LICENSE.harmony.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* http://archive.apache.org/dist/harmony/
|
||||
|
||||
This product contains a modified portion of 'jbzip2', a Java bzip2 compression
|
||||
and decompression library written by Matthew J. Francis. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.jbzip2.txt (MIT License)
|
||||
* HOMEPAGE:
|
||||
* https://code.google.com/p/jbzip2/
|
||||
|
||||
This product contains a modified portion of 'libdivsufsort', a C API library to construct
|
||||
the suffix array and the Burrows-Wheeler transformed string for any input string of
|
||||
a constant-size alphabet written by Yuta Mori. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.libdivsufsort.txt (MIT License)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/y-256/libdivsufsort
|
||||
|
||||
This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
|
||||
which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.jctools.txt (ASL2 License)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/JCTools/JCTools
|
||||
|
||||
This product optionally depends on 'JZlib', a re-implementation of zlib in
|
||||
pure Java, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.jzlib.txt (BSD style License)
|
||||
* HOMEPAGE:
|
||||
* http://www.jcraft.com/jzlib/
|
||||
|
||||
This product optionally depends on 'Compress-LZF', a Java library for encoding and
|
||||
decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.compress-lzf.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/ning/compress
|
||||
|
||||
This product optionally depends on 'lz4', a LZ4 Java compression
|
||||
and decompression library written by Adrien Grand. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.lz4.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/jpountz/lz4-java
|
||||
|
||||
This product optionally depends on 'lzma-java', a LZMA Java compression
|
||||
and decompression library, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.lzma-java.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/jponge/lzma-java
|
||||
|
||||
This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
|
||||
and decompression library written by William Kinney. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.jfastlz.txt (MIT License)
|
||||
* HOMEPAGE:
|
||||
* https://code.google.com/p/jfastlz/
|
||||
|
||||
This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
|
||||
interchange format, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.protobuf.txt (New BSD License)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/google/protobuf
|
||||
|
||||
This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
|
||||
a temporary self-signed X.509 certificate when the JVM does not provide the
|
||||
equivalent functionality. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.bouncycastle.txt (MIT License)
|
||||
* HOMEPAGE:
|
||||
* http://www.bouncycastle.org/
|
||||
|
||||
This product optionally depends on 'Snappy', a compression library produced
|
||||
by Google Inc, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.snappy.txt (New BSD License)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/google/snappy
|
||||
|
||||
This product optionally depends on 'JBoss Marshalling', an alternative Java
|
||||
serialization API, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
|
||||
* HOMEPAGE:
|
||||
* http://www.jboss.org/jbossmarshalling
|
||||
|
||||
This product optionally depends on 'Caliper', Google's micro-
|
||||
benchmarking framework, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.caliper.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/google/caliper
|
||||
|
||||
This product optionally depends on 'Apache Commons Logging', a logging
|
||||
framework, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.commons-logging.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* http://commons.apache.org/logging/
|
||||
|
||||
This product optionally depends on 'Apache Log4J', a logging framework, which
|
||||
can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.log4j.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* http://logging.apache.org/log4j/
|
||||
|
||||
This product optionally depends on 'Aalto XML', an ultra-high performance
|
||||
non-blocking XML processor, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.aalto-xml.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* http://wiki.fasterxml.com/AaltoHome
|
||||
|
||||
This product contains a modified version of 'HPACK', a Java implementation of
|
||||
the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.hpack.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/twitter/hpack
|
||||
|
||||
This product contains a modified portion of 'Apache Commons Lang', a Java library
|
||||
provides utilities for the java.lang API, which can be obtained at:
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.commons-lang.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://commons.apache.org/proper/commons-lang/
|
||||
|
||||
|
||||
This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build.
|
||||
|
||||
* LICENSE:
|
||||
* license/LICENSE.mvn-wrapper.txt (Apache License 2.0)
|
||||
* HOMEPAGE:
|
||||
* https://github.com/takari/maven-wrapper
|
||||
|
||||
3
script/docker/skywalking/agent/README.md
Normal file
3
script/docker/skywalking/agent/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# 说明
|
||||
自行下载 `skywalking` 官方 `agent` 将内容解压到当前文件夹下(注意解压不要带其他文件夹包裹 应直接把具体的agent程序解压到文件夹下)
|
||||
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,344 +0,0 @@
|
||||
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
|
||||
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The service name in UI
|
||||
# ${service name} = [${group name}::]${logic name}
|
||||
# The group name is optional only.
|
||||
agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
|
||||
agent.service_name#length=${SW_AGENT_NAME_MAX_LENGTH:50}
|
||||
|
||||
# The agent namespace
|
||||
agent.namespace=${SW_AGENT_NAMESPACE:}
|
||||
|
||||
# The agent cluster
|
||||
agent.cluster=${SW_AGENT_CLUSTER:}
|
||||
|
||||
# The number of sampled traces per 3 seconds
|
||||
# Negative or zero means off, by default
|
||||
agent.sample_n_per_3_secs=${SW_AGENT_SAMPLE:-1}
|
||||
|
||||
# Authentication active is based on backend setting, see application.yml for more details.
|
||||
agent.authentication=${SW_AGENT_AUTHENTICATION:}
|
||||
|
||||
# The max number of TraceSegmentRef in a single span to keep memory cost estimatable.
|
||||
agent.trace_segment_ref_limit_per_span=${SW_TRACE_SEGMENT_LIMIT:500}
|
||||
|
||||
# The max number of logs in a single span to keep memory cost estimatable.
|
||||
agent.log_limit_per_span=${SW_LOG_LIMIT_PER_SPAN:500}
|
||||
|
||||
# The max amount of spans in a single segment.
|
||||
# Through this config item, SkyWalking keep your application memory cost estimated.
|
||||
agent.span_limit_per_segment=${SW_AGENT_SPAN_LIMIT:300}
|
||||
|
||||
# If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by `,`.
|
||||
agent.ignore_suffix=${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg}
|
||||
|
||||
# If true, SkyWalking agent will save all instrumented classes files in `/debugging` folder.
|
||||
# SkyWalking team may ask for these files in order to resolve compatible problem.
|
||||
agent.is_open_debugging_class=${SW_AGENT_OPEN_DEBUG:false}
|
||||
|
||||
# Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will
|
||||
# generate an 32-bit uuid. BY Default, SkyWalking uses UUID@hostname as the instance name. Max length is 50(UTF-8 char)
|
||||
agent.instance_name=${SW_AGENT_INSTANCE_NAME:}
|
||||
agent.instance_name#length=${SW_AGENT_INSTANCE_NAME_MAX_LENGTH:50}
|
||||
|
||||
# service instance properties in json format. e.g. agent.instance_properties_json = {"org": "apache-skywalking"}
|
||||
agent.instance_properties_json=${SW_INSTANCE_PROPERTIES_JSON:}
|
||||
|
||||
# How depth the agent goes, when log all cause exceptions.
|
||||
agent.cause_exception_depth=${SW_AGENT_CAUSE_EXCEPTION_DEPTH:5}
|
||||
|
||||
# Force reconnection period of grpc, based on grpc_channel_check_interval.
|
||||
agent.force_reconnection_period=${SW_AGENT_FORCE_RECONNECTION_PERIOD:1}
|
||||
|
||||
# The operationName max length
|
||||
# Notice, in the current practice, we don't recommend the length over 190.
|
||||
agent.operation_name_threshold=${SW_AGENT_OPERATION_NAME_THRESHOLD:150}
|
||||
|
||||
# sky服务端不可用也保持跟踪
|
||||
# Keep tracing even the backend is not available if this value is true.
|
||||
agent.keep_tracing=${SW_AGENT_KEEP_TRACING:true}
|
||||
|
||||
# The agent use gRPC plain text in default.
|
||||
# If true, SkyWalking agent uses TLS even no CA file detected.
|
||||
agent.force_tls=${SW_AGENT_FORCE_TLS:false}
|
||||
|
||||
# gRPC SSL trusted ca file.
|
||||
agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt}
|
||||
|
||||
# enable mTLS when ssl_key_path and ssl_cert_chain_path exist.
|
||||
agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:}
|
||||
|
||||
agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:}
|
||||
|
||||
# Enable the agent kernel services and instrumentation.
|
||||
agent.enable=${SW_AGENT_ENABLE:true}
|
||||
|
||||
# Limit the length of the ipv4 list size.
|
||||
osinfo.ipv4_list_size=${SW_AGENT_OSINFO_IPV4_LIST_SIZE:10}
|
||||
|
||||
# grpc channel status check interval.
|
||||
collector.grpc_channel_check_interval=${SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL:30}
|
||||
# Agent heartbeat report period. Unit, second.
|
||||
collector.heartbeat_period=${SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD:30}
|
||||
# The agent sends the instance properties to the backend every
|
||||
# collector.heartbeat_period * collector.properties_report_period_factor seconds
|
||||
collector.properties_report_period_factor=${SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR:10}
|
||||
# Backend service addresses.
|
||||
# collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
|
||||
# How long grpc client will timeout in sending data to upstream. Unit is second.
|
||||
collector.grpc_upstream_timeout=${SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT:30}
|
||||
# Sniffer get profile task list interval.
|
||||
collector.get_profile_task_interval=${SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL:20}
|
||||
# Sniffer get agent dynamic config interval.
|
||||
collector.get_agent_dynamic_config_interval=${SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL:20}
|
||||
# If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses.
|
||||
collector.is_resolve_dns_periodically=${SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY:false}
|
||||
|
||||
# Logging level
|
||||
logging.level=${SW_LOGGING_LEVEL:WARN}
|
||||
# Logging file_name
|
||||
logging.file_name=${SW_LOGGING_FILE_NAME:skywalking-api.log}
|
||||
# Log output. Default is FILE. Use CONSOLE means output to stdout.
|
||||
logging.output=${SW_LOGGING_OUTPUT:FILE}
|
||||
# Log files directory. Default is blank string, meaning use "{theSkywalkingAgentJarDir}/logs " to output logs.
|
||||
# {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located
|
||||
logging.dir=${SW_LOGGING_DIR:}
|
||||
# Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs.
|
||||
# JSON resolver prints logs in JSON format.
|
||||
logging.resolver=${SW_LOGGING_RESOLVER:PATTERN}
|
||||
# Logging format. There are all conversion specifiers:
|
||||
# * %level means log level.
|
||||
# * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.
|
||||
# * %thread means name of current thread.
|
||||
# * %msg means some message which user logged.
|
||||
# * %class means SimpleName of TargetClass.
|
||||
# * %throwable means a throwable which user called.
|
||||
# * %agent_name means agent.service_name. Only apply to the PatternLogger.
|
||||
logging.pattern=${SW_LOGGING_PATTERN:%level %timestamp %thread %class : %msg %throwable}
|
||||
# Logging max_file_size, default: 300 * 1024 * 1024 = 314572800
|
||||
logging.max_file_size=${SW_LOGGING_MAX_FILE_SIZE:314572800}
|
||||
# The max history log files. When rollover happened, if log files exceed this number,
|
||||
# then the oldest file will be delete. Negative or zero means off, by default.
|
||||
logging.max_history_files=${SW_LOGGING_MAX_HISTORY_FILES:-1}
|
||||
|
||||
# Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow.
|
||||
# Besides, the annotation named IgnoredException in the trace toolkit is another way to configure ignored exceptions.
|
||||
statuscheck.ignored_exceptions=${SW_STATUSCHECK_IGNORED_EXCEPTIONS:}
|
||||
# The max recursive depth when checking the exception traced by the agent. Typically, we don't recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.
|
||||
statuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1}
|
||||
|
||||
# Max element count in the correlation context
|
||||
correlation.element_max_number=${SW_CORRELATION_ELEMENT_MAX_NUMBER:3}
|
||||
|
||||
# Max value length of each element.
|
||||
correlation.value_max_length=${SW_CORRELATION_VALUE_MAX_LENGTH:128}
|
||||
# Tag the span by the key/value in the correlation context, when the keys listed here exist.
|
||||
correlation.auto_tag_keys=${SW_CORRELATION_AUTO_TAG_KEYS:}
|
||||
# The buffer size of collected JVM info.
|
||||
jvm.buffer_size=${SW_JVM_BUFFER_SIZE:600}
|
||||
# The period in seconds of JVM metrics collection. Unit is second.
|
||||
jvm.metrics_collect_period=${SW_JVM_METRICS_COLLECT_PERIOD:1}
|
||||
# The buffer channel size.
|
||||
buffer.channel_size=${SW_BUFFER_CHANNEL_SIZE:5}
|
||||
# The buffer size.
|
||||
buffer.buffer_size=${SW_BUFFER_BUFFER_SIZE:300}
|
||||
# If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile.
|
||||
profile.active=${SW_AGENT_PROFILE_ACTIVE:true}
|
||||
# Parallel monitor endpoint thread count
|
||||
profile.max_parallel=${SW_AGENT_PROFILE_MAX_PARALLEL:5}
|
||||
# Max monitoring sub-tasks count of one single endpoint access
|
||||
profile.max_accept_sub_parallel=${SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL:5}
|
||||
# Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it.
|
||||
profile.duration=${SW_AGENT_PROFILE_DURATION:10}
|
||||
# Max dump thread stack depth
|
||||
profile.dump_max_stack_depth=${SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH:500}
|
||||
# Snapshot transport to backend buffer size
|
||||
profile.snapshot_transport_buffer_size=${SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE:4500}
|
||||
# If true, async profiler will be enabled when user creates a new async profiler task. If false, it will be disabled. The default value is true.
|
||||
asyncprofiler.active=${SW_AGENT_ASYNC_PROFILER_ACTIVE:true}
|
||||
# Max execution time(second) for the Async Profiler. The task will be stopped even if a longer time is specified. default 20min.
|
||||
asyncprofiler.max_duration=${SW_AGENT_ASYNC_PROFILER_MAX_DURATION:1200}
|
||||
# Path for the JFR outputs from the Async Profiler. If the parameter is not empty, the file will be created in the specified directory, otherwise the Files.createTemp method will be used to create the file.
|
||||
asyncprofiler.output_path=${SW_AGENT_ASYNC_PROFILER_OUTPUT_PATH:}
|
||||
# If true, the agent collects and reports metrics to the backend.
|
||||
meter.active=${SW_METER_ACTIVE:true}
|
||||
# Report meters interval. The unit is second
|
||||
meter.report_interval=${SW_METER_REPORT_INTERVAL:20}
|
||||
# Max size of the meter pool
|
||||
meter.max_meter_size=${SW_METER_MAX_METER_SIZE:500}
|
||||
# The max size of message to send to server.Default is 10 MB
|
||||
log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}
|
||||
|
||||
# Mount the specific folders of the plugins. Plugins in mounted folders would work.
|
||||
plugin.mount=${SW_MOUNT_FOLDERS:plugins,activations}
|
||||
# Peer maximum description limit.
|
||||
plugin.peer_max_length=${SW_PLUGIN_PEER_MAX_LENGTH:200}
|
||||
# Exclude some plugins define in plugins dir.Plugin names is defined in [Agent plugin list](Plugin-list.md)
|
||||
plugin.exclude_plugins=${SW_EXCLUDE_PLUGINS:}
|
||||
# If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters.
|
||||
plugin.mongodb.trace_param=${SW_PLUGIN_MONGODB_TRACE_PARAM:false}
|
||||
# If set to positive number, the `WriteRequest.params` would be truncated to this length, otherwise it would be completely saved, which may cause performance problem.
|
||||
plugin.mongodb.filter_length_limit=${SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT:256}
|
||||
# If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false.
|
||||
plugin.elasticsearch.trace_dsl=${SW_PLUGIN_ELASTICSEARCH_TRACE_DSL:false}
|
||||
# If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false.
|
||||
plugin.springmvc.use_qualified_name_as_endpoint_name=${SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME:false}
|
||||
# If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false.
|
||||
plugin.toolkit.use_qualified_name_as_operation_name=${SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false}
|
||||
# If set to true, the parameters of the sql (typically `java.sql.PreparedStatement`) would be collected.
|
||||
plugin.jdbc.trace_sql_parameters=${SW_JDBC_TRACE_SQL_PARAMETERS:false}
|
||||
# If set to positive number, the `db.sql.parameters` would be truncated to this length, otherwise it would be completely saved, which may cause performance problem.
|
||||
plugin.jdbc.sql_parameters_max_length=${SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH:512}
|
||||
# If set to positive number, the `db.statement` would be truncated to this length, otherwise it would be completely saved, which may cause performance problem.
|
||||
plugin.jdbc.sql_body_max_length=${SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH:2048}
|
||||
# If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false.
|
||||
plugin.solrj.trace_statement=${SW_PLUGIN_SOLRJ_TRACE_STATEMENT:false}
|
||||
# If true, trace all the operation parameters in Solr request, default is false.
|
||||
plugin.solrj.trace_ops_params=${SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS:false}
|
||||
# If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request.
|
||||
plugin.light4j.trace_handler_chain=${SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN:false}
|
||||
# If true, the transaction definition name will be simplified.
|
||||
plugin.springtransaction.simplify_transaction_definition_name=${SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME:false}
|
||||
# Threading classes (`java.lang.Runnable` and `java.util.concurrent.Callable`) and their subclasses, including anonymous inner classes whose name match any one of the `THREADING_CLASS_PREFIXES` (split by `,`) will be instrumented, make sure to only specify as narrow prefixes as what you're expecting to instrument, (`java.` and `javax.` will be ignored due to safety issues)
|
||||
plugin.jdkthreading.threading_class_prefixes=${SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES:}
|
||||
# This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace.
|
||||
plugin.tomcat.collect_http_params=${SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS:false}
|
||||
# This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either `plugin.tomcat.collect_http_params` or `plugin.springmvc.collect_http_params`. Also, activate implicitly in the profiled trace.
|
||||
plugin.springmvc.collect_http_params=${SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS:false}
|
||||
# This config item controls that whether the HttpClient plugin should collect the parameters of the request
|
||||
plugin.httpclient.collect_http_params=${SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS:false}
|
||||
# When `COLLECT_HTTP_PARAMS` is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.
|
||||
plugin.http.http_params_length_threshold=${SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD:1024}
|
||||
# When `include_http_headers` declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance.
|
||||
plugin.http.http_headers_length_threshold=${SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD:2048}
|
||||
# Set the header names, which should be collected by the plugin. Header name must follow `javax.servlet.http` definition. Multiple names should be split by comma.
|
||||
plugin.http.include_http_headers=${SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS:}
|
||||
# This config item controls that whether the Feign plugin should collect the http body of the request.
|
||||
plugin.feign.collect_request_body=${SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY:false}
|
||||
# When `COLLECT_REQUEST_BODY` is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body.
|
||||
plugin.feign.filter_length_limit=${SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT:1024}
|
||||
# When `COLLECT_REQUEST_BODY` is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by `,`
|
||||
plugin.feign.supported_content_types_prefix=${SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX:application/json,text/}
|
||||
# If true, trace all the influxql(query and write) in InfluxDB access, default is true.
|
||||
plugin.influxdb.trace_influxql=${SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL:true}
|
||||
# Apache Dubbo consumer collect `arguments` in RPC call, use `Object#toString` to collect `arguments`.
|
||||
plugin.dubbo.collect_consumer_arguments=${SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS:false}
|
||||
# When `plugin.dubbo.collect_consumer_arguments` is `true`, arguments are truncated to this length before being sent to the OAP backend
|
||||
plugin.dubbo.consumer_arguments_length_threshold=${SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD:256}
|
||||
# Apache Dubbo provider collect `arguments` in RPC call, use `Object#toString` to collect `arguments`.
|
||||
plugin.dubbo.collect_provider_arguments=${SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS:false}
|
||||
# When `plugin.dubbo.collect_provider_arguments` is `true`, arguments are truncated to this length before being sent to the OAP backend
|
||||
plugin.dubbo.provider_arguments_length_threshold=${SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD:256}
|
||||
# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
|
||||
plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092}
|
||||
# Timeout period of reading topics from the Kafka server, the unit is second.
|
||||
plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10}
|
||||
# Kafka producer configuration. Read [producer configure](http://kafka.apache.org/24/documentation.html#producerconfigs)
|
||||
# to get more details. Check document for more details and examples.
|
||||
plugin.kafka.producer_config=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG:}
|
||||
# Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication.
|
||||
plugin.kafka.producer_config_json=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON:}
|
||||
# Specify which Kafka topic name for Meter System data to report to.
|
||||
plugin.kafka.topic_meter=${SW_PLUGIN_KAFKA_TOPIC_METER:skywalking-meters}
|
||||
# Specify which Kafka topic name for JVM metrics data to report to.
|
||||
plugin.kafka.topic_metrics=${SW_PLUGIN_KAFKA_TOPIC_METRICS:skywalking-metrics}
|
||||
# Specify which Kafka topic name for traces data to report to.
|
||||
plugin.kafka.topic_segment=${SW_PLUGIN_KAFKA_TOPIC_SEGMENT:skywalking-segments}
|
||||
# Specify which Kafka topic name for Thread Profiling snapshot to report to.
|
||||
plugin.kafka.topic_profiling=${SW_PLUGIN_KAFKA_TOPIC_PROFILINGS:skywalking-profilings}
|
||||
# Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to.
|
||||
plugin.kafka.topic_management=${SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT:skywalking-managements}
|
||||
# Specify which Kafka topic name for the logging data to report to.
|
||||
plugin.kafka.topic_logging=${SW_PLUGIN_KAFKA_TOPIC_LOGGING:skywalking-logs}
|
||||
# Isolate multiple OAP servers when using the same Kafka cluster (the namespace is prepended to each Kafka topic name, separated by `-`).
|
||||
plugin.kafka.namespace=${SW_KAFKA_NAMESPACE:}
|
||||
# Specify which class to use to decode encoded Kafka configuration. You can set encoded information in `plugin.kafka.producer_config_json` or `plugin.kafka.producer_config` if needed.
|
||||
plugin.kafka.decode_class=${SW_KAFKA_DECODE_CLASS:}
|
||||
# Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when `Spring annotation plugin` has been activated.
|
||||
plugin.springannotation.classname_match_regex=${SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX:}
|
||||
# Whether or not to transmit logged data as formatted or un-formatted.
|
||||
plugin.toolkit.log.transmit_formatted=${SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED:true}
|
||||
# If set to true, the parameters of Redis commands would be collected by Lettuce agent.
|
||||
plugin.lettuce.trace_redis_parameters=${SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS:false}
|
||||
# If set to positive number and `plugin.lettuce.trace_redis_parameters` is set to `true`, Redis command parameters would be collected and truncated to this length.
|
||||
plugin.lettuce.redis_parameter_max_length=${SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH:128}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.lettuce.operation_mapping_write=${SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.lettuce.operation_mapping_read=${SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
|
||||
# If set to true, the parameters of the cypher would be collected.
|
||||
plugin.neo4j.trace_cypher_parameters=${SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS:false}
|
||||
# If set to positive number, the `db.cypher.parameters` would be truncated to this length, otherwise it would be completely saved, which may cause performance problem.
|
||||
plugin.neo4j.cypher_parameters_max_length=${SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH:512}
|
||||
# If set to positive number, the `db.statement` would be truncated to this length, otherwise it would be completely saved, which may cause performance problem.
|
||||
plugin.neo4j.cypher_body_max_length=${SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH:2048}
|
||||
# If set to a positive number and activate `trace sampler CPU policy plugin`, the trace would not be collected when agent process CPU usage percent is greater than `plugin.cpupolicy.sample_cpu_usage_percent_limit`.
|
||||
plugin.cpupolicy.sample_cpu_usage_percent_limit=${SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT:-1}
|
||||
# This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace.
|
||||
plugin.micronauthttpclient.collect_http_params=${SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS:false}
|
||||
# This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace.
|
||||
plugin.micronauthttpserver.collect_http_params=${SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS:false}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.memcached.operation_mapping_write=${SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE:set,add,replace,append,prepend,cas,delete,touch,incr,decr}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.memcached.operation_mapping_read=${SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ:get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.ehcache.operation_mapping_write=${SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE:tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.ehcache.operation_mapping_read=${SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ:get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.guavacache.operation_mapping_write=${SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE:put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.guavacache.operation_mapping_read=${SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ:getIfPresent,get,getAllPresent,size}
|
||||
# If set to true, the parameters of Redis commands would be collected by Jedis agent.
|
||||
plugin.jedis.trace_redis_parameters=${SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS:false}
|
||||
# If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length.
|
||||
plugin.jedis.redis_parameter_max_length=${SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH:128}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.jedis.operation_mapping_write=${SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.jedis.operation_mapping_read=${SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
|
||||
# If set to true, the parameters of Redis commands would be collected by Redisson agent.
|
||||
plugin.redisson.trace_redis_parameters=${SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS:false}
|
||||
# If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length.
|
||||
plugin.redisson.redis_parameter_max_length=${SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH:128}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.redisson.operation_mapping_write=${SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.redisson.operation_mapping_read=${SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
|
||||
# This config item controls that whether the Netty-http plugin should collect the http body of the request.
|
||||
plugin.nettyhttp.collect_request_body=${SW_PLUGIN_NETTYHTTP_COLLECT_REQUEST_BODY:false}
|
||||
# When `HTTP_COLLECT_REQUEST_BODY` is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body.
|
||||
plugin.nettyhttp.filter_length_limit=${SW_PLUGIN_NETTYHTTP_FILTER_LENGTH_LIMIT:1024}
|
||||
# When `HTTP_COLLECT_REQUEST_BODY` is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by `,`
|
||||
plugin.nettyhttp.supported_content_types_prefix=${SW_PLUGIN_NETTYHTTP_SUPPORTED_CONTENT_TYPES_PREFIX:application/json,text/}
|
||||
# If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.
|
||||
plugin.rocketmqclient.collect_message_keys=${SW_PLUGIN_ROCKETMQCLIENT_COLLECT_MESSAGE_KEYS:false}
|
||||
# If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.
|
||||
plugin.rocketmqclient.collect_message_tags=${SW_PLUGIN_ROCKETMQCLIENT_COLLECT_MESSAGE_TAGS:false}
|
||||
# Define the max length of collected HTTP parameters. The default value(=0) means not collecting.
|
||||
plugin.solon.http_params_length_threshold=${SW_PLUGIN_SOLON_HTTP_PARAMS_LENGTH_THRESHOLD:0}
|
||||
# It controls what header data should be collected, values must be in lower case, if empty, no header data will be collected. default is empty.
|
||||
plugin.solon.include_http_headers=${SW_PLUGIN_SOLON_INCLUDE_HTTP_HEADERS:}
|
||||
# Define the max length of collected HTTP body. The default value(=0) means not collecting.
|
||||
plugin.solon.http_body_length_threshold=${SW_PLUGIN_SOLON_HTTP_BODY_LENGTH_THRESHOLD:0}
|
||||
# Specify which command should be converted to write operation
|
||||
plugin.caffeine.operation_mapping_write=${SW_PLUGIN_CAFFEINE_OPERATION_MAPPING_WRITE:put,putAll,remove,clear}
|
||||
# Specify which command should be converted to read operation
|
||||
plugin.caffeine.operation_mapping_read=${SW_PLUGIN_CAFFEINE_OPERATION_MAPPING_READ:getIfPresent,getAllPresent,computeIfAbsent}
|
||||
@@ -1,26 +0,0 @@
|
||||
Copyright (c) 2012 France Télécom
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
3. Neither the name of the copyright holders nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user