docker/bak.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/docker-compose.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/gateway/bootstrap.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/kafka.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/nacos.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/nacos/conf/application.properties | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/nacos/plugins/nacos-postgresql-datasource-plugin-ext-3.0.2.jar | 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/nginx/conf/nginx.conf | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/postgis/init/init.sql | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/readme.md | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/redis/redis.conf | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/spring.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/start.sh | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
docker/system/bootstrap.yml | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 |
docker/bak.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,121 @@ version: "3.8" services: # postgis postgis: image: postgis/postgis:17.5 ports: - 5432:5432 volumes: - ./postgis/pgdata:/var/lib/postgresql/data - ./postgis/init:/docker-entrypoint-initdb.d environment: POSTGRES_USER: postgres POSTGRES_PASSWORD: Postgres!_17_jHs ALLOW_IP_RANGE: 0.0.0.0/0 TZ: Asia/Shanghai POSTGRES_DB: jhs healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 5s timeout: 2s retries: 3 privileged: true networks: - network-jhs # nacos nacos: image: nacos/nacos-server:v3.0.2 environment: TZ: Asia/Shanghai MODE: standalone SPRING_DATASOURCE_PLATFORM: postgresql DB_URL: jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java DB_USER: postgres DB_PASSWORD: Postgres!_17_jHs NACOS_AUTH_ENABLE: true NACOS_AUTH_IDENTITY_KEY: nacos NACOS_AUTH_IDENTITY_VALUE: nAcos_!9#_admIn DB_POOL_CONFIG_DRIVERCLASSNAME: org.postgresql.Driver NACOS_AUTH_TOKEN: TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= volumes: - ./nacos/logs/:/home/nacos/logs - ./nacos/plugins:/home/nacos/plugins #- ./nacos/conf/application.properties:/home/nacos/conf/application.properties ports: - 8080:8080 - 8848:8848 - 9848:9848 depends_on: postgis: condition: service_healthy privileged: true networks: - network-jhs restart: always # zookeeper zookeeper: image: zookeeper:3.4 ports: - 2181:2181 - 2888:2888 - 3888:3888 volumes: - ./zookeeper_data:/data environment: ZOO_MY_ID: 1 ZOO_SERVERS: server.1=0.0.0.0:2888:3888 # ZOO_ENABLE_AUTH: yes # ZOO_SERVER_USERS: admin # ZOO_SERVER_PASSWORDS: kaFka_12#$56 healthcheck: test: ["CMD-SHELL", "zkServer.sh status"] interval: 5s timeout: 2s retries: 3 networks: - network-jhs # kafka kafka: image: bitnami/kafka:3.4 environment: KAFKA_BROKER_ID: 1 # ALLOW_PLAINTEXT_LISTENER: yes # KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 # KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 # KAFKA_CFG_SASL_ENABLED_MECHANISMS: 
PLAIN # KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN # KAFKA_CFG_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT # KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CLIENT:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT # KAFKA_CFG_SASL_PLAINTEXT_PASSWORD_CONVERTER_CLASSNAME: org.apache.kafka.common.security.plain.PlainPasswordConverter # KAFKA_CFG_SUPER_USERS: User:admin;ClientId:admin;Group:admins;Default # KAFKA_CFG_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="kaFka_12#$56"; #ALLOW_PLAINTEXT_LISTENER: yes KAFKA_CFG_LISTENERS: SASL_PLAINTEXT://:9092 KAFKA_CFG_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.11.203:9092 KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL KAFKA_CFG_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT KAFKA_CFG_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_CLIENT_USERS: admin KAFKA_CLIENT_PASSWORDS: kaFka_12#$56 # KAFKA_ZOOKEEPER_USER: admin # KAFKA_ZOOKEEPER_PASSWORD: kaFka_12#$56 volumes: # chmod -R 777 kafka_data/ - ./kafka_data:/bitnami/kafka - /var/run/docker.sock:/var/run/docker.sock ports: - 9092:9092 depends_on: zookeeper: condition: service_healthy privileged: true networks: - network-jhs networks: network-jhs: driver: bridge docker/docker-compose.yml
@@ -1,55 +1,113 @@ version: '3.8' version: "3.8" services: # zookeeper zookeeper: image: zookeeper:3.4.9 # postgis postgis: image: postgis/postgis:17.5 ports: - 2181:2181 # - 2888:2888 # - 3888:3888 - 5432:5432 volumes: - ./zookeeper_data:/data - ./postgis/pgdata:/var/lib/postgresql/data - ./postgis/init:/docker-entrypoint-initdb.d environment: ZOO_MY_ID: 1 ZOO_SERVERS: server.1=0.0.0.0:2888:3888 POSTGRES_USER: postgres POSTGRES_PASSWORD: Postgres!_17_jHs ALLOW_IP_RANGE: 0.0.0.0/0 TZ: Asia/Shanghai POSTGRES_DB: jhs healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 5s timeout: 2s retries: 3 privileged: true networks: - kafka_net #kafka kafka: image: bitnami/kafka:3.4 depends_on: - zookeeper ports: - 9092:9092 - network-jhs # nacos nacos: image: nacos/nacos-server:v3.0.2 environment: KAFKA_BROKER_ID: 1 ALLOW_PLAINTEXT_LISTENER: "yes" KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 #KKAFKA_LISTENERS: PLAINTEXT://:9092 #AFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 #KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 #KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' TZ: Asia/Shanghai MODE: standalone SPRING_DATASOURCE_PLATFORM: postgresql DB_URL: jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java DB_USER: postgres DB_PASSWORD: Postgres!_17_jHs NACOS_AUTH_ENABLE: true NACOS_AUTH_IDENTITY_KEY: nacos NACOS_AUTH_IDENTITY_VALUE: nAcos_!9#_admIn DB_POOL_CONFIG_DRIVERCLASSNAME: org.postgresql.Driver NACOS_AUTH_TOKEN: TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= volumes: - ./kafka_data:/bitnami/kafka # å°å®¿ä¸»æºçDocker奿¥åæä»¶æè½½å°å®¹å¨å é¨ - /var/run/docker.sock:/var/run/docker.sock networks: - kafka_net # kafka-ui kafka-ui: image: provectuslabs/kafka-ui depends_on: - kafka - ./nacos/logs/:/home/nacos/logs - ./nacos/plugins:/home/nacos/plugins #- 
./nacos/conf/application.properties:/home/nacos/conf/application.properties ports: - 8081:8080 - 8080:8080 - 8848:8848 - 9848:9848 depends_on: postgis: condition: service_healthy privileged: true networks: - network-jhs restart: always # redis redis: image: redis:7 ports: - 6379:6379 volumes: - ./redis/redis.conf:/data/redis.conf - ./redis/data:/data #command: redis-server --requirepass Redis_s!E_6.2.6 command: redis-server /data/redis.conf networks: - network-jhs # gateway gateway: image: openjdk:8-jre volumes: - ./gateway/logs:/logs - ./gateway:/app environment: DYNAMIC_CONFIG_ENABLED: true #KAFKA_CLUSTERS_0_NAME: local #KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 TZ: Asia/Shanghai entrypoint: /bin/sh -c "sleep 30 && java -jar /app/se-gateway.jar --spring.config.location=file:/app/" depends_on: - redis - nacos networks: - kafka_net - network-se restart: always # system system: image: openjdk8-422-gdal:3.5.2 volumes: - ./system/logs:/logs - ./system:/app environment: TZ: Asia/Shanghai entrypoint: /bin/sh -c "sleep 30 && java -jar /app/se-system.jar --spring.config.location=file:/app/" depends_on: - postgis - nacos - redis # nginx nginx: image: nginx:1.29 ports: - 8081:80 environment: TZ: Asia/Shanghai volumes: - ./nginx/logs:/var/log/nginx #- ./nginx/conf.d:/etc/nginx/conf.d - ./nginx/html:/usr/share/nginx/html - ./nginx/conf/nginx.conf:/etc/nginx/nginx.conf depends_on: - gateway networks: kafka_net: - network-jhs networks: network-jhs: driver: bridge docker/gateway/bootstrap.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,48 @@ # Tomcat server: port: 8001 # Spring spring: application: # åºç¨åç§° name: se-gateway main: web-application-type: reactive profiles: # ç¯å¢é ç½® active: dev cloud: nacos: username: nacos password: nAcos_!9#_admIn discovery: # æå¡æ³¨åå°å server-addr: nacos:8848 config: group: JHS_GROUP # é ç½®ä¸å¿å°å server-addr: nacos:8848 # é ç½®æä»¶æ ¼å¼ file-extension: yml # å ±äº«é ç½® shared-configs: - data-id: application-${spring.profiles.active}.${spring.cloud.nacos.config.file-extension} group: JHS_GROUP # å è®¸å·æ° refresh-enabled: true sentinel: # åæ¶æ§å¶å°æå è½½ eager: true transport: # æ§å¶å°å°å dashboard: 127.0.0.1:8718 # nacosé ç½®æä¹ å datasource: ds1: nacos: server-addr: 127.0.0.1:8848 dataId: sentinel-se-gateway groupId: DEFAULT_GROUP data-type: json rule-type: gw-flow docker/kafka.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,59 @@ version: "3.8" services: # zookeeper zookeeper: image: zookeeper:3.4 ports: - 2181:2181 - 2888:2888 - 3888:3888 volumes: - ./zookeeper_data:/data environment: ZOO_MY_ID: 1 ZOO_SERVERS: server.1=0.0.0.0:2888:3888 # ZOO_ENABLE_AUTH: yes # ZOO_SERVER_USERS: admin # ZOO_SERVER_PASSWORDS: kaFka_12#$56 healthcheck: test: ["CMD-SHELL", "zkServer.sh status"] interval: 5s timeout: 2s retries: 3 networks: - network-jhs # kafka kafka: image: bitnami/kafka:3.4 environment: KAFKA_BROKER_ID: 1 # ALLOW_PLAINTEXT_LISTENER: yes # KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 # KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 # KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_CFG_LISTENERS: SASL_PLAINTEXT://:9092 KAFKA_CFG_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.11.203:9092 KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL KAFKA_CFG_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT KAFKA_CFG_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_CLIENT_USERS: admin KAFKA_CLIENT_PASSWORDS: kaFka_12#$56 # KAFKA_ZOOKEEPER_USER: admin # KAFKA_ZOOKEEPER_PASSWORD: kaFka_12#$56 volumes: # chmod -R 777 kafka_data/ - ./kafka_data:/bitnami/kafka - /var/run/docker.sock:/var/run/docker.sock ports: - 9092:9092 depends_on: zookeeper: condition: service_healthy privileged: true networks: - network-jhs networks: network-jhs: driver: bridge docker/nacos.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,57 @@ version: "3.8" services: # postgis postgis: image: postgis/postgis:17.5 ports: - 5432:5432 volumes: - ./postgis/pgdata:/var/lib/postgresql/data - ./postgis/init:/docker-entrypoint-initdb.d environment: POSTGRES_USER: postgres POSTGRES_PASSWORD: Postgres!_17_jHs ALLOW_IP_RANGE: 0.0.0.0/0 TZ: Asia/Shanghai POSTGRES_DB: jhs healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 5s timeout: 2s retries: 3 privileged: true networks: - network-jhs # nacos nacos: image: nacos/nacos-server:v3.0.2 environment: TZ: Asia/Shanghai MODE: standalone SPRING_DATASOURCE_PLATFORM: postgresql DB_URL: jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java DB_USER: postgres DB_PASSWORD: Postgres!_17_jHs NACOS_AUTH_ENABLE: true NACOS_AUTH_IDENTITY_KEY: nacos NACOS_AUTH_IDENTITY_VALUE: nAcos_!9#_admIn DB_POOL_CONFIG_DRIVERCLASSNAME: org.postgresql.Driver NACOS_AUTH_TOKEN: TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= volumes: - ./nacos/logs/:/home/nacos/logs - ./nacos/plugins:/home/nacos/plugins #- ./nacos/conf/application.properties:/home/nacos/conf/application.properties ports: - 8080:8080 - 8848:8848 - 9848:9848 depends_on: postgis: condition: service_healthy privileged: true networks: - network-jhs restart: always networks: network-jhs: driver: bridge docker/nacos/conf/application.properties
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,306 @@ # # Copyright 1999-2021 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #*************** Spring Boot Related Configurations ***************# ### Default web context path: server.servlet.contextPath=/nacos ### Include message field server.error.include-message=ALWAYS ### Default web server port: server.port=8848 #*************** Network Related Configurations ***************# ### If prefer hostname over ip for Nacos server addresses in cluster.conf: # nacos.inetutils.prefer-hostname-over-ip=false ### Specify local server's IP: # nacos.inetutils.ip-address= #*************** Config Module Related Configurations ***************# ### If use MySQL as datasource: ### Deprecated configuration property, it is recommended to use `spring.sql.init.platform` replaced. 
# spring.datasource.platform=mysql # spring.sql.init.platform=mysql ### Count of DB: # db.num=1 ### Connect URL of DB: # db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC # db.user.0=nacos # db.password.0=nacos spring.datasource.platform=postgresql db.num=1 #db.url.0=jdbc:mysql://se-mysql:3306/se-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true db.url.0=jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java db.user=postgres db.password=Postgres!_17_jHs db.pool.config.driverClassName=org.postgresql.Driver #db.pool.config.connectionTestQuery=SELECT 1 ### Connection pool configuration: hikariCP db.pool.config.connectionTimeout=30000 db.pool.config.validationTimeout=10000 db.pool.config.maximumPoolSize=20 db.pool.config.minimumIdle=2 ### the maximum retry times for push nacos.config.push.maxRetryTime=50 #*************** Naming Module Related Configurations ***************# ### If enable data warmup. If set to false, the server would accept request without local data preparation: # nacos.naming.data.warmup=true ### If enable the instance auto expiration, kind like of health check of instance: # nacos.naming.expireInstance=true ### Add in 2.0.0 ### The interval to clean empty service, unit: milliseconds. # nacos.naming.clean.empty-service.interval=60000 ### The expired time to clean empty service, unit: milliseconds. # nacos.naming.clean.empty-service.expired-time=60000 ### The interval to clean expired metadata, unit: milliseconds. # nacos.naming.clean.expired-metadata.interval=5000 ### The expired time to clean metadata, unit: milliseconds. # nacos.naming.clean.expired-metadata.expired-time=60000 ### The delay time before push task to execute from service changed, unit: milliseconds. 
# nacos.naming.push.pushTaskDelay=500 ### The timeout for push task execute, unit: milliseconds. # nacos.naming.push.pushTaskTimeout=5000 ### The delay time for retrying failed push task, unit: milliseconds. # nacos.naming.push.pushTaskRetryDelay=1000 ### Since 2.0.3 ### The expired time for inactive client, unit: milliseconds. # nacos.naming.client.expired.time=180000 #*************** CMDB Module Related Configurations ***************# ### The interval to dump external CMDB in seconds: # nacos.cmdb.dumpTaskInterval=3600 ### The interval of polling data change event in seconds: # nacos.cmdb.eventTaskInterval=10 ### The interval of loading labels in seconds: # nacos.cmdb.labelTaskInterval=300 ### If turn on data loading task: # nacos.cmdb.loadDataAtStart=false #***********Metrics for tomcat **************************# server.tomcat.mbeanregistry.enabled=true #***********Expose prometheus and health **************************# #management.endpoints.web.exposure.include=prometheus,health ### Metrics for elastic search management.metrics.export.elastic.enabled=false #management.metrics.export.elastic.host=http://localhost:9200 ### Metrics for influx management.metrics.export.influx.enabled=false #management.metrics.export.influx.db=springboot #management.metrics.export.influx.uri=http://localhost:8086 #management.metrics.export.influx.auto-create-db=true #management.metrics.export.influx.consistency=one #management.metrics.export.influx.compressed=true #*************** Access Log Related Configurations ***************# ### If turn on the access log: server.tomcat.accesslog.enabled=true ### file name pattern, one file per hour server.tomcat.accesslog.rotate=true server.tomcat.accesslog.file-date-format=.yyyy-MM-dd-HH ### The access log pattern: server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i ### The directory of access log: server.tomcat.basedir=file:. 
#*************** Access Control Related Configurations ***************# ### If enable spring security, this option is deprecated in 1.2.0: #spring.security.enabled=false ### The ignore urls of auth nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/** ### The auth system to use, currently only 'nacos' and 'ldap' is supported: nacos.core.auth.system.type=nacos ### If turn on auth system: nacos.core.auth.enabled=true ### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay. nacos.core.auth.caching.enabled=true ### Since 1.4.1, Turn on/off white auth for user-agent: nacos-server, only for upgrade from old version. nacos.core.auth.enable.userAgentAuthWhite=false ### Since 1.4.1, worked when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false. ### The two properties is the white list for auth and used by identity the request from other server. 
nacos.core.auth.server.identity.key=admin nacos.core.auth.server.identity.value=nAcos_!9#_admIn ### worked when nacos.core.auth.system.type=nacos ### The token expiration in seconds: nacos.core.auth.plugin.nacos.token.cache.enable=false nacos.core.auth.plugin.nacos.token.expire.seconds=18000 ### The default token (Base64 String): https://base64.us/ nacos.core.auth.plugin.nacos.token.secret.key=TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= ### worked when nacos.core.auth.system.type=ldapï¼{0} is Placeholder,replace login username #nacos.core.auth.ldap.url=ldap://localhost:389 #nacos.core.auth.ldap.basedc=dc=example,dc=org #nacos.core.auth.ldap.userDn=cn=admin,${nacos.core.auth.ldap.basedc} #nacos.core.auth.ldap.password=admin #nacos.core.auth.ldap.userdn=cn={0},dc=example,dc=org #nacos.core.auth.ldap.filter.prefix=uid #nacos.core.auth.ldap.case.sensitive=true #nacos.core.auth.ldap.ignore.partial.result.exception=false #*************** Control Plugin Related Configurations ***************# # plugin type #nacos.plugin.control.manager.type=nacos # local control rule storage dir, default ${nacos.home}/data/connection and ${nacos.home}/data/tps #nacos.plugin.control.rule.local.basedir=${nacos.home} # external control rule storage type, if exist #nacos.plugin.control.rule.external.storage= #*************** Config Change Plugin Related Configurations ***************# # webhook #nacos.core.config.plugin.webhook.enabled=false # It is recommended to use EB https://help.aliyun.com/document_detail/413974.html #nacos.core.config.plugin.webhook.url=http://localhost:8080/webhook/send?token=*** # The content push max capacity ,byte #nacos.core.config.plugin.webhook.contentMaxCapacity=102400 # whitelist #nacos.core.config.plugin.whitelist.enabled=false # The import file suffixs #nacos.core.config.plugin.whitelist.suffixs=xml,text,properties,yaml,html # fileformatcheck,which validate the import file of type and content #nacos.core.config.plugin.fileformatcheck.enabled=false 
#*************** Istio Related Configurations ***************# ### If turn on the MCP server: nacos.istio.mcp.server.enabled=false #*************** Core Related Configurations ***************# ### set the WorkerID manually # nacos.core.snowflake.worker-id= ### Member-MetaData # nacos.core.member.meta.site= # nacos.core.member.meta.adweight= # nacos.core.member.meta.weight= ### MemberLookup ### Addressing pattern category, If set, the priority is highest # nacos.core.member.lookup.type=[file,address-server] ## Set the cluster list with a configuration file or command-line argument # nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809 ## for AddressServerMemberLookup # Maximum number of retries to query the address server upon initialization # nacos.core.address-server.retry=5 ## Server domain name address of [address-server] mode # address.server.domain=jmenv.tbsite.net ## Server port of [address-server] mode # address.server.port=8080 ## Request address of [address-server] mode # address.server.url=/nacos/serverlist #*************** JRaft Related Configurations ***************# ### Sets the Raft cluster election timeout, default value is 5 second # nacos.core.protocol.raft.data.election_timeout_ms=5000 ### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute # nacos.core.protocol.raft.data.snapshot_interval_secs=30 ### raft internal worker threads # nacos.core.protocol.raft.data.core_thread_num=8 ### Number of threads required for raft business request processing # nacos.core.protocol.raft.data.cli_service_thread_num=4 ### raft linear read strategy. 
Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat # nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe ### rpc request timeout, default 5 seconds # nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000 #*************** Distro Related Configurations ***************# ### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second. # nacos.core.protocol.distro.data.sync.delayMs=1000 ### Distro data sync timeout for one sync data, default 3 seconds. # nacos.core.protocol.distro.data.sync.timeoutMs=3000 ### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds. # nacos.core.protocol.distro.data.sync.retryDelayMs=3000 ### Distro data verify interval time, verify synced data whether expired for a interval. Default 5 seconds. # nacos.core.protocol.distro.data.verify.intervalMs=5000 ### Distro data verify timeout for one verify, default 3 seconds. # nacos.core.protocol.distro.data.verify.timeoutMs=3000 ### Distro data load retry delay when load snapshot data failed, default 30 seconds. # nacos.core.protocol.distro.data.load.retryDelayMs=30000 ### enable to support prometheus service discovery #nacos.prometheus.metrics.enabled=true ### Since 2.3 #*************** Grpc Configurations ***************# ## sdk grpc(between nacos server and client) configuration ## Sets the maximum message size allowed to be received on the server. #nacos.remote.server.grpc.sdk.max-inbound-message-size=10485760 ## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours. #nacos.remote.server.grpc.sdk.keep-alive-time=7200000 ## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds. 
#nacos.remote.server.grpc.sdk.keep-alive-timeout=20000 ## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes #nacos.remote.server.grpc.sdk.permit-keep-alive-time=300000 ## cluster grpc(inside the nacos server) configuration #nacos.remote.server.grpc.cluster.max-inbound-message-size=10485760 ## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours. #nacos.remote.server.grpc.cluster.keep-alive-time=7200000 ## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds. #nacos.remote.server.grpc.cluster.keep-alive-timeout=20000 ## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes #nacos.remote.server.grpc.cluster.permit-keep-alive-time=300000 ## open nacos default console ui #nacos.console.ui.enabled=true docker/nacos/plugins/nacos-postgresql-datasource-plugin-ext-3.0.2.jarBinary files differ
docker/nginx/conf/nginx.conf
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,73 @@ worker_processes 1; events { worker_connections 1024; } http { include mime.types; default_type application/octet-stream; sendfile on; keepalive_timeout 65; server { listen 80; server_name localhost; location / { root /usr/share/nginx/html; try_files $uri $uri/ /index.html; index index.html index.htm; } location /sso { alias /usr/share/nginx/html/sso; try_files $uri $uri/ /sso/; #index index.html index.htm; } location /sys { alias /usr/share/nginx/html/sys; try_files $uri $uri/ /sys/; #index index.html index.htm; } location /se-file/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; #proxy_pass http://se-file:9300/; proxy_pass http://se-system:9201/; } location /prod-api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://se-gateway:8080/; } location /wgcloud { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://se-wgcloud:9999/wgcloud; } # actuator if ($request_uri ~ "/actuator") { return 403; } error_page 500 502 503 504 /50x.html; location = /50x.html { root html; } } } docker/postgis/init/init.sql
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,551 @@ /* * Copyright 1999-2018 Alibaba Group Holding Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -- ---------------------------- -- create extension -- ---------------------------- create extension if not exists postgis cascade; create extension if not exists "uuid-ossp"; -- ---------------------------- -- Table structure for config_info_gray -- ---------------------------- DROP TABLE IF EXISTS "config_info_gray"; CREATE TABLE "config_info_gray" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "content" text NOT NULL, "md5" varchar(32), "src_user" text, "src_ip" varchar(100) NOT NULL, "gmt_create" timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP, "gmt_modified" timestamp(6) NOT NULL, "app_name" varchar(128) DEFAULT NULL::character varying, "tenant_id" varchar(128) DEFAULT ''::character varying, "gray_name" varchar(128) NOT NULL, "gray_rule" text NOT NULL, "encrypted_data_key" varchar(256) NOT NULL ); COMMENT ON COLUMN "config_info_gray"."id" IS 'èªå¢ID'; COMMENT ON COLUMN "config_info_gray"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_gray"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_gray"."content" IS 'content'; COMMENT ON COLUMN "config_info_gray"."md5" IS 'md5'; COMMENT ON COLUMN "config_info_gray"."src_user" IS 'src_user'; COMMENT ON COLUMN "config_info_gray"."src_ip" IS 'src_ip'; COMMENT ON COLUMN "config_info_gray"."gmt_create" IS 'gmt_create'; COMMENT ON COLUMN 
"config_info_gray"."gmt_modified" IS 'gmt_modified'; COMMENT ON COLUMN "config_info_gray"."app_name" IS 'app_name'; COMMENT ON COLUMN "config_info_gray"."tenant_id" IS 'tenant_id'; COMMENT ON COLUMN "config_info_gray"."gray_name" IS 'gray_name'; COMMENT ON COLUMN "config_info_gray"."gray_rule" IS 'gray_rule'; COMMENT ON COLUMN "config_info_gray"."encrypted_data_key" IS 'encrypted_data_key'; -- ---------------------------- -- Primary Key structure for table config_info_gray -- ---------------------------- ALTER TABLE "config_info_gray" ADD CONSTRAINT "config_info_gray_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Table structure for config_info -- ---------------------------- DROP TABLE IF EXISTS "config_info"; CREATE TABLE "config_info" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(255) , "content" text NOT NULL, "md5" varchar(32) , "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "src_user" text , "src_ip" varchar(20) , "app_name" varchar(128) , "tenant_id" varchar(128) , "c_desc" varchar(256) , "c_use" varchar(64) , "effect" varchar(64) , "type" varchar(64) , "c_schema" text , "encrypted_data_key" text NOT NULL ) ; COMMENT ON COLUMN "config_info"."id" IS 'id'; COMMENT ON COLUMN "config_info"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info"."content" IS 'content'; COMMENT ON COLUMN "config_info"."md5" IS 'md5'; COMMENT ON COLUMN "config_info"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "config_info"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info"."src_user" IS 'source user'; COMMENT ON COLUMN "config_info"."src_ip" IS 'source ip'; COMMENT ON COLUMN "config_info"."tenant_id" IS 'ç§æ·å段'; COMMENT ON COLUMN "config_info"."encrypted_data_key" IS 'ç§é¥'; COMMENT ON TABLE "config_info" IS 'config_info'; -- ---------------------------- -- Table structure for config_info_aggr -- ---------------------------- DROP TABLE IF EXISTS "config_info_aggr"; CREATE TABLE "config_info_aggr" 
( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(255) NOT NULL, "datum_id" varchar(255) NOT NULL, "content" text NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "app_name" varchar(128) , "tenant_id" varchar(128) ) ; COMMENT ON COLUMN "config_info_aggr"."id" IS 'id'; COMMENT ON COLUMN "config_info_aggr"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_aggr"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_aggr"."datum_id" IS 'datum_id'; COMMENT ON COLUMN "config_info_aggr"."content" IS 'å 容'; COMMENT ON COLUMN "config_info_aggr"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info_aggr"."tenant_id" IS 'ç§æ·å段'; COMMENT ON TABLE "config_info_aggr" IS 'å¢å ç§æ·å段'; -- ---------------------------- -- Records of config_info_aggr -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for config_info_beta -- ---------------------------- DROP TABLE IF EXISTS "config_info_beta"; CREATE TABLE "config_info_beta" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "app_name" varchar(128) , "content" text NOT NULL, "beta_ips" varchar(1024) , "md5" varchar(32) , "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "src_user" text , "src_ip" varchar(20) , "tenant_id" varchar(128) , "encrypted_data_key" text NOT NULL ) ; COMMENT ON COLUMN "config_info_beta"."id" IS 'id'; COMMENT ON COLUMN "config_info_beta"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_beta"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_beta"."app_name" IS 'app_name'; COMMENT ON COLUMN "config_info_beta"."content" IS 'content'; COMMENT ON COLUMN "config_info_beta"."beta_ips" IS 'betaIps'; COMMENT ON COLUMN "config_info_beta"."md5" IS 'md5'; COMMENT ON COLUMN "config_info_beta"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "config_info_beta"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info_beta"."src_user" IS 'source user'; 
COMMENT ON COLUMN "config_info_beta"."src_ip" IS 'source ip';
COMMENT ON COLUMN "config_info_beta"."tenant_id" IS '租户字段';
COMMENT ON COLUMN "config_info_beta"."encrypted_data_key" IS '秘钥';
COMMENT ON TABLE "config_info_beta" IS 'config_info_beta';

-- ----------------------------
-- Records of config_info_beta
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for config_info_tag
-- ----------------------------
DROP TABLE IF EXISTS "config_info_tag";
CREATE TABLE "config_info_tag" (
    "id" bigserial NOT NULL,
    "data_id" varchar(255) NOT NULL,
    "group_id" varchar(128) NOT NULL,
    "tenant_id" varchar(128),
    "tag_id" varchar(128) NOT NULL,
    "app_name" varchar(128),
    "content" text NOT NULL,
    "md5" varchar(32),
    "gmt_create" timestamp(6) NOT NULL,
    "gmt_modified" timestamp(6) NOT NULL,
    "src_user" text,
    "src_ip" varchar(20)
);
COMMENT ON COLUMN "config_info_tag"."id" IS 'id';
COMMENT ON COLUMN "config_info_tag"."data_id" IS 'data_id';
COMMENT ON COLUMN "config_info_tag"."group_id" IS 'group_id';
COMMENT ON COLUMN "config_info_tag"."tenant_id" IS 'tenant_id';
COMMENT ON COLUMN "config_info_tag"."tag_id" IS 'tag_id';
COMMENT ON COLUMN "config_info_tag"."app_name" IS 'app_name';
COMMENT ON COLUMN "config_info_tag"."content" IS 'content';
COMMENT ON COLUMN "config_info_tag"."md5" IS 'md5';
COMMENT ON COLUMN "config_info_tag"."gmt_create" IS '创建时间';
COMMENT ON COLUMN "config_info_tag"."gmt_modified" IS '修改时间';
COMMENT ON COLUMN "config_info_tag"."src_user" IS 'source user';
COMMENT ON COLUMN "config_info_tag"."src_ip" IS 'source ip';
COMMENT ON TABLE "config_info_tag" IS 'config_info_tag';

-- ----------------------------
-- Records of config_info_tag
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for config_tags_relation
-- ----------------------------
DROP TABLE IF EXISTS "config_tags_relation";
CREATE TABLE "config_tags_relation" (
    "id" bigserial NOT NULL,
    "tag_name" varchar(128) NOT NULL,
    "tag_type" varchar(64),
    "data_id" varchar(255) NOT NULL,
    "group_id" varchar(128) NOT NULL,
    "tenant_id" varchar(128),
    "nid" bigserial NOT NULL
);
COMMENT ON COLUMN "config_tags_relation"."id" IS 'id';
COMMENT ON COLUMN "config_tags_relation"."tag_name" IS 'tag_name';
COMMENT ON COLUMN "config_tags_relation"."tag_type" IS 'tag_type';
COMMENT ON COLUMN "config_tags_relation"."data_id" IS 'data_id';
COMMENT ON COLUMN "config_tags_relation"."group_id" IS 'group_id';
COMMENT ON COLUMN "config_tags_relation"."tenant_id" IS 'tenant_id';
COMMENT ON TABLE "config_tags_relation" IS 'config_tag_relation';

-- ----------------------------
-- Records of config_tags_relation
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for group_capacity
-- ----------------------------
DROP TABLE IF EXISTS "group_capacity";
CREATE TABLE "group_capacity" (
    "id" bigserial NOT NULL,
    "group_id" varchar(128) NOT NULL,
    "quota" int4 NOT NULL,
    "usage" int4 NOT NULL,
    "max_size" int4 NOT NULL,
    "max_aggr_count" int4 NOT NULL,
    "max_aggr_size" int4 NOT NULL,
    "max_history_count" int4 NOT NULL,
    "gmt_create" timestamp(6) NOT NULL,
    "gmt_modified" timestamp(6) NOT NULL
);
-- Mojibake repaired below (restored from upstream Nacos schema).
COMMENT ON COLUMN "group_capacity"."id" IS '主键ID';
COMMENT ON COLUMN "group_capacity"."group_id" IS 'Group ID，空字符表示整个集群';
COMMENT ON COLUMN "group_capacity"."quota" IS '配额，0表示使用默认值';
COMMENT ON COLUMN "group_capacity"."usage" IS '使用量';
COMMENT ON COLUMN "group_capacity"."max_size" IS '单个配置大小上限，单位为字节，0表示使用默认值';
COMMENT ON COLUMN "group_capacity"."max_aggr_count" IS '聚合子配置最大个数，，0表示使用默认值';
COMMENT ON COLUMN "group_capacity"."max_aggr_size" IS '单个聚合数据的子配置大小上限，单位为字节，0表示使用默认值';
COMMENT ON COLUMN "group_capacity"."max_history_count" IS '最大变更历史数量';
COMMENT ON COLUMN "group_capacity"."gmt_create" IS '创建时间';
COMMENT ON COLUMN "group_capacity"."gmt_modified" IS '修改时间';
COMMENT ON TABLE "group_capacity" IS '集群、各Group容量信息表';

-- ----------------------------
-- Records of group_capacity
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for his_config_info
-- ----------------------------
DROP TABLE IF EXISTS "his_config_info";
CREATE TABLE "his_config_info" (
    "id" int8 NOT NULL,
    "nid" bigserial NOT NULL,
    "data_id" varchar(255) NOT NULL,
    "group_id" varchar(128) NOT NULL,
    "app_name" varchar(128),
    "content" text NOT NULL,
    "md5" varchar(32),
    "gmt_create" timestamp(6) NOT NULL DEFAULT '2010-05-05 00:00:00',
    "gmt_modified" timestamp(6) NOT NULL,
    "src_user" text,
    "src_ip" varchar(20),
    "op_type" char(10),
    "tenant_id" varchar(128),
    "encrypted_data_key" text NOT NULL,
    "publish_type" varchar(50) DEFAULT 'formal',
    "gray_name" varchar(50),
    "ext_info" text
);
COMMENT ON COLUMN "his_config_info"."app_name" IS 'app_name';
COMMENT ON COLUMN "his_config_info"."tenant_id" IS '租户字段';
COMMENT ON COLUMN "his_config_info"."encrypted_data_key" IS '秘钥';
COMMENT ON TABLE "his_config_info" IS '多租户改造';
COMMENT ON COLUMN "his_config_info"."publish_type" IS 'publish type gray or formal';
COMMENT ON COLUMN "his_config_info"."gray_name" IS 'gray name';
COMMENT ON COLUMN "his_config_info"."ext_info" IS 'ext_info';

-- ----------------------------
-- Table structure for permissions
-- ----------------------------
DROP TABLE IF EXISTS "permissions";
CREATE TABLE "permissions" (
    "role" varchar(50) NOT NULL,
    "resource" varchar(512) NOT NULL,
    "action" varchar(8) NOT NULL
);

-- ----------------------------
-- Records of permissions
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for roles
-- ----------------------------
DROP TABLE IF EXISTS "roles";
CREATE TABLE "roles" (
    "username" varchar(50) NOT NULL,
    "role" varchar(50) NOT NULL
);

-- ----------------------------
-- Records of roles
-- ----------------------------
BEGIN;
INSERT INTO "roles" VALUES ('nacos', 'ROLE_ADMIN');
COMMIT;
-- ----------------------------
-- Table structure for tenant_capacity
-- ----------------------------
DROP TABLE IF EXISTS "tenant_capacity";
CREATE TABLE "tenant_capacity" (
    "id" bigserial NOT NULL,
    "tenant_id" varchar(128) NOT NULL,
    "quota" int4 NOT NULL,
    "usage" int4 NOT NULL,
    "max_size" int4 NOT NULL,
    "max_aggr_count" int4 NOT NULL,
    "max_aggr_size" int4 NOT NULL,
    "max_history_count" int4 NOT NULL,
    "gmt_create" timestamp(6) NOT NULL,
    "gmt_modified" timestamp(6) NOT NULL
);
-- Mojibake repaired below (restored from upstream Nacos schema).
COMMENT ON COLUMN "tenant_capacity"."id" IS '主键ID';
COMMENT ON COLUMN "tenant_capacity"."tenant_id" IS 'Tenant ID';
COMMENT ON COLUMN "tenant_capacity"."quota" IS '配额，0表示使用默认值';
COMMENT ON COLUMN "tenant_capacity"."usage" IS '使用量';
COMMENT ON COLUMN "tenant_capacity"."max_size" IS '单个配置大小上限，单位为字节，0表示使用默认值';
COMMENT ON COLUMN "tenant_capacity"."max_aggr_count" IS '聚合子配置最大个数';
COMMENT ON COLUMN "tenant_capacity"."max_aggr_size" IS '单个聚合数据的子配置大小上限，单位为字节，0表示使用默认值';
COMMENT ON COLUMN "tenant_capacity"."max_history_count" IS '最大变更历史数量';
COMMENT ON COLUMN "tenant_capacity"."gmt_create" IS '创建时间';
COMMENT ON COLUMN "tenant_capacity"."gmt_modified" IS '修改时间';
COMMENT ON TABLE "tenant_capacity" IS '租户容量信息表';

-- ----------------------------
-- Records of tenant_capacity
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for tenant_info
-- ----------------------------
DROP TABLE IF EXISTS "tenant_info";
CREATE TABLE "tenant_info" (
    "id" bigserial NOT NULL,
    "kp" varchar(128) NOT NULL,
    "tenant_id" varchar(128),
    "tenant_name" varchar(128),
    "tenant_desc" varchar(256),
    "create_source" varchar(32),
    "gmt_create" int8 NOT NULL,
    "gmt_modified" int8 NOT NULL
);
COMMENT ON COLUMN "tenant_info"."id" IS 'id';
COMMENT ON COLUMN "tenant_info"."kp" IS 'kp';
COMMENT ON COLUMN "tenant_info"."tenant_id" IS 'tenant_id';
COMMENT ON COLUMN "tenant_info"."tenant_name" IS 'tenant_name';
COMMENT ON COLUMN "tenant_info"."tenant_desc" IS 'tenant_desc';
COMMENT ON COLUMN "tenant_info"."create_source" IS 'create_source';
COMMENT ON COLUMN "tenant_info"."gmt_create" IS '创建时间';
COMMENT ON COLUMN "tenant_info"."gmt_modified" IS '修改时间';
COMMENT ON TABLE "tenant_info" IS 'tenant_info';

-- ----------------------------
-- Records of tenant_info
-- ----------------------------
BEGIN;
COMMIT;

-- ----------------------------
-- Table structure for users
-- ----------------------------
DROP TABLE IF EXISTS "users";
CREATE TABLE "users" (
    "username" varchar(50) NOT NULL,
    "password" varchar(500) NOT NULL,
    "enabled" boolean NOT NULL
);

-- ----------------------------
-- Records of users > nAcos_!9#_admIn
-- NOTE(review): the line above stores the admin password in plaintext
-- alongside its bcrypt hash — consider removing it from the repository.
-- ----------------------------
BEGIN;
-- INSERT INTO "users" VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$LnJDlPeP.XbQnr6iabc65OI6kM4LLS/BRLwPHEtRSERErvnmlKkBu', TRUE);
COMMIT;

-- ----------------------------
-- Indexes structure for table config_info
-- ----------------------------
CREATE UNIQUE INDEX "uk_configinfo_datagrouptenant" ON "config_info" ("data_id","group_id","tenant_id");

-- ----------------------------
-- Primary Key structure for table config_info
-- ----------------------------
ALTER TABLE "config_info" ADD CONSTRAINT "config_info_pkey" PRIMARY KEY ("id");

-- ----------------------------
-- Indexes structure for table config_info_aggr
-- ----------------------------
CREATE UNIQUE INDEX "uk_configinfoaggr_datagrouptenantdatum" ON "config_info_aggr" USING btree ("data_id","group_id","tenant_id","datum_id");

-- ----------------------------
-- Primary Key structure for table config_info_aggr
-- ----------------------------
ALTER TABLE "config_info_aggr" ADD CONSTRAINT "config_info_aggr_pkey" PRIMARY KEY ("id");

-- ----------------------------
-- Indexes structure for table config_info_beta
-- ----------------------------
CREATE UNIQUE INDEX "uk_configinfobeta_datagrouptenant" ON "config_info_beta" USING btree ("data_id","group_id","tenant_id");

-- ----------------------------
-- Primary Key structure for table config_info_beta
-- ----------------------------
ALTER TABLE "config_info_beta" ADD CONSTRAINT "config_info_beta_pkey" PRIMARY KEY ("id");

-- ----------------------------
-- Indexes structure for table config_info_tag
-- ----------------------------
CREATE UNIQUE INDEX "uk_configinfotag_datagrouptenanttag" ON "config_info_tag" USING btree ("data_id","group_id","tenant_id","tag_id");

-- ----------------------------
-- Primary Key structure for table config_info_tag
-- ----------------------------
ALTER TABLE "config_info_tag" ADD CONSTRAINT "config_info_tag_pkey" PRIMARY KEY ("id");

-- ----------------------------
-- Indexes structure for table config_tags_relation
-- ----------------------------
CREATE INDEX "idx_tenant_id" ON "config_tags_relation" USING btree ("tenant_id");
CREATE UNIQUE INDEX "uk_configtagrelation_configidtag" ON "config_tags_relation" USING btree ("id", "tag_name", "tag_type");

-- ----------------------------
-- Primary Key structure for table config_tags_relation
-- ----------------------------
ALTER TABLE "config_tags_relation" ADD CONSTRAINT "config_tags_relation_pkey" PRIMARY KEY ("nid");

-- ----------------------------
-- Indexes structure for table group_capacity
-- ----------------------------
CREATE UNIQUE INDEX "uk_group_id" ON "group_capacity" USING btree ("group_id");

-- ----------------------------
-- Primary Key structure for table group_capacity
-- ----------------------------
ALTER TABLE "group_capacity" ADD CONSTRAINT "group_capacity_pkey" PRIMARY KEY ("id");

-- ----------------------------
-- Indexes structure for table his_config_info
-- ----------------------------
CREATE INDEX "idx_did" ON "his_config_info" USING btree ("data_id");
CREATE INDEX "idx_gmt_create" ON "his_config_info" USING btree ("gmt_create");
CREATE INDEX "idx_gmt_modified" ON "his_config_info" USING btree ("gmt_modified");

-- ----------------------------
-- Primary Key structure for table his_config_info
-- ----------------------------
ALTER TABLE "his_config_info" ADD CONSTRAINT "his_config_info_pkey" PRIMARY KEY ("nid");

-- ----------------------------
-- Indexes structure for table permissions
-- ----------------------------
CREATE UNIQUE INDEX "uk_role_permission" ON "permissions" USING btree ("role", "resource", "action");

-- ----------------------------
-- Indexes structure for table roles
-- ----------------------------
CREATE UNIQUE INDEX "uk_username_role" ON "roles" USING btree ("username", "role");

-- ----------------------------
-- Indexes structure for table tenant_capacity
-- ----------------------------
CREATE UNIQUE INDEX "uk_tenant_id" ON "tenant_capacity" USING btree ("tenant_id");

-- ----------------------------
-- Primary Key structure for table tenant_capacity
-- ----------------------------
ALTER TABLE "tenant_capacity" ADD CONSTRAINT "tenant_capacity_pkey" PRIMARY KEY ("id");

-- ----------------------------
-- Indexes structure for table tenant_info
-- ----------------------------
CREATE UNIQUE INDEX "uk_tenant_info_kptenantid" ON "tenant_info" USING btree ("kp", "tenant_id");
-- >>> docker/readme.md
对比新文件 @@ -0,0 +1,103 @@

1. 运行

```bash
docker images | sort -k1              # 排序
docker images | grep -E 'kafka|zook'  # 过滤
docker run -itd -p 5432:5432 postgis/postgis
psql --version                        # psql (PostgreSQL) 17.5 (Debian 17.5-1.pgdg110+1)
docker tag postgis/postgis:latest postgis/postgis:17.5

docker run -itd --name nginx nginx
docker exec -it nginx bash
nginx -v                              # nginx version: nginx/1.29.0
docker pull nginx:1.29

docker pull bitnami/kafka:3.9
docker pull zookeeper:3.9             # 拉取3.9系列的最新版本

docker pull redis:7
docker run -itd --name redis redis:7
redis-server --version                # Redis server v=7.4.5

docker run -itd --name mysql -e MYSQL_RANDOM_ROOT_PASSWORD=123456 mysql:5.7
docker exec -it mysql bash
mysql --version                       # mysql Ver 14.14 Distrib 5.7.44

docker run -itd --name mysql -e MYSQL_RANDOM_ROOT_PASSWORD=123456 mysql:8
docker exec -it mysql bash
mysql --version                       # mysql Ver 8.4.5 for Linux on x86_64

docker pull mongo:8
docker run -itd --name mongo -p 27017:27017 -e MONGO_INITDB_ROOT_USERNAME=admin -e MONGO_INITDB_ROOT_PASSWORD=123456 mongo:8
docker exec -it mongodb mongosh
db.version()                          # 8.0.11
use admin
db.auth("admin", "123456")
show

docker run -itd --name openjdk openjdk:8-jre
docker exec -it openjdk bash
java -version                         # openjdk version "1.8.0_342"

# TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI=
echo -n "Nacos_Random_String_32_Chars_Or_Longer" | base64
docker run -d --name nacos -p 8848:8848 -e MODE=standalone nacos/nacos-server
docker run -d -p 8848:8848 -e MODE=standalone -e NACOS_AUTH_ENABLE=true -e NACOS_AUTH_TOKEN=TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= -e NACOS_AUTH_IDENTITY_KEY=Authorization -e NACOS_AUTH_IDENTITY_VALUE=token nacos/nacos-server:2.5.1
```

2. 镜像

```bash
postgis/postgis:17.5
nginx:1.29
nacos/nacos-server:2.5.1
bitnami/kafka:3.9
zookeeper:3.9
redis:7
openjdk:8-jre
mysql:5.7
mongo:8
openjdk8-422-gdal:3.5.2

# JVM参数
# image: openjdk8-422/gdal:3.5.2
- JAVA_OPTS=-Xms512m -Xmx1024m -XX:+UseG1GC
healthcheck:
  test: ["CMD-SHELL", "curl -f
http://localhost:8848/nacos || exit 1"]

# nacos插件下载
https://github.com/wuchubuzai2018/nacos-datasource-extend-plugins
https://blog.csdn.net/lilinhai548/article/details/149198957

# Linux查看端口占用
netstat -tulnp | grep 8080
lsof -i :8080
kill -9 26634

# git切换分支
git checkout 3.0.1

# OffsetExplorer连接kafka（带密码）
Security > Type > SASL Plaintext
Advanced: > SASL Mechanism > PLAIN
JAAS: > Config: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="kaFka_12#$56";
>> Update >> Connect

# 坏的解释器：没有那个文件或目录
sed -i 's/\r$//' start.sh

# 统计文件夹大小
du -sh postgis/pgdata
```

docker/redis/redis.conf
对比新文件 @@ -0,0 +1 @@
requirepass Redis_s!E_6.2.6
docker/spring.yml
# 对比新文件 @@ -0,0 +1,59 @@
version: "3.8"
services:
  # zookeeper
  zookeeper:
    image: zookeeper:3.4
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./zookeeper_data:/data
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: "server.1=0.0.0.0:2888:3888"
      # ZOO_ENABLE_AUTH: yes
      # ZOO_SERVER_USERS: admin
      # ZOO_SERVER_PASSWORDS: kaFka_12#$56
    healthcheck:
      test: ["CMD-SHELL", "zkServer.sh status"]
      interval: 5s
      timeout: 2s
      retries: 3
    networks:
      - network-jhs

  # kafka
  kafka:
    image: bitnami/kafka:3.4
    environment:
      KAFKA_BROKER_ID: 1
      # ALLOW_PLAINTEXT_LISTENER: yes
      # KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
      # KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
      # KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092
      KAFKA_CFG_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_CFG_LISTENERS: "SASL_PLAINTEXT://:9092"
      KAFKA_CFG_ADVERTISED_LISTENERS: "SASL_PLAINTEXT://192.168.11.203:9092"
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL"
      KAFKA_CFG_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT
      KAFKA_CFG_SASL_ENABLED_MECHANISMS: PLAIN
      KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_CLIENT_USERS: admin
      # "$$" escapes "$" for docker-compose variable interpolation; an
      # unescaped "$5" would be substituted before Kafka sees the value.
      # The literal password (as used by clients) is: kaFka_12#$56
      KAFKA_CLIENT_PASSWORDS: "kaFka_12#$$56"
      # KAFKA_ZOOKEEPER_USER: admin
      # KAFKA_ZOOKEEPER_PASSWORD: kaFka_12#$56
    volumes:
      # chmod -R 777 kafka_data/
      - ./kafka_data:/bitnami/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "9092:9092"
    depends_on:
      zookeeper:
        condition: service_healthy
    privileged: true
    networks:
      - network-jhs

networks:
  network-jhs:
    driver: bridge
# docker/start.sh
#!/bin/bash
# 对比新文件 @@ -0,0 +1,11 @@  (diff-viewer header, kept for reference)
# Tear down, wipe local broker state, and restart the compose stack.

# Guard the cd: if /data/jhs does not exist we must NOT run the rm -rf
# below in the caller's current directory.
cd /data/jhs || exit 1

# NOTE(review): this wipes zookeeper/kafka state (topics, offsets) on
# every start — confirm that is intended.
rm -rf zookeeper_data/*
rm -rf kafka_data/*

docker-compose down
docker-compose up -d

# sed -i 's/\r$//' start.sh
exit 0
# docker/system/bootstrap.yml
# 对比新文件 @@ -0,0 +1,31 @@
# Tomcat
server:
  port: 8002

# Spring
spring:
  application:
    # Application name (was: 应用名称)
    name: se-system
  profiles:
    # Active environment (was: 环境配置)
    active: dev
  cloud:
    nacos:
      username: nacos
      # Quoted so the "#" inside the value can never be taken for a comment.
      password: "nAcos_!9#_admIn"
      discovery:
        # Service registry address (was: 服务注册地址)
        server-addr: "127.0.0.1:8848"
      config:
        group: JHS_GROUP
        # Config center address (was: 配置中心地址)
        server-addr: "127.0.0.1:8848"
        # Config file format (was: 配置文件格式)
        file-extension: yml
        # Shared configs (was: 共享配置)
        shared-configs:
          - data-id: "application-${spring.profiles.active}.${spring.cloud.nacos.config.file-extension}"
            group: JHS_GROUP
            # Allow refresh (was: 允许刷新)
            refresh-enabled: true