9 天以前 | 13693261870 | ![]() |
9 天以前 | 13693261870 | ![]() |
9 天以前 | 13693261870 | ![]() |
9 天以前 | 13693261870 | ![]() |
9 天以前 | 13693261870 | ![]() |
9 天以前 | 13693261870 | ![]() |
9 天以前 | 13693261870 | ![]() |
.gitignore
@@ -45,3 +45,7 @@ !*/build/*.html !*/build/*.xml /doc /docker/nginx/html/sso /docker/postgis/pgdata /docker/system/*.jar /docker/gateway/*.jar docker/bak.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,121 @@ version: "3.8" services: # postgis postgis: image: postgis/postgis:17.5 ports: - 5432:5432 volumes: - ./postgis/pgdata:/var/lib/postgresql/data - ./postgis/init:/docker-entrypoint-initdb.d environment: POSTGRES_USER: postgres POSTGRES_PASSWORD: Postgres!_17_jHs ALLOW_IP_RANGE: 0.0.0.0/0 TZ: Asia/Shanghai POSTGRES_DB: jhs healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 5s timeout: 2s retries: 3 privileged: true networks: - network-jhs # nacos nacos: image: nacos/nacos-server:v3.0.2 environment: TZ: Asia/Shanghai MODE: standalone SPRING_DATASOURCE_PLATFORM: postgresql DB_URL: jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java DB_USER: postgres DB_PASSWORD: Postgres!_17_jHs NACOS_AUTH_ENABLE: true NACOS_AUTH_IDENTITY_KEY: nacos NACOS_AUTH_IDENTITY_VALUE: nAcos_!9#_admIn DB_POOL_CONFIG_DRIVERCLASSNAME: org.postgresql.Driver NACOS_AUTH_TOKEN: TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= volumes: - ./nacos/logs/:/home/nacos/logs - ./nacos/plugins:/home/nacos/plugins #- ./nacos/conf/application.properties:/home/nacos/conf/application.properties ports: - 8080:8080 - 8848:8848 - 9848:9848 depends_on: postgis: condition: service_healthy privileged: true networks: - network-jhs restart: always # zookeeper zookeeper: image: zookeeper:3.4 ports: - 2181:2181 - 2888:2888 - 3888:3888 volumes: - ./zookeeper_data:/data environment: ZOO_MY_ID: 1 ZOO_SERVERS: server.1=0.0.0.0:2888:3888 # ZOO_ENABLE_AUTH: yes # ZOO_SERVER_USERS: admin # ZOO_SERVER_PASSWORDS: kaFka_12#$56 healthcheck: test: ["CMD-SHELL", "zkServer.sh status"] interval: 5s timeout: 2s retries: 3 networks: - network-jhs # kafka kafka: image: bitnami/kafka:3.4 environment: KAFKA_BROKER_ID: 1 # ALLOW_PLAINTEXT_LISTENER: yes # KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 # KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 # KAFKA_CFG_SASL_ENABLED_MECHANISMS: 
PLAIN # KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN # KAFKA_CFG_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT # KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CLIENT:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT # KAFKA_CFG_SASL_PLAINTEXT_PASSWORD_CONVERTER_CLASSNAME: org.apache.kafka.common.security.plain.PlainPasswordConverter # KAFKA_CFG_SUPER_USERS: User:admin;ClientId:admin;Group:admins;Default # KAFKA_CFG_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="kaFka_12#$56"; #ALLOW_PLAINTEXT_LISTENER: yes KAFKA_CFG_LISTENERS: SASL_PLAINTEXT://:9092 KAFKA_CFG_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.11.203:9092 KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL KAFKA_CFG_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT KAFKA_CFG_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_CLIENT_USERS: admin KAFKA_CLIENT_PASSWORDS: kaFka_12#$56 # KAFKA_ZOOKEEPER_USER: admin # KAFKA_ZOOKEEPER_PASSWORD: kaFka_12#$56 volumes: # chmod -R 777 kafka_data/ - ./kafka_data:/bitnami/kafka - /var/run/docker.sock:/var/run/docker.sock ports: - 9092:9092 depends_on: zookeeper: condition: service_healthy privileged: true networks: - network-jhs networks: network-jhs: driver: bridge docker/docker-compose.yml
@@ -1,55 +1,134 @@ version: '3.8' version: "3.8" services: # zookeeper zookeeper: image: zookeeper:3.4.9 # postgis postgis: image: postgis/postgis:17.5 ports: - 2181:2181 # - 2888:2888 # - 3888:3888 - 5432:5432 volumes: - ./zookeeper_data:/data - ./postgis/pgdata:/var/lib/postgresql/data - ./postgis/init:/docker-entrypoint-initdb.d environment: ZOO_MY_ID: 1 ZOO_SERVERS: server.1=0.0.0.0:2888:3888 POSTGRES_USER: postgres POSTGRES_PASSWORD: Postgres!_17_jHs ALLOW_IP_RANGE: 0.0.0.0/0 TZ: Asia/Shanghai POSTGRES_DB: jhs healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 10s timeout: 2s retries: 3 privileged: true networks: - kafka_net #kafka kafka: image: bitnami/kafka:3.4 depends_on: - zookeeper ports: - 9092:9092 - network-jhs # nacos nacos: image: nacos/nacos-server:v3.0.2 environment: KAFKA_BROKER_ID: 1 ALLOW_PLAINTEXT_LISTENER: "yes" KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 #KKAFKA_LISTENERS: PLAINTEXT://:9092 #AFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 #KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 #KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' TZ: Asia/Shanghai MODE: standalone SPRING_DATASOURCE_PLATFORM: postgresql DB_URL: jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java DB_USER: postgres DB_PASSWORD: Postgres!_17_jHs NACOS_AUTH_ENABLE: true NACOS_AUTH_IDENTITY_KEY: nacos NACOS_AUTH_IDENTITY_VALUE: nAcos_!9#_admIn DB_POOL_CONFIG_DRIVERCLASSNAME: org.postgresql.Driver NACOS_AUTH_TOKEN: TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= volumes: - ./kafka_data:/bitnami/kafka # å°å®¿ä¸»æºçDocker奿¥åæä»¶æè½½å°å®¹å¨å é¨ - /var/run/docker.sock:/var/run/docker.sock networks: - kafka_net # kafka-ui kafka-ui: image: provectuslabs/kafka-ui depends_on: - kafka - ./nacos/logs/:/home/nacos/logs - ./nacos/plugins:/home/nacos/plugins #- 
./nacos/conf/application.properties:/home/nacos/conf/application.properties healthcheck: test: ["CMD-SHELL", "curl -f http://localhost:8080 || exit 1"] interval: 10s timeout: 2s retries: 3 start_period: 7s ports: - 8081:8080 environment: DYNAMIC_CONFIG_ENABLED: true #KAFKA_CLUSTERS_0_NAME: local #KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 - 8080:8080 - 8848:8848 - 9848:9848 depends_on: postgis: condition: service_healthy privileged: true networks: - kafka_net - network-jhs #restart: always # redis redis: image: redis:7 ports: - 6379:6379 volumes: - ./redis/redis.conf:/data/redis.conf - ./redis/data:/data healthcheck: test: ["CMD", "redis-cli", "ping"] interval: 10s timeout: 2s retries: 3 #command: redis-server --requirepass Redis_s!E_6.2.6 command: redis-server /data/redis.conf networks: - network-jhs # gateway gateway: image: openjdk:8-jre volumes: - ./gateway/logs:/logs - ./gateway:/app environment: TZ: Asia/Shanghai #entrypoint: /bin/sh -c "sleep 30 && java -jar /app/se-gateway.jar --spring.config.location=file:/app/" entrypoint: java -jar /app/se-gateway.jar --spring.config.location=file:/app/ depends_on: redis: condition: service_healthy nacos: condition: service_healthy networks: - network-jhs # system system: image: openjdk8-422-gdal:3.5.2 volumes: - ./system/logs:/logs - ./system:/app environment: TZ: Asia/Shanghai #entrypoint: /bin/sh -c "sleep 30 && java -jar /app/se-system.jar --spring.config.location=file:/app/" entrypoint: java -jar /app/se-system.jar --spring.config.location=file:/app/ depends_on: postgis: condition: service_healthy redis: condition: service_healthy nacos: condition: service_healthy networks: - network-jhs # nginx nginx: image: nginx:1.29 ports: - 80:80 - 443:443 environment: TZ: Asia/Shanghai volumes: - ./nginx/logs:/var/log/nginx - ./nginx/html:/usr/share/nginx/html - ./nginx/conf/conf.d:/etc/nginx/conf.d - ./nginx/conf/nginx.conf:/etc/nginx/nginx.conf depends_on: - gateway - system networks: - network-jhs networks: kafka_net: 
network-jhs: driver: bridge docker/gateway/bootstrap.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,33 @@ # Tomcat server: port: 8001 # Spring spring: application: # åºç¨åç§° name: se-gateway main: web-application-type: reactive profiles: # ç¯å¢é ç½® active: dev cloud: nacos: username: nacos password: nAcos_!9#_admIn discovery: # æå¡æ³¨åå°å server-addr: nacos:8848 config: group: JHS_GROUP # é ç½®ä¸å¿å°å server-addr: nacos:8848 # é ç½®æä»¶æ ¼å¼ file-extension: yml # å ±äº«é ç½® shared-configs: - data-id: application-${spring.profiles.active}.${spring.cloud.nacos.config.file-extension} group: JHS_GROUP # å è®¸å·æ° refresh-enabled: true docker/kafka.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,59 @@ version: "3.8" services: # zookeeper zookeeper: image: zookeeper:3.4 ports: - 2181:2181 - 2888:2888 - 3888:3888 volumes: - ./zookeeper_data:/data environment: ZOO_MY_ID: 1 ZOO_SERVERS: server.1=0.0.0.0:2888:3888 # ZOO_ENABLE_AUTH: yes # ZOO_SERVER_USERS: admin # ZOO_SERVER_PASSWORDS: kaFka_12#$56 healthcheck: test: ["CMD-SHELL", "zkServer.sh status"] interval: 5s timeout: 2s retries: 3 networks: - network-jhs # kafka kafka: image: bitnami/kafka:3.4 environment: KAFKA_BROKER_ID: 1 # ALLOW_PLAINTEXT_LISTENER: yes # KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 # KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 # KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.11.203:9092 KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_CFG_LISTENERS: SASL_PLAINTEXT://:9092 KAFKA_CFG_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.11.203:9092 KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL KAFKA_CFG_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT KAFKA_CFG_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_CLIENT_USERS: admin KAFKA_CLIENT_PASSWORDS: kaFka_12#$56 # KAFKA_ZOOKEEPER_USER: admin # KAFKA_ZOOKEEPER_PASSWORD: kaFka_12#$56 volumes: # chmod -R 777 kafka_data/ - ./kafka_data:/bitnami/kafka - /var/run/docker.sock:/var/run/docker.sock ports: - 9092:9092 depends_on: zookeeper: condition: service_healthy privileged: true networks: - network-jhs networks: network-jhs: driver: bridge docker/nacos.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,57 @@ version: "3.8" services: # postgis postgis: image: postgis/postgis:17.5 ports: - 5432:5432 volumes: - ./postgis/pgdata:/var/lib/postgresql/data - ./postgis/init:/docker-entrypoint-initdb.d environment: POSTGRES_USER: postgres POSTGRES_PASSWORD: Postgres!_17_jHs ALLOW_IP_RANGE: 0.0.0.0/0 TZ: Asia/Shanghai POSTGRES_DB: jhs healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 5s timeout: 2s retries: 3 privileged: true networks: - network-jhs # nacos nacos: image: nacos/nacos-server:v3.0.2 environment: TZ: Asia/Shanghai MODE: standalone SPRING_DATASOURCE_PLATFORM: postgresql DB_URL: jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java DB_USER: postgres DB_PASSWORD: Postgres!_17_jHs NACOS_AUTH_ENABLE: true NACOS_AUTH_IDENTITY_KEY: nacos NACOS_AUTH_IDENTITY_VALUE: nAcos_!9#_admIn DB_POOL_CONFIG_DRIVERCLASSNAME: org.postgresql.Driver NACOS_AUTH_TOKEN: TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= volumes: - ./nacos/logs/:/home/nacos/logs - ./nacos/plugins:/home/nacos/plugins #- ./nacos/conf/application.properties:/home/nacos/conf/application.properties ports: - 8080:8080 - 8848:8848 - 9848:9848 depends_on: postgis: condition: service_healthy privileged: true networks: - network-jhs restart: always networks: network-jhs: driver: bridge docker/nacos/conf/application.properties
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,306 @@ # # Copyright 1999-2021 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #*************** Spring Boot Related Configurations ***************# ### Default web context path: server.servlet.contextPath=/nacos ### Include message field server.error.include-message=ALWAYS ### Default web server port: server.port=8848 #*************** Network Related Configurations ***************# ### If prefer hostname over ip for Nacos server addresses in cluster.conf: # nacos.inetutils.prefer-hostname-over-ip=false ### Specify local server's IP: # nacos.inetutils.ip-address= #*************** Config Module Related Configurations ***************# ### If use MySQL as datasource: ### Deprecated configuration property, it is recommended to use `spring.sql.init.platform` replaced. 
# spring.datasource.platform=mysql # spring.sql.init.platform=mysql ### Count of DB: # db.num=1 ### Connect URL of DB: # db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC # db.user.0=nacos # db.password.0=nacos spring.datasource.platform=postgresql db.num=1 #db.url.0=jdbc:mysql://se-mysql:3306/se-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true db.url.0=jdbc:postgresql://postgis:5432/jhs?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=nacos_java db.user=postgres db.password=Postgres!_17_jHs db.pool.config.driverClassName=org.postgresql.Driver #db.pool.config.connectionTestQuery=SELECT 1 ### Connection pool configuration: hikariCP db.pool.config.connectionTimeout=30000 db.pool.config.validationTimeout=10000 db.pool.config.maximumPoolSize=20 db.pool.config.minimumIdle=2 ### the maximum retry times for push nacos.config.push.maxRetryTime=50 #*************** Naming Module Related Configurations ***************# ### If enable data warmup. If set to false, the server would accept request without local data preparation: # nacos.naming.data.warmup=true ### If enable the instance auto expiration, kind like of health check of instance: # nacos.naming.expireInstance=true ### Add in 2.0.0 ### The interval to clean empty service, unit: milliseconds. # nacos.naming.clean.empty-service.interval=60000 ### The expired time to clean empty service, unit: milliseconds. # nacos.naming.clean.empty-service.expired-time=60000 ### The interval to clean expired metadata, unit: milliseconds. # nacos.naming.clean.expired-metadata.interval=5000 ### The expired time to clean metadata, unit: milliseconds. # nacos.naming.clean.expired-metadata.expired-time=60000 ### The delay time before push task to execute from service changed, unit: milliseconds. 
# nacos.naming.push.pushTaskDelay=500 ### The timeout for push task execute, unit: milliseconds. # nacos.naming.push.pushTaskTimeout=5000 ### The delay time for retrying failed push task, unit: milliseconds. # nacos.naming.push.pushTaskRetryDelay=1000 ### Since 2.0.3 ### The expired time for inactive client, unit: milliseconds. # nacos.naming.client.expired.time=180000 #*************** CMDB Module Related Configurations ***************# ### The interval to dump external CMDB in seconds: # nacos.cmdb.dumpTaskInterval=3600 ### The interval of polling data change event in seconds: # nacos.cmdb.eventTaskInterval=10 ### The interval of loading labels in seconds: # nacos.cmdb.labelTaskInterval=300 ### If turn on data loading task: # nacos.cmdb.loadDataAtStart=false #***********Metrics for tomcat **************************# server.tomcat.mbeanregistry.enabled=true #***********Expose prometheus and health **************************# #management.endpoints.web.exposure.include=prometheus,health ### Metrics for elastic search management.metrics.export.elastic.enabled=false #management.metrics.export.elastic.host=http://localhost:9200 ### Metrics for influx management.metrics.export.influx.enabled=false #management.metrics.export.influx.db=springboot #management.metrics.export.influx.uri=http://localhost:8086 #management.metrics.export.influx.auto-create-db=true #management.metrics.export.influx.consistency=one #management.metrics.export.influx.compressed=true #*************** Access Log Related Configurations ***************# ### If turn on the access log: server.tomcat.accesslog.enabled=true ### file name pattern, one file per hour server.tomcat.accesslog.rotate=true server.tomcat.accesslog.file-date-format=.yyyy-MM-dd-HH ### The access log pattern: server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i ### The directory of access log: server.tomcat.basedir=file:. 
#*************** Access Control Related Configurations ***************# ### If enable spring security, this option is deprecated in 1.2.0: #spring.security.enabled=false ### The ignore urls of auth nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/** ### The auth system to use, currently only 'nacos' and 'ldap' is supported: nacos.core.auth.system.type=nacos ### If turn on auth system: nacos.core.auth.enabled=true ### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay. nacos.core.auth.caching.enabled=true ### Since 1.4.1, Turn on/off white auth for user-agent: nacos-server, only for upgrade from old version. nacos.core.auth.enable.userAgentAuthWhite=false ### Since 1.4.1, worked when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false. ### The two properties is the white list for auth and used by identity the request from other server. 
nacos.core.auth.server.identity.key=admin nacos.core.auth.server.identity.value=nAcos_!9#_admIn ### worked when nacos.core.auth.system.type=nacos ### The token expiration in seconds: nacos.core.auth.plugin.nacos.token.cache.enable=false nacos.core.auth.plugin.nacos.token.expire.seconds=18000 ### The default token (Base64 String): https://base64.us/ nacos.core.auth.plugin.nacos.token.secret.key=TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= ### worked when nacos.core.auth.system.type=ldapï¼{0} is Placeholder,replace login username #nacos.core.auth.ldap.url=ldap://localhost:389 #nacos.core.auth.ldap.basedc=dc=example,dc=org #nacos.core.auth.ldap.userDn=cn=admin,${nacos.core.auth.ldap.basedc} #nacos.core.auth.ldap.password=admin #nacos.core.auth.ldap.userdn=cn={0},dc=example,dc=org #nacos.core.auth.ldap.filter.prefix=uid #nacos.core.auth.ldap.case.sensitive=true #nacos.core.auth.ldap.ignore.partial.result.exception=false #*************** Control Plugin Related Configurations ***************# # plugin type #nacos.plugin.control.manager.type=nacos # local control rule storage dir, default ${nacos.home}/data/connection and ${nacos.home}/data/tps #nacos.plugin.control.rule.local.basedir=${nacos.home} # external control rule storage type, if exist #nacos.plugin.control.rule.external.storage= #*************** Config Change Plugin Related Configurations ***************# # webhook #nacos.core.config.plugin.webhook.enabled=false # It is recommended to use EB https://help.aliyun.com/document_detail/413974.html #nacos.core.config.plugin.webhook.url=http://localhost:8080/webhook/send?token=*** # The content push max capacity ,byte #nacos.core.config.plugin.webhook.contentMaxCapacity=102400 # whitelist #nacos.core.config.plugin.whitelist.enabled=false # The import file suffixs #nacos.core.config.plugin.whitelist.suffixs=xml,text,properties,yaml,html # fileformatcheck,which validate the import file of type and content #nacos.core.config.plugin.fileformatcheck.enabled=false 
#*************** Istio Related Configurations ***************# ### If turn on the MCP server: nacos.istio.mcp.server.enabled=false #*************** Core Related Configurations ***************# ### set the WorkerID manually # nacos.core.snowflake.worker-id= ### Member-MetaData # nacos.core.member.meta.site= # nacos.core.member.meta.adweight= # nacos.core.member.meta.weight= ### MemberLookup ### Addressing pattern category, If set, the priority is highest # nacos.core.member.lookup.type=[file,address-server] ## Set the cluster list with a configuration file or command-line argument # nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809 ## for AddressServerMemberLookup # Maximum number of retries to query the address server upon initialization # nacos.core.address-server.retry=5 ## Server domain name address of [address-server] mode # address.server.domain=jmenv.tbsite.net ## Server port of [address-server] mode # address.server.port=8080 ## Request address of [address-server] mode # address.server.url=/nacos/serverlist #*************** JRaft Related Configurations ***************# ### Sets the Raft cluster election timeout, default value is 5 second # nacos.core.protocol.raft.data.election_timeout_ms=5000 ### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute # nacos.core.protocol.raft.data.snapshot_interval_secs=30 ### raft internal worker threads # nacos.core.protocol.raft.data.core_thread_num=8 ### Number of threads required for raft business request processing # nacos.core.protocol.raft.data.cli_service_thread_num=4 ### raft linear read strategy. 
Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat # nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe ### rpc request timeout, default 5 seconds # nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000 #*************** Distro Related Configurations ***************# ### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second. # nacos.core.protocol.distro.data.sync.delayMs=1000 ### Distro data sync timeout for one sync data, default 3 seconds. # nacos.core.protocol.distro.data.sync.timeoutMs=3000 ### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds. # nacos.core.protocol.distro.data.sync.retryDelayMs=3000 ### Distro data verify interval time, verify synced data whether expired for a interval. Default 5 seconds. # nacos.core.protocol.distro.data.verify.intervalMs=5000 ### Distro data verify timeout for one verify, default 3 seconds. # nacos.core.protocol.distro.data.verify.timeoutMs=3000 ### Distro data load retry delay when load snapshot data failed, default 30 seconds. # nacos.core.protocol.distro.data.load.retryDelayMs=30000 ### enable to support prometheus service discovery #nacos.prometheus.metrics.enabled=true ### Since 2.3 #*************** Grpc Configurations ***************# ## sdk grpc(between nacos server and client) configuration ## Sets the maximum message size allowed to be received on the server. #nacos.remote.server.grpc.sdk.max-inbound-message-size=10485760 ## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours. #nacos.remote.server.grpc.sdk.keep-alive-time=7200000 ## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds. 
#nacos.remote.server.grpc.sdk.keep-alive-timeout=20000 ## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes #nacos.remote.server.grpc.sdk.permit-keep-alive-time=300000 ## cluster grpc(inside the nacos server) configuration #nacos.remote.server.grpc.cluster.max-inbound-message-size=10485760 ## Sets the time(milliseconds) without read activity before sending a keepalive ping. The typical default is two hours. #nacos.remote.server.grpc.cluster.keep-alive-time=7200000 ## Sets a time(milliseconds) waiting for read activity after sending a keepalive ping. Defaults to 20 seconds. #nacos.remote.server.grpc.cluster.keep-alive-timeout=20000 ## Sets a time(milliseconds) that specify the most aggressive keep-alive time clients are permitted to configure. The typical default is 5 minutes #nacos.remote.server.grpc.cluster.permit-keep-alive-time=300000 ## open nacos default console ui #nacos.console.ui.enabled=true docker/nacos/plugins/nacos-postgresql-datasource-plugin-ext-3.0.2.jarBinary files differ
docker/nginx/conf/conf.d/ssl/ssl2035.crt
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,24 @@ -----BEGIN CERTIFICATE----- MIID9zCCAt+gAwIBAgIUYsseD8Of/+aDbUUkcyu3cxoryggwDQYJKoZIhvcNAQEL BQAwgYoxCzAJBgNVBAYTAkNOMRAwDgYDVQQIDAdCZWlKaW5nMRAwDgYDVQQHDAdC ZWlKaW5nMRAwDgYDVQQKDAd0ZXJyYWl0MRAwDgYDVQQLDAd0ZXJyYWl0MRIwEAYD VQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEDI1Mjc0MDQ1NEBxcS5jb20w HhcNMjUwNzA0MDQxNzI5WhcNMzUwNzAyMDQxNzI5WjCBijELMAkGA1UEBhMCQ04x EDAOBgNVBAgMB0JlaUppbmcxEDAOBgNVBAcMB0JlaUppbmcxEDAOBgNVBAoMB3Rl cnJhaXQxEDAOBgNVBAsMB3RlcnJhaXQxEjAQBgNVBAMMCWxvY2FsaG9zdDEfMB0G CSqGSIb3DQEJARYQMjUyNzQwNDU0QHFxLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD ggEPADCCAQoCggEBAIZic158SDVpEaG8/2OGUTas7avhxZshEljysTglFCsZ7G0O uqspsBxn73gwkUHPX69PYUMb2MKez2cYsg5rdlO7HVhINPUKCCIq1g2aIoc1FCII 1Ism3chWNpxFPwxsTDwNYTzX1EUI5+j+v5s7XGYuUg5ZXbxgDS680zab03gbUsfA SS8Z9AwiqVY2zx57nsfNObhuG91Y+hKrQT/7j3b28TuzuVRsxCdX85zs75Ouv3Rj a3VcAM031WFJyDqD/eGJ9qHXJsm/FljfTelMjUZzDO2elWC/TvFyUsdw0T+utReR xi8easFwS1iLGpBtsr+rc11Nyn7pAL/iU9ghQP8CAwEAAaNTMFEwHQYDVR0OBBYE FFbRdA0156Cucd84iOmQBmunBHXuMB8GA1UdIwQYMBaAFFbRdA0156Cucd84iOmQ BmunBHXuMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGOJEY2l Sqf7h3Vhuu/pRP3szEYCQ//NKPxhOiGynKjAsmBFMbDX6FO3k4201zHsAHqe70r5 y+UImQWdwptVhzFxrrCo8IY7ic58vcVJWqwDLP7QeJ+N9PxcgSA1aQmEjufSk8Hu QJozYwiumqiRHC+mvuScP9U+BlZ/GMixtIWFwfFokWvQ8QmVZTeIxJepHY0i6Mzl dXr5JRt8VrbrR4w8s4e6P9VQRV0P8spjixd0BXRHXJznLdlU9/ZWQYyTfwWMhk6y Bmth/Hrju2ikg1G9YnI69SnlLGXyE7OwD17hV4W+n/0O6xAz0yBkFQddt6JlBwCH fWvWidy4Wml4cHw= -----END CERTIFICATE----- docker/nginx/conf/conf.d/ssl/ssl2035.key
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,28 @@ -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCGYnNefEg1aRGh vP9jhlE2rO2r4cWbIRJY8rE4JRQrGextDrqrKbAcZ+94MJFBz1+vT2FDG9jCns9n GLIOa3ZTux1YSDT1CggiKtYNmiKHNRQiCNSLJt3IVjacRT8MbEw8DWE819RFCOfo /r+bO1xmLlIOWV28YA0uvNM2m9N4G1LHwEkvGfQMIqlWNs8ee57HzTm4bhvdWPoS q0E/+4929vE7s7lUbMQnV/Oc7O+Trr90Y2t1XADNN9VhScg6g/3hifah1ybJvxZY 303pTI1GcwztnpVgv07xclLHcNE/rrUXkcYvHmrBcEtYixqQbbK/q3NdTcp+6QC/ 4lPYIUD/AgMBAAECggEAD/t+LHBBh5X4Z9uk0BRgbPfsyF1wn/4zQsA9EtKuGbYv 59ncROZksJbX5IZ6MBvWuHCLaRd9vx/IxkJ/TWG7JULYFEYk2OPENd68WHUgGCPY QiKkt0FRworRBzg2bbwk5kYnw2cJMttX9t5IBtFCMFDf/MmPcWDxuxZFVHWnP93B QIqWe44pB2BkHlO30bDVK9Brvn8tB2M7VMSiS2vLBCxfZ2fjMqLSjkpI3RxjQnYk SygDcKCfovFOPRKD/rFyyAfbEvwsY/8/F2ZsMPwWeeaEcaadjLf0DfSXeXOd711S vkkgpn2pj3I2v6S0QcQRAi5SaLVTRKat77tSW39eYQKBgQC5miE/eTFzRSYNC7Rk GTL9A4aDPp+ZlN+sR5cFD/dAW6YNTFzeakIUd7Oc9YgycY45fPCtR+uXElXAv+TP ec+pq4CiVOqXVig1S2a68fyLaO11QhJoovJPAUEBoahXEnLpUTBtgjlCadl0D29F 7f/72hqezl5bIc79NdT33xfjkQKBgQC5WyB83oP9sxzyKpyqtt+tQH8AK5taW9JS RWSEAWWXM4Ju/owvfmG885dcIwiLHcR39ENfZoJ1Mi0A7B+aJ3LuYLaUdjOJIPyb Lq6BzAaCIkmH2wF3ZYrZAnzVf3//uPeKtk08ne4DT+DSbebEsITmJtPeQeQgWhIZ 1onK8B9zjwKBgDiClzDuQ4InQaijMa9whLfIhQIc5Rcr3tZjjbW64Ls1rF9MKtKF Y807jPJzR56kOHuEcPsxdKewq29efdo16mZsk2PZmvus/d0MMiElYJFJx1L2ZQh6 5G/tn89RWyH3ugkT8TzGc4ynEdBmiqiuCEy0YXqMtunkZ1NtOoSl5m4hAoGBAKi+ JCpprMH3IN/6GRx1VdZ1A+mUyV6Ofz+0uthOKT1ogFMp21eVd8c7/8y6fBmiJO2L axZbzWKCJmRTkkWVqlUHqNApd6tcY3unGOlDY51vN3+9ymz2/VuonxsCcvXMX1dh tZj8seVEAAmyUcc4aBTavkD1vYgSV648GL+usQNTAoGATttDS4oM0TN3ngyKYUzU 2/uYUi5iUON2m/aVxptllzQtsOJxJnTdOsMWGgeL5bV11Wmi21wbm4atS3VWFZbd JeRTRk74HHT3VGf4IjmaLr6XZ/9VuaA9UJUxbvlKrxSG/P9MO5u6KMWouAsrInW2 uzAZobB6chszzYCAewPlawY= -----END PRIVATE KEY----- docker/nginx/conf/nginx.conf
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,103 @@ worker_processes 1; events { worker_connections 1024; } http { include mime.types; default_type application/octet-stream; sendfile on; keepalive_timeout 65; server { listen 80; server_name local80; location / { root /usr/share/nginx/html; try_files $uri $uri/ /index.html /sso/index.html; index index.html index.htm; } location /sso { alias /usr/share/nginx/html/sso; try_files $uri $uri/ /sso/; #index index.html index.htm; } location /api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://gateway:8001/; } location /system { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://gateway:8001/system; } error_page 500 502 503 504 /50x.html; location = /50x.html { root html; } } server { listen 443 ssl; listen [::]:443 ssl; server_name local443; ssl_certificate /etc/nginx/conf.d/ssl/ssl2035.crt; ssl_certificate_key /etc/nginx/conf.d/ssl/ssl2035.key; ssl_session_cache shared:SSL:50m; ssl_session_timeout 7d; ssl_ciphers HIGH:!aNULL:!MD5; ssl_protocols TLSv1.2 TLSv1.3; ssl_prefer_server_ciphers on; location / { root /usr/share/nginx/html; try_files $uri $uri/ /index.html /sso/index.html; index index.html index.htm; } location /sso { alias /usr/share/nginx/html/sso; try_files $uri $uri/ /sso/; #index index.html index.htm; } location /api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://gateway:8001/; } location /system { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass 
http://gateway:8001/system; } error_page 500 502 503 504 /50x.html; location = /50x.html { root html; } } } docker/postgis/init/init.sql
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,551 @@ /* * Copyright 1999-2018 Alibaba Group Holding Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -- ---------------------------- -- create extension -- ---------------------------- create extension if not exists postgis cascade; create extension if not exists "uuid-ossp"; -- ---------------------------- -- Table structure for config_info_gray -- ---------------------------- DROP TABLE IF EXISTS "config_info_gray"; CREATE TABLE "config_info_gray" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "content" text NOT NULL, "md5" varchar(32), "src_user" text, "src_ip" varchar(100) NOT NULL, "gmt_create" timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP, "gmt_modified" timestamp(6) NOT NULL, "app_name" varchar(128) DEFAULT NULL::character varying, "tenant_id" varchar(128) DEFAULT ''::character varying, "gray_name" varchar(128) NOT NULL, "gray_rule" text NOT NULL, "encrypted_data_key" varchar(256) NOT NULL ); COMMENT ON COLUMN "config_info_gray"."id" IS 'èªå¢ID'; COMMENT ON COLUMN "config_info_gray"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_gray"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_gray"."content" IS 'content'; COMMENT ON COLUMN "config_info_gray"."md5" IS 'md5'; COMMENT ON COLUMN "config_info_gray"."src_user" IS 'src_user'; COMMENT ON COLUMN "config_info_gray"."src_ip" IS 'src_ip'; COMMENT ON COLUMN "config_info_gray"."gmt_create" IS 'gmt_create'; COMMENT ON COLUMN 
"config_info_gray"."gmt_modified" IS 'gmt_modified'; COMMENT ON COLUMN "config_info_gray"."app_name" IS 'app_name'; COMMENT ON COLUMN "config_info_gray"."tenant_id" IS 'tenant_id'; COMMENT ON COLUMN "config_info_gray"."gray_name" IS 'gray_name'; COMMENT ON COLUMN "config_info_gray"."gray_rule" IS 'gray_rule'; COMMENT ON COLUMN "config_info_gray"."encrypted_data_key" IS 'encrypted_data_key'; -- ---------------------------- -- Primary Key structure for table config_info_gray -- ---------------------------- ALTER TABLE "config_info_gray" ADD CONSTRAINT "config_info_gray_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Table structure for config_info -- ---------------------------- DROP TABLE IF EXISTS "config_info"; CREATE TABLE "config_info" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(255) , "content" text NOT NULL, "md5" varchar(32) , "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "src_user" text , "src_ip" varchar(20) , "app_name" varchar(128) , "tenant_id" varchar(128) , "c_desc" varchar(256) , "c_use" varchar(64) , "effect" varchar(64) , "type" varchar(64) , "c_schema" text , "encrypted_data_key" text NOT NULL ) ; COMMENT ON COLUMN "config_info"."id" IS 'id'; COMMENT ON COLUMN "config_info"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info"."content" IS 'content'; COMMENT ON COLUMN "config_info"."md5" IS 'md5'; COMMENT ON COLUMN "config_info"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "config_info"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info"."src_user" IS 'source user'; COMMENT ON COLUMN "config_info"."src_ip" IS 'source ip'; COMMENT ON COLUMN "config_info"."tenant_id" IS 'ç§æ·å段'; COMMENT ON COLUMN "config_info"."encrypted_data_key" IS 'ç§é¥'; COMMENT ON TABLE "config_info" IS 'config_info'; -- ---------------------------- -- Table structure for config_info_aggr -- ---------------------------- DROP TABLE IF EXISTS "config_info_aggr"; CREATE TABLE "config_info_aggr" 
( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(255) NOT NULL, "datum_id" varchar(255) NOT NULL, "content" text NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "app_name" varchar(128) , "tenant_id" varchar(128) ) ; COMMENT ON COLUMN "config_info_aggr"."id" IS 'id'; COMMENT ON COLUMN "config_info_aggr"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_aggr"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_aggr"."datum_id" IS 'datum_id'; COMMENT ON COLUMN "config_info_aggr"."content" IS 'å 容'; COMMENT ON COLUMN "config_info_aggr"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info_aggr"."tenant_id" IS 'ç§æ·å段'; COMMENT ON TABLE "config_info_aggr" IS 'å¢å ç§æ·å段'; -- ---------------------------- -- Records of config_info_aggr -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for config_info_beta -- ---------------------------- DROP TABLE IF EXISTS "config_info_beta"; CREATE TABLE "config_info_beta" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "app_name" varchar(128) , "content" text NOT NULL, "beta_ips" varchar(1024) , "md5" varchar(32) , "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "src_user" text , "src_ip" varchar(20) , "tenant_id" varchar(128) , "encrypted_data_key" text NOT NULL ) ; COMMENT ON COLUMN "config_info_beta"."id" IS 'id'; COMMENT ON COLUMN "config_info_beta"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_beta"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_beta"."app_name" IS 'app_name'; COMMENT ON COLUMN "config_info_beta"."content" IS 'content'; COMMENT ON COLUMN "config_info_beta"."beta_ips" IS 'betaIps'; COMMENT ON COLUMN "config_info_beta"."md5" IS 'md5'; COMMENT ON COLUMN "config_info_beta"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "config_info_beta"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info_beta"."src_user" IS 'source user'; 
COMMENT ON COLUMN "config_info_beta"."src_ip" IS 'source ip'; COMMENT ON COLUMN "config_info_beta"."tenant_id" IS 'ç§æ·å段'; COMMENT ON COLUMN "config_info_beta"."encrypted_data_key" IS 'ç§é¥'; COMMENT ON TABLE "config_info_beta" IS 'config_info_beta'; -- ---------------------------- -- Records of config_info_beta -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for config_info_tag -- ---------------------------- DROP TABLE IF EXISTS "config_info_tag"; CREATE TABLE "config_info_tag" ( "id" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "tenant_id" varchar(128) , "tag_id" varchar(128) NOT NULL, "app_name" varchar(128) , "content" text NOT NULL, "md5" varchar(32) , "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL, "src_user" text , "src_ip" varchar(20) ) ; COMMENT ON COLUMN "config_info_tag"."id" IS 'id'; COMMENT ON COLUMN "config_info_tag"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_info_tag"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_info_tag"."tenant_id" IS 'tenant_id'; COMMENT ON COLUMN "config_info_tag"."tag_id" IS 'tag_id'; COMMENT ON COLUMN "config_info_tag"."app_name" IS 'app_name'; COMMENT ON COLUMN "config_info_tag"."content" IS 'content'; COMMENT ON COLUMN "config_info_tag"."md5" IS 'md5'; COMMENT ON COLUMN "config_info_tag"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "config_info_tag"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON COLUMN "config_info_tag"."src_user" IS 'source user'; COMMENT ON COLUMN "config_info_tag"."src_ip" IS 'source ip'; COMMENT ON TABLE "config_info_tag" IS 'config_info_tag'; -- ---------------------------- -- Records of config_info_tag -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for config_tags_relation -- ---------------------------- DROP TABLE IF EXISTS "config_tags_relation"; CREATE TABLE "config_tags_relation" ( "id" bigserial NOT NULL, "tag_name" 
varchar(128) NOT NULL, "tag_type" varchar(64) , "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "tenant_id" varchar(128) , "nid" bigserial NOT NULL ) ; COMMENT ON COLUMN "config_tags_relation"."id" IS 'id'; COMMENT ON COLUMN "config_tags_relation"."tag_name" IS 'tag_name'; COMMENT ON COLUMN "config_tags_relation"."tag_type" IS 'tag_type'; COMMENT ON COLUMN "config_tags_relation"."data_id" IS 'data_id'; COMMENT ON COLUMN "config_tags_relation"."group_id" IS 'group_id'; COMMENT ON COLUMN "config_tags_relation"."tenant_id" IS 'tenant_id'; COMMENT ON TABLE "config_tags_relation" IS 'config_tag_relation'; -- ---------------------------- -- Records of config_tags_relation -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for group_capacity -- ---------------------------- DROP TABLE IF EXISTS "group_capacity"; CREATE TABLE "group_capacity" ( "id" bigserial NOT NULL, "group_id" varchar(128) NOT NULL, "quota" int4 NOT NULL, "usage" int4 NOT NULL, "max_size" int4 NOT NULL, "max_aggr_count" int4 NOT NULL, "max_aggr_size" int4 NOT NULL, "max_history_count" int4 NOT NULL, "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL ) ; COMMENT ON COLUMN "group_capacity"."id" IS '主é®ID'; COMMENT ON COLUMN "group_capacity"."group_id" IS 'Group IDï¼ç©ºå符表示æ´ä¸ªé群'; COMMENT ON COLUMN "group_capacity"."quota" IS 'é é¢ï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "group_capacity"."usage" IS '使ç¨é'; COMMENT ON COLUMN "group_capacity"."max_size" IS 'å个é 置大å°ä¸éï¼åä½ä¸ºåèï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "group_capacity"."max_aggr_count" IS 'èååé ç½®æå¤§ä¸ªæ°ï¼ï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "group_capacity"."max_aggr_size" IS 'å个èåæ°æ®çåé 置大å°ä¸éï¼åä½ä¸ºåèï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "group_capacity"."max_history_count" IS 'æå¤§åæ´å岿°é'; COMMENT ON COLUMN "group_capacity"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "group_capacity"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON TABLE "group_capacity" IS 
'é群ãåGroup容éä¿¡æ¯è¡¨'; -- ---------------------------- -- Records of group_capacity -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for his_config_info -- ---------------------------- DROP TABLE IF EXISTS "his_config_info"; CREATE TABLE "his_config_info" ( "id" int8 NOT NULL, "nid" bigserial NOT NULL, "data_id" varchar(255) NOT NULL, "group_id" varchar(128) NOT NULL, "app_name" varchar(128) , "content" text NOT NULL, "md5" varchar(32) , "gmt_create" timestamp(6) NOT NULL DEFAULT '2010-05-05 00:00:00', "gmt_modified" timestamp(6) NOT NULL, "src_user" text , "src_ip" varchar(20) , "op_type" char(10) , "tenant_id" varchar(128) , "encrypted_data_key" text NOT NULL, "publish_type" varchar(50) DEFAULT 'formal', "gray_name" varchar(50), "ext_info" text ) ; COMMENT ON COLUMN "his_config_info"."app_name" IS 'app_name'; COMMENT ON COLUMN "his_config_info"."tenant_id" IS 'ç§æ·å段'; COMMENT ON COLUMN "his_config_info"."encrypted_data_key" IS 'ç§é¥'; COMMENT ON TABLE "his_config_info" IS 'å¤ç§æ·æ¹é '; COMMENT ON COLUMN "his_config_info"."publish_type" IS 'publish type gray or formal'; COMMENT ON COLUMN "his_config_info"."gray_name" IS 'gray name'; COMMENT ON COLUMN "his_config_info"."ext_info" IS 'ext_info'; -- ---------------------------- -- Table structure for permissions -- ---------------------------- DROP TABLE IF EXISTS "permissions"; CREATE TABLE "permissions" ( "role" varchar(50) NOT NULL, "resource" varchar(512) NOT NULL, "action" varchar(8) NOT NULL ) ; -- ---------------------------- -- Records of permissions -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for roles -- ---------------------------- DROP TABLE IF EXISTS "roles"; CREATE TABLE "roles" ( "username" varchar(50) NOT NULL, "role" varchar(50) NOT NULL ) ; -- ---------------------------- -- Records of roles -- ---------------------------- BEGIN; INSERT INTO "roles" VALUES ('nacos', 'ROLE_ADMIN'); COMMIT; -- 
---------------------------- -- Table structure for tenant_capacity -- ---------------------------- DROP TABLE IF EXISTS "tenant_capacity"; CREATE TABLE "tenant_capacity" ( "id" bigserial NOT NULL, "tenant_id" varchar(128) NOT NULL, "quota" int4 NOT NULL, "usage" int4 NOT NULL, "max_size" int4 NOT NULL, "max_aggr_count" int4 NOT NULL, "max_aggr_size" int4 NOT NULL, "max_history_count" int4 NOT NULL, "gmt_create" timestamp(6) NOT NULL, "gmt_modified" timestamp(6) NOT NULL ) ; COMMENT ON COLUMN "tenant_capacity"."id" IS '主é®ID'; COMMENT ON COLUMN "tenant_capacity"."tenant_id" IS 'Tenant ID'; COMMENT ON COLUMN "tenant_capacity"."quota" IS 'é é¢ï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "tenant_capacity"."usage" IS '使ç¨é'; COMMENT ON COLUMN "tenant_capacity"."max_size" IS 'å个é 置大å°ä¸éï¼åä½ä¸ºåèï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "tenant_capacity"."max_aggr_count" IS 'èååé ç½®æå¤§ä¸ªæ°'; COMMENT ON COLUMN "tenant_capacity"."max_aggr_size" IS 'å个èåæ°æ®çåé 置大å°ä¸éï¼åä½ä¸ºåèï¼0表示使ç¨é»è®¤å¼'; COMMENT ON COLUMN "tenant_capacity"."max_history_count" IS 'æå¤§åæ´å岿°é'; COMMENT ON COLUMN "tenant_capacity"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "tenant_capacity"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON TABLE "tenant_capacity" IS 'ç§æ·å®¹éä¿¡æ¯è¡¨'; -- ---------------------------- -- Records of tenant_capacity -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for tenant_info -- ---------------------------- DROP TABLE IF EXISTS "tenant_info"; CREATE TABLE "tenant_info" ( "id" bigserial NOT NULL, "kp" varchar(128) NOT NULL, "tenant_id" varchar(128) , "tenant_name" varchar(128) , "tenant_desc" varchar(256) , "create_source" varchar(32) , "gmt_create" int8 NOT NULL, "gmt_modified" int8 NOT NULL ) ; COMMENT ON COLUMN "tenant_info"."id" IS 'id'; COMMENT ON COLUMN "tenant_info"."kp" IS 'kp'; COMMENT ON COLUMN "tenant_info"."tenant_id" IS 'tenant_id'; COMMENT ON COLUMN "tenant_info"."tenant_name" IS 'tenant_name'; COMMENT ON COLUMN 
"tenant_info"."tenant_desc" IS 'tenant_desc'; COMMENT ON COLUMN "tenant_info"."create_source" IS 'create_source'; COMMENT ON COLUMN "tenant_info"."gmt_create" IS 'å建æ¶é´'; COMMENT ON COLUMN "tenant_info"."gmt_modified" IS 'ä¿®æ¹æ¶é´'; COMMENT ON TABLE "tenant_info" IS 'tenant_info'; -- ---------------------------- -- Records of tenant_info -- ---------------------------- BEGIN; COMMIT; -- ---------------------------- -- Table structure for users -- ---------------------------- DROP TABLE IF EXISTS "users"; CREATE TABLE "users" ( "username" varchar(50) NOT NULL, "password" varchar(500) NOT NULL, "enabled" boolean NOT NULL ) ; -- ---------------------------- -- Records of users > nAcos_!9#_admIn -- ---------------------------- BEGIN; -- INSERT INTO "users" VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE); INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$LnJDlPeP.XbQnr6iabc65OI6kM4LLS/BRLwPHEtRSERErvnmlKkBu', TRUE); COMMIT; -- ---------------------------- -- Indexes structure for table config_info -- ---------------------------- CREATE UNIQUE INDEX "uk_configinfo_datagrouptenant" ON "config_info" ("data_id","group_id","tenant_id"); -- ---------------------------- -- Primary Key structure for table config_info -- ---------------------------- ALTER TABLE "config_info" ADD CONSTRAINT "config_info_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Indexes structure for table config_info_aggr -- ---------------------------- CREATE UNIQUE INDEX "uk_configinfoaggr_datagrouptenantdatum" ON "config_info_aggr" USING btree ("data_id","group_id","tenant_id","datum_id"); -- ---------------------------- -- Primary Key structure for table config_info_aggr -- ---------------------------- ALTER TABLE "config_info_aggr" ADD CONSTRAINT "config_info_aggr_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Indexes structure for table config_info_beta -- ---------------------------- CREATE UNIQUE INDEX 
"uk_configinfobeta_datagrouptenant" ON "config_info_beta" USING btree ("data_id","group_id","tenant_id"); -- ---------------------------- -- Primary Key structure for table config_info_beta -- ---------------------------- ALTER TABLE "config_info_beta" ADD CONSTRAINT "config_info_beta_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Indexes structure for table config_info_tag -- ---------------------------- CREATE UNIQUE INDEX "uk_configinfotag_datagrouptenanttag" ON "config_info_tag" USING btree ("data_id","group_id","tenant_id","tag_id"); -- ---------------------------- -- Primary Key structure for table config_info_tag -- ---------------------------- ALTER TABLE "config_info_tag" ADD CONSTRAINT "config_info_tag_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Indexes structure for table config_tags_relation -- ---------------------------- CREATE INDEX "idx_tenant_id" ON "config_tags_relation" USING btree ( "tenant_id" ); CREATE UNIQUE INDEX "uk_configtagrelation_configidtag" ON "config_tags_relation" USING btree ( "id", "tag_name", "tag_type" ); -- ---------------------------- -- Primary Key structure for table config_tags_relation -- ---------------------------- ALTER TABLE "config_tags_relation" ADD CONSTRAINT "config_tags_relation_pkey" PRIMARY KEY ("nid"); -- ---------------------------- -- Indexes structure for table group_capacity -- ---------------------------- CREATE UNIQUE INDEX "uk_group_id" ON "group_capacity" USING btree ( "group_id" ); -- ---------------------------- -- Primary Key structure for table group_capacity -- ---------------------------- ALTER TABLE "group_capacity" ADD CONSTRAINT "group_capacity_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Indexes structure for table his_config_info -- ---------------------------- CREATE INDEX "idx_did" ON "his_config_info" USING btree ( "data_id" ); CREATE INDEX "idx_gmt_create" ON "his_config_info" USING btree ( "gmt_create" ); CREATE INDEX "idx_gmt_modified" ON 
"his_config_info" USING btree ( "gmt_modified" ); -- ---------------------------- -- Primary Key structure for table his_config_info -- ---------------------------- ALTER TABLE "his_config_info" ADD CONSTRAINT "his_config_info_pkey" PRIMARY KEY ("nid"); -- ---------------------------- -- Indexes structure for table permissions -- ---------------------------- CREATE UNIQUE INDEX "uk_role_permission" ON "permissions" USING btree ( "role", "resource", "action" ); -- ---------------------------- -- Indexes structure for table roles -- ---------------------------- CREATE UNIQUE INDEX "uk_username_role" ON "roles" USING btree ( "username", "role" ); -- ---------------------------- -- Indexes structure for table tenant_capacity -- ---------------------------- CREATE UNIQUE INDEX "uk_tenant_id" ON "tenant_capacity" USING btree ( "tenant_id" ); -- ---------------------------- -- Primary Key structure for table tenant_capacity -- ---------------------------- ALTER TABLE "tenant_capacity" ADD CONSTRAINT "tenant_capacity_pkey" PRIMARY KEY ("id"); -- ---------------------------- -- Indexes structure for table tenant_info -- ---------------------------- CREATE UNIQUE INDEX "uk_tenant_info_kptenantid" ON "tenant_info" USING btree ( "kp", "tenant_id" ); docker/readme.md
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,103 @@ 1. è¿è¡ ```bash docker images | sort -k1 # æåº docker images | grep -E 'kafka|zook' # è¿æ»¤ docker run -itd -p 5432:5432 postgis/postgis psql --version # psql (PostgreSQL) 17.5 (Debian 17.5-1.pgdg110+1) docker tag postgis/postgis:latest postgis/postgis:17.5 docker run -itd --name nginx nginx docker exec -it nginx bash nginx -v # nginx version: nginx/1.29.0 docker pull nginx:1.29 docker pull bitnami/kafka:3.9 docker pull zookeeper:3.9 # æå3.9ç³»åçææ°çæ¬ docker pull redis:7 docker run -itd --name redis redis:7 redis-server --version # Redis server v=7.4.5 docker run -itd --name mysql -e MYSQL_RANDOM_ROOT_PASSWORD=123456 mysql:5.7 docker exec -it mysql bash mysql --version # mysql Ver 14.14 Distrib 5.7.44 docker run -itd --name mysql -e MYSQL_RANDOM_ROOT_PASSWORD=123456 mysql:8 docker exec -it mysql bash mysql --version # mysql Ver 8.4.5 for Linux on x86_64 docker pull mongo:8 docker run -itd --name mongo -p 27017:27017 -e MONGO_INITDB_ROOT_USERNAME=admin -e MONGO_INITDB_ROOT_PASSWORD=123456 mongo:8 docker exec -it mongodb mongosh db.version() # 8.0.11 use admin db.auth("admin", "123456") show docker run -itd --name openjdk openjdk:8-jre docker exec -it openjdk bash java -version # openjdk version "1.8.0_342" # TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= echo -n "Nacos_Random_String_32_Chars_Or_Longer" | base64 docker run -d --name nacos -p 8848:8848 -e MODE=standalone nacos/nacos-server docker run -d -p 8848:8848 -e MODE=standalone -e NACOS_AUTH_ENABLE=true -e NACOS_AUTH_TOKEN=TmFjb3NfUmFuZG9tX1N0cmluZ18zMl9DaGFyc19Pcl9Mb25nZXI= -e NACOS_AUTH_IDENTITY_KEY=Authorization -e NACOS_AUTH_IDENTITY_VALUE=token nacos/nacos-server:2.5.1 ``` 2.éå ``` bash postgis/postgis:17.5 nginx:1.29 nacos/nacos-server:2.5.1 bitnami/kafka:3.9 zookeeper:3.9 redis:7 openjdk:8-jre mysql:5.7 mongo:8 openjdk8-422-gdal:3.5.2 # JVMåæ° # image: openjdk8-422/gdal:3.5.2 - JAVA_OPTS=-Xms512m -Xmx1024m -XX:+UseG1GC healthcheck: test: ["CMD-SHELL", "curl -f 
http://localhost:8848/nacos || exit 1"] # nacosæä»¶ä¸è½½ https://github.com/wuchubuzai2018/nacos-datasource-extend-plugins https://blog.csdn.net/lilinhai548/article/details/149198957 # Linuxæ¥ç端å£å ç¨ netstat -tulnp | grep 8080 lsof -i :8080 kill -9 26634 # git忢忝 git checkout 3.0.1 # OffsetExplorerè¿æ¥kafkaï¼å¸¦å¯ç ï¼ Security >Type > SASL Plaintext Advanced: > SASL Mechanism > PLAIN JAAS: > Config: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="kaFka_12#$56"; >> Update >> Connect # åçè§£éå¨ï¼æ²¡æé£ä¸ªæä»¶æç®å½ sed -i 's/\r$//' start.sh # ç»è®¡æä»¶å¤¹å¤§å° du -sh postgis/pgdata ``` docker/redis/redis.conf
对比新文件 @@ -0,0 +1 @@ requirepass Redis_s!E_6.2.6 docker/start.sh
对比新文件 @@ -0,0 +1,11 @@ #!/bin/bash cd /data/jhs rm -rf zookeeper_data/* rm -rf kafka_data/* docker-compose down docker-compose up -d # sed -i 's/\r$//' start.sh exit 0 docker/system/bootstrap.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,31 @@ # Tomcat server: port: 8002 # Spring spring: application: # åºç¨åç§° name: se-system profiles: # ç¯å¢é ç½® active: dev cloud: nacos: username: nacos password: nAcos_!9#_admIn discovery: # æå¡æ³¨åå°å server-addr: nacos:8848 config: group: JHS_GROUP # é ç½®ä¸å¿å°å server-addr: nacos:8848 # é ç½®æä»¶æ ¼å¼ file-extension: yml # å ±äº«é ç½® shared-configs: - data-id: application-${spring.profiles.active}.${spring.cloud.nacos.config.file-extension} group: JHS_GROUP # å è®¸å·æ° refresh-enabled: true se-system/src/main/java/com/terra/system/controller/all/PermsController.java
@@ -2,12 +2,12 @@ import com.terra.system.annotation.SysLog; import com.terra.system.entity.all.*; import com.terra.system.entity.data.LayerEntity; import com.terra.system.entity.sys.LayerEntity; import com.terra.system.entity.sys.MenuEntity; import com.terra.system.entity.sys.ResEntity; import com.terra.system.entity.sys.UserEntity; import com.terra.system.service.all.PermsService; import com.terra.system.service.data.LayerService; import com.terra.system.service.sys.LayerService; import com.terra.system.service.sys.MenuService; import com.terra.system.service.sys.TokenService; import io.swagger.annotations.Api; se-system/src/main/java/com/terra/system/controller/data/PublishController.java
@@ -11,10 +11,10 @@ import com.terra.system.helper.PathHelper; import com.terra.system.helper.StringHelper; import com.terra.system.helper.WebHelper; import com.terra.system.service.data.LayerService; import com.terra.system.service.data.MetaService; import com.terra.system.service.data.PublishService; import com.terra.system.service.data.RasterService; import com.terra.system.service.sys.LayerService; import com.terra.system.service.sys.TokenService; import io.swagger.annotations.Api; import io.swagger.annotations.ApiImplicitParam; se-system/src/main/java/com/terra/system/entity/data/LayerEntity.java
文件已删除 se-system/src/main/java/com/terra/system/mapper/data/LayerMapper.java
文件已删除 se-system/src/main/java/com/terra/system/service/data/LayerService.java
文件已删除 se-system/src/main/java/com/terra/system/service/sys/LayerService.java
@@ -122,4 +122,8 @@ public Integer updates(List<LayerEntity> list) { return layerMapper.updates(list); } public void clearCache() { redisService.clearKeys(RedisCacheKey.permsLayerKey("")); } } se-system/src/main/resources/application-prod.yml
¶Ô±ÈÐÂÎļþ @@ -0,0 +1,135 @@ server: tomcat: uri-encoding: UTF-8 max-connections: 5000 max-http-form-post-size: 100MB threads: max: 2000 servlet: context-path: / spring: application: name: se-system mvc: static-path-pattern: /static/** # redis redis: database: 0 host: 192.168.11.203 port: 6379 password: rediS_5L#F4_Server # è¿æ¥è¶ æ¶æ¶é¿ï¼æ¯«ç§ï¼ timeout: 10000 lettuce: pool: # è¿æ¥æ± æå¤§è¿æ¥æ°ï¼ä½¿ç¨è´å¼è¡¨ç¤ºæ²¡æéå¶ï¼ max-active: 1000 # è¿æ¥æ± æå¤§é»å¡çå¾ æ¶é´ï¼ä½¿ç¨è´å¼è¡¨ç¤ºæ²¡æéå¶ï¼ max-wait: -1 # è¿æ¥æ± ä¸çæå¤§ç©ºé²è¿æ¥ max-idle: 10 # è¿æ¥æ± ä¸çæå°ç©ºé²è¿æ¥ min-idle: 5 # session session: # 20åé timeout: PT20M # 设置ä¸ä¼ æä»¶å¤§å° servlet: multipart: enabled: true max-file-size: 204800MB max-request-size: 1048576MB # jackson jackson: time-zone: GMT+8 date-format: yyyy-MM-dd HH:mm:ss locale: zh_CN thymeleaf: cache: false # datasource datasource: type: com.alibaba.druid.pool.DruidDataSource url: jdbc:postgresql://192.168.11.203:5432/jhs?useAffectedRows=true username: postgres password: Postgres!_17_jHs driver-class-name: org.postgresql.Driver platform: POSTGRESQL filters: stat,wall,log4j connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000 logAbandoned: true # é ç½®è¿æ¥æ± ä¿¡æ¯ druid: initial-size: 5 min-idle: 5 max-active: 100 max-wait: 60000 time-between-eviction-runs-millis: 60000 min-evictable-idle-time-millis: 300000 max-pool-prepared-statement-per-connection-size: 50 pool-prepared-statements: true validation-query: SELECT 1 test-while-idle: true test-on-borrow: false test-on-return: false filters: stat,wall web-stat-filter: enabled: true stat-view-servlet: enabled: true url-pattern: /druid/* reset-enable: false login-username: admin login-password: ad_!Druid!_min mybatis-plus: type-aliases-package: com.terra.system.entity config-location: classpath:mybatis.xml mapper-locations: classpath:mapper/**/*.xml # logging logging: config: classpath:logback.xml # RestTemplate config remote: maxTotalConnect: 0 maxConnectPerRoute: 1000 
connectTimeout: 30000 readTimeout: -1 # ç³»ç»é ç½® sys: # å¯ç¨swagger swaggerEnable: true # èªå¨æ¥è¯¢ autoQuery: 0 # 管çåID admin: ad_Lf1122_min # IISç主æºå°å iisHost: 127.0.0.1 # FMEæå¡å°å fmeUrl: http://192.168.11.205:88/ # FME令ç fmeToken: c36e4f94-dfde-401e-9967-2c4a449f1300 # åºå¾æå¡ exportServer: http://127.0.0.1/ExportMap # Turfæå¡ turfServer: http://127.0.0.1/Turf # Gdal驱å¨ç®å½ gdal_path: E:\terrait\TianJin\Zip\release-1928-x64-dev\release-1928-x64\bin # ç¦çå°å tile_path: E:\data\2d\tiles path: # ä¸è½½ç®å½ download: D:\JHS\data\download # ä¸ä¼ ç®å½ upload: D:\JHS\data\upload # 临æ¶ç®å½ temp: D:\JHS\data\temp cad: exePath: C:/360/MxDrawCloudServer1.0TryVersion/MxDrawCloudServer/Bin/MxCAD/Release/mxcadassembly.exe targetPath: C:/360/MxDrawCloudServer1.0TryVersion/MxDrawCloudServer/SRC/TsWeb/public/data # ä¸ä¼ é件表 attachTabs: bd.b_pac_hydrogeology,bd.b_pac_frozensoil sm.txt
@@ -11,11 +11,38 @@ http://localhost:8001/system/dir/selectDir?id=1 http://localhost:8002/dir/selectDir?id=1 http://192.168.11.203:8848/nacos http://192.168.11.203:8081/api/system/swagger-ui.html ------------------------------------------------------------------- PIE-Engine Serverï¼Pixel Information Expert 饿/GISæ°æ®æ±éã管çãæ²»çãåå¸ä¸å ±äº«çè½åï¼æ°æ®æ¥å ¥ãç¼ç®ãåå¨ãå ±äº«ãåå¸ä¸åºç¨ã piesat.cnï¼https://engine.piesat.cn/server/#/ https://support.huaweicloud.com/pie-engine-mapslt/pie-engine_04.html https://piesat.cn/website/cn/pages/product/dl-center/software-download.htmlï¼ä¸è½½ä¸å¿ ------------------------------------------------------------------- å°å¾å¼åãWebSDKãRestSDK 3.8.4.3.RestSDKå¼åï¼æ¥å£è°ç¨åç±»åçæ¶ç©ºæ°æ®æå¡ï¼è·åè¿åjsonæxmlæ ¼å¼çæ°æ®ã æ°æ®æ¥å ¥æå¡ï¼ä¸»è¦æä¾ä¸åç±»åæ°æ®çæ¥å ¥åçæ§è½åï¼å ±è®¡20个æ¥å£ã æ°æ®ç®å½æå¡ï¼ä¸»è¦æä¾æ°æ®ç®å½åæ°æ®éçæå¡æ¥å£ï¼å ¶ä¸æ°æ®ç®å½ç¸å ³æ¥å£æ12ä¸ªï¼æ°æ®éç¸å ³æ¥å£æ7ä¸ªã æ°æ®å¯¹è±¡æå¡ï¼ä¸»è¦å æ¬å 模åæå¡6个æ¥å£ï¼æ°æ®ç±»åæå¡8个æ¥å£ï¼æ°æ®å¯¹è±¡ç®¡çæå¡14个æ¥å£ï¼å æ°æ®ä¿¡æ¯ç®¡çæå¡6个æ¥å£ã æ°æ®æ¥è¯¢æå¡ï¼åå«Opensearchæå¡ãå½å±äºæ°æ®ç®å½æå¡ï¼ä¸»è¦æä¾æç±»åãæç®å½ãæå ³é®åæ¥è¯¢çæå¡æ¥å£ï¼å ±è®¡6个æ¥å£ã ç¢éæ°æ®æå¡ï¼ä¸»è¦é对ç¢éç±»åæ°æ®æä¾æ°æ®æ£ç´¢ãæ´æ°ãå é¤ãæ°å¢çè½åï¼å ±è®¡8个æ¥å£ã åºç¡å°çæå¡ï¼ä¸»è¦å æ¬è¡æ¿åºåæ¥è¯¢æå¡ãæå½±åæ æ¥è¯¢æå¡ã åå¨è®¾å¤ç®¡çæå¡ï¼è¯¥æå¡ç®¡çç³»ç»é»è®¤å ç½®åç¨æ·èªå®ä¹åå¨è®¾å¤ï¼ç¨ä»¥åææç§æ·/ç¨æ·æä¾åç§ç±»åæ°æ®å¯¹è±¡çå卿å¡ï¼å¹¶æä¾éç¨åå¨è®¾å¤æä½åè½ï¼å ±è®¡6个æ¥å£ã æä»¶ä»£çæå¡ï¼ä¸»è¦æ¯æä»£çå ±äº«åå¨ç³»ç»ï¼nfsãæ¬å°æä»¶ï¼å对象åå¨ï¼s3ãobsãminioï¼ï¼åæ¶æä¾å°æä»¶ä¸ä¼ ã大æä»¶åçä¸ä¼ åä¸è½½åè½ï¼æ¯ææç¹ç»ä¼ ï¼å ±è®¡18个æ¥å£ã æ°æ®æå æå¡ï¼ä¸»è¦æä¾æ°æ®æå ä»»å¡å建ãçæ§ãå é¤ï¼ä»¥åå缩å ä¸è½½æå¡ï¼å ±è®¡4个æ¥å£ã ------------------------------------------------------------------- -------------------------------------------------------------------