# HTTP port this service listens on.
server:
  port: 2067

spring:
  cloud:
    nacos:
      discovery:
        # Nacos service-registry address (local instance, default port).
        server-addr: 127.0.0.1:8848
  application:
    # Service name registered with Nacos; used by callers for discovery.
    name: 2067-yuexiu-task-application
  redis:
    # NOTE(review): production host and password committed in plaintext —
    # move to environment variables or a secret store.
    host: 139.159.228.191
    port: 6377
    password: j4kd4ng3s8f3
    database: 0
    jedis:
      # Jedis connection-pool sizing. Only effective if the Jedis client is
      # on the classpath (Spring Boot 2 defaults to Lettuce) — TODO confirm.
      pool:
        max-active: 1000
        max-idle: 100
        min-idle: 8
        max-wait: 3000ms  # max time to block waiting for a pooled connection
    timeout: 10000ms  # Redis connect/command timeout
# Alternative (dev?) Redis endpoint kept for reference — disabled.
#  redis:
#    host: 192.168.88.200
#    port: 5001
#    password:
  jpa:
    hibernate:
      # Keep the legacy (pre-Hibernate-5) identifier generator behavior.
      use-new-id-generator-mappings: false
# Custom flag — presumably read by a project Swagger/Springfox configuration
# class to toggle API docs; verify against the consumer.
swagger:
  enable: true

# Ribbon client-side load balancing for the services above.
ribbon:
  ReadTimeout: 100000     # read timeout in ms (100 s)
  ConnectTimeout: 100000  # connect timeout in ms (100 s)
  okhttp:
    enabled: true  # use OkHttp as the underlying HTTP client

# Feign client configuration.
feign:
  sentinel:
    enabled: true  # wrap Feign calls with Sentinel flow control / circuit breaking

# FIX(review): in the original file the `kafka:` and `cloud:` sections below
# were indented under `feign:`, yielding unknown `feign.kafka.*` and
# `feign.cloud.*` properties that Spring Boot silently ignores — so neither
# Kafka nor the Sentinel degrade-rule datasource was actually configured.
# They belong in the `spring` property tree. A second top-level `spring:` key
# would be a duplicate YAML key, so Spring Boot's dotted-key relaxed binding
# is used here to merge these sections into the `spring:` block defined above.
spring.kafka:
  bootstrap-servers: "139.159.224.135:9092"  # Kafka production cluster host:port
  listener:
    # Do not fail application startup when a subscribed topic does not exist yet.
    missing-topics-fatal: false
  producer:
    acks: all  # wait for the full ISR to acknowledge each record
    # NOTE(review): 40960 bytes (~40 KB) is far below the 32 MB producer
    # default; confirm this small buffer is intentional.
    buffer-memory: 40960
    retries: 0
    batch-size: 4096
    properties:
      linger.ms: 1  # small batching delay (ms) before sending
    key-serializer: org.apache.kafka.common.serialization.StringSerializer
    value-serializer: org.apache.kafka.common.serialization.StringSerializer
  consumer:
    enable-auto-commit: true  # offsets committed automatically by the client
    auto-commit-interval: 100ms
    # When Kafka has no initial offset, or the committed offset is out of range:
    #   earliest: reset to the smallest offset in the partition
    #   latest:   reset to the newest offset (consume only newly produced data)
    #   none:     throw if any partition has no committed offset
    auto-offset-reset: latest  # real-time consumption; never replays from the start
    group-id: defaultConsumerGroup
    key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    value-deserializer: org.apache.kafka.common.serialization.StringDeserializer

# Dynamic circuit-breaker (degrade) rules loaded from Apollo at runtime.
spring.cloud.sentinel:
  eager: true  # initialize Sentinel at startup instead of on first request
  datasource:
    ds1:
      apollo:
        namespaceName: application
        flowRulesKey: degrade-rules  # Apollo key holding the rule JSON
        dataType: json
        ruleType: DEGRADE