Merge branch 'main' of http://37.143.12.169/test3k/authDB
commit
338a76a698
7 changed files with 277 additions and 63 deletions
@ -0,0 +1,26 @@ |
||||
# Single-node Kafka development stack (Confluent 7.3.0 images):
# one ZooKeeper instance plus one broker, for local use only
# (replication factors are all 1).
version: '2'

services:

  zookeeper:
    image: confluentinc/cp-zookeeper:7.3.0
    container_name: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  broker:
    image: confluentinc/cp-kafka:7.3.0
    container_name: broker
    ports:
      # To learn about configuring Kafka for access across networks see
      # https://www.confluent.io/blog/kafka-client-cannot-connect-to-broker-on-aws-on-docker-etc/
      - "9092:9092"
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      # Two listeners: PLAINTEXT is advertised as localhost:9092 for host
      # clients; PLAINTEXT_INTERNAL as broker:29092 for other containers
      # on the compose network.
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092,PLAINTEXT_INTERNAL://broker:29092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
@ -0,0 +1,34 @@ |
||||
package kafka |
||||
|
||||
import ( |
||||
"github.com/segmentio/kafka-go" |
||||
) |
||||
|
||||
// KafkaReader wraps a kafka-go Reader together with the connection
// details it was constructed from.
type KafkaReader struct {
	reader *kafka.Reader // underlying consumer-group reader
	first  string        // first configured broker address (stored but not read by current methods)
	topic  string        // topic this reader was created for
}
||||
|
||||
func NewReader(topic string, address ...string) *KafkaReader { |
||||
return &KafkaReader{ |
||||
reader: kafka.NewReader(kafka.ReaderConfig{ |
||||
Topic: topic, |
||||
Brokers: address, |
||||
GroupID: "consumer-group-id", // TODO
|
||||
Partition: 0, // TODO
|
||||
MinBytes: 10e3, // 10KB
|
||||
MaxBytes: 10e6, // 10MB
|
||||
}), |
||||
first: address[0], |
||||
topic: topic, |
||||
} |
||||
} |
||||
|
||||
func (s *KafkaReader) Close() error { |
||||
return s.reader.Close() |
||||
} |
||||
|
||||
// ReadMessage is an unimplemented stub: it ignores both arguments and
// always returns nil.
//
// NOTE(review): presumably this was meant to consume a message via
// s.reader (mirroring KafkaWriter.WriteMessage), but the body never
// touches s.reader and the (key, value string) parameters cannot carry
// results back to the caller — the intended signature likely needs
// rethinking. Confirm before relying on this method.
func (s *KafkaReader) ReadMessage(key string, value string) error {
	return nil
}
@ -0,0 +1,122 @@ |
||||
package kafka |
||||
|
||||
import ( |
||||
"context" |
||||
"log" |
||||
"net" |
||||
"strconv" |
||||
|
||||
"github.com/segmentio/kafka-go" |
||||
) |
||||
|
||||
// KafkaWriter wraps a kafka-go Writer together with the connection
// details it was constructed from.
type KafkaWriter struct {
	writer *kafka.Writer // underlying producer
	first  string        // first configured broker address, used for admin dials (topic listing/creation)
	topic  string        // topic this writer publishes to
}
||||
|
||||
func NewWriter(topic string, address ...string) *KafkaWriter { |
||||
s := &KafkaWriter{ |
||||
writer: &kafka.Writer{ |
||||
Topic: topic, |
||||
Balancer: &kafka.LeastBytes{}, |
||||
Addr: kafka.TCP(address...), |
||||
}, |
||||
first: address[0], |
||||
topic: topic, |
||||
} |
||||
|
||||
// Приверим и при необходимости создадим топик
|
||||
era := s.checkTopic() |
||||
if era != nil { |
||||
log.Fatal(era) |
||||
} |
||||
|
||||
return s |
||||
} |
||||
|
||||
func (s *KafkaWriter) Close() error { |
||||
return s.writer.Close() |
||||
} |
||||
|
||||
func (s *KafkaWriter) fetchTopics() (map[string]bool, error) { |
||||
conn, err := kafka.Dial("tcp", s.first) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer conn.Close() |
||||
|
||||
//
|
||||
partitions, erp := conn.ReadPartitions() |
||||
if erp != nil { |
||||
return nil, erp |
||||
} |
||||
|
||||
//
|
||||
topics := make(map[string]bool) |
||||
for _, p := range partitions { |
||||
topics[p.Topic] = true |
||||
} |
||||
|
||||
return topics, nil |
||||
} |
||||
|
||||
func (s *KafkaWriter) createTopic() error { |
||||
conn, err := kafka.Dial("tcp", s.first) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer conn.Close() |
||||
|
||||
//
|
||||
controller, era := conn.Controller() |
||||
if era != nil { |
||||
return era |
||||
} |
||||
|
||||
//
|
||||
controllerConn, eru := kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port))) |
||||
if eru != nil { |
||||
return eru |
||||
} |
||||
defer controllerConn.Close() |
||||
|
||||
//
|
||||
topicConfigs := []kafka.TopicConfig{ |
||||
{ |
||||
Topic: s.topic, |
||||
NumPartitions: 1, |
||||
ReplicationFactor: 1, |
||||
}, |
||||
} |
||||
|
||||
return controllerConn.CreateTopics(topicConfigs...) |
||||
} |
||||
|
||||
func (s *KafkaWriter) checkTopic() error { |
||||
topics, err := s.fetchTopics() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// Если топика нет, то создадим
|
||||
if _, ok := topics[s.topic]; !ok { |
||||
era := s.createTopic() |
||||
if era != nil { |
||||
return era |
||||
} |
||||
|
||||
log.Printf("create topic %q\n", s.topic) |
||||
|
||||
return era |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (s *KafkaWriter) WriteMessage(key string, value string) error { |
||||
return s.writer.WriteMessages(context.Background(), kafka.Message{ |
||||
Key: []byte(key), |
||||
Value: []byte(value), |
||||
}) |
||||
} |
Loading…
Reference in new issue