Analysis of kafka partition failure

Keywords: Operation & Maintenance JSON encoding github

Previously, no matter which partitioner mode was adopted, the messages all ended up in a single partition.

The producer's partitioner determines which partition a message is sent to when the topic has multiple partitions.

sarama provides several partitioners:

sarama.NewManualPartitioner() //Returns a manually selected partition divider, that is, to get the 'partition' specified in the msg`
sarama.NewRandomPartitioner() //Obtain a partition number by random function 
sarama.NewRoundRobinPartitioner() //Round-robin selection, i.e. cycling through all partitions and picking the next one in turn 
sarama.NewHashPartitioner() //The hash value is generated by the key in the msg, and the partition is selected, 

I suggest the third one (round-robin), which distributes messages evenly across partitions.

At present, I have changed it to 5 default partitions, which can be evenly distributed,

Reference https://blog.csdn.net/qq_32292967/article/details/78675116

Cause screenshots

  1. The number of partitions defaults to 1 if not set. If the topic's partition count is set to 5, the data under the same topic will be split across 5 partitions for storage.
  2. When the producer pushes the data, remember to set the partitioning policy.

Test code

/*
* @Author: Rui XuLe
* @Date:   2019-07-12 22:40:03
* @Last Modified by:   Rui XuLe
* @Last Modified time: 2019-07-13 22:05:08
 */
package main

import (
	"encoding/json"
	"fmt"
	"log"
	// "math/rand"
	"os"
	"time"

	"github.com/Shopify/sarama"
)

// Address lists the Kafka broker addresses the producer connects to.
var Address = []string{"100.200.101.75:9092"}

// main builds a synchronous Kafka producer and sends the test messages.
func main() {
	p := syncProducerInit(Address)
	// syncProducerInit returns nil when the producer could not be created;
	// guard here so sendMeg does not panic on the deferred p.Close().
	if p == nil {
		log.Fatal("failed to initialize sync producer")
	}
	sendMeg(p)
}

// MegInfo is the payload that gets JSON-encoded and published to Kafka.
type MegInfo struct {
	Name    string    `json:"name"` // sender name
	Meg     string    `json:"meg"`  // message body
	// NOTE(review): the JSON tag is "timeTime", which looks like a typo of
	// "timeMeg" — but consumers may already depend on this key; confirm
	// before changing it.
	TimeMeg time.Time `json:"timeTime"`
}

//Synchronous message mode
// syncProducerInit creates a synchronous Kafka producer for the given
// broker addresses. On failure it logs the error and returns nil, so the
// caller must check for a nil producer before using it.
func syncProducerInit(address []string) (p sarama.SyncProducer) {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required for SyncProducer acks
	cfg.Producer.Timeout = 5 * time.Second
	// Round-robin partitioner spreads messages evenly over all partitions.
	cfg.Producer.Partitioner = sarama.NewRoundRobinPartitioner

	producer, err := sarama.NewSyncProducer(address, cfg)
	if err != nil {
		log.Printf("sarama.NewSyncProducer err, message=%s \n", err)
		return nil
	}
	return producer
}

//Mainly the sending of messages
func sendMeg(p sarama.SyncProducer) {
	defer p.Close()
	topic := "test_go_kafka_producer"
	m := MegInfo{
		Name:    "ruixule",
		Meg:     "Today is 20190720, I'm testing gokafka",
		TimeMeg: time.Now(),
	}

	data, _ := json.Marshal(m)
	// srcValue := string(data) //"sync: this is ; message. index=%d"
	for i := 0; i < 100; i++ {
		value := string(data) // fmt.Sprintf(srcValue)
		msg := &sarama.ProducerMessage{
			//Partition: int32(i),
			//Key:       sarama.StringEncoder(fmt.Sprintf("%d", rand.Intn(10))),
			Timestamp: time.Now(),
			Topic:     topic,
			Value:     sarama.ByteEncoder(value),
		}
		part, offset, err := p.SendMessage(msg)
		if err != nil {
			log.Printf("send message(%s) err=%s \n", value, err)
		} else {
			fmt.Fprintf(os.Stdout, value+"Sent successfully, partition=%d, offset=%d \n", part, offset)
		}
	}
}

``

Posted by 1veedo on Mon, 06 Apr 2020 07:17:02 -0700