QPID Communication Code 04

  • TextMessage
  • ListMessage
  • MapMessage
  • StreamMessage

1. MqConsumerStream.java

package com.neohope.qpid.test;

import org.apache.qpid.client.AMQAnyDestination;
import org.apache.qpid.client.AMQConnection;

import javax.jms.*;

public class MqConsumerStream {
    public static void main(String[] args) throws Exception {
        Connection connection =
                new AMQConnection("amqp://guest:guest@test/?brokerlist='tcp://localhost:5672'");

        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination queue = new AMQAnyDestination("ADDR:message_queue; {create: always}");
        MessageConsumer consumer = session.createConsumer(queue);

        System.out.println("Receiving as StreamMessage");
        StreamMessage m = (StreamMessage) consumer.receive();
        System.out.println(m);
        System.out.println("==========================================");
        System.out.println("Printing stream contents:");
        try {
            while (true)
                System.out.println(m.readObject());
        } catch (MessageEOFException e) {
            // readObject() throws MessageEOFException once the stream is exhausted
        }

        connection.close();
    }
}
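
The matching producer is not part of this excerpt. Below is a minimal sketch of one, reusing the broker URL and address string from the consumer above; the class name MqProducerStream and the sample values are illustrative assumptions, not code from the original post.

package com.neohope.qpid.test;

import org.apache.qpid.client.AMQAnyDestination;
import org.apache.qpid.client.AMQConnection;

import javax.jms.*;

public class MqProducerStream {
    public static void main(String[] args) throws Exception {
        Connection connection =
                new AMQConnection("amqp://guest:guest@test/?brokerlist='tcp://localhost:5672'");
        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination queue = new AMQAnyDestination("ADDR:message_queue; {create: always}");
        MessageProducer producer = session.createProducer(queue);

        // write typed values into the stream in order; the consumer above
        // reads them back with readObject() until MessageEOFException
        StreamMessage m = session.createStreamMessage();
        m.writeBoolean(true);
        m.writeInt(42);
        m.writeString("hello stream");
        producer.send(m);

        connection.close();
    }
}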

QPID Communication Code 03

  • TextMessage
  • ListMessage
  • MapMessage
  • StreamMessage

1. MqConsumerMap.java

package com.neohope.qpid.test;

import org.apache.qpid.client.AMQAnyDestination;
import org.apache.qpid.client.AMQConnection;

import javax.jms.*;
import java.util.Enumeration;

public class MqConsumerMap {
    public static void main(String[] args) throws Exception {
        Connection connection =
                new AMQConnection("amqp://guest:guest@test/?brokerlist='tcp://localhost:5672'");

        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination queue = new AMQAnyDestination("ADDR:message_queue; {create: always}");
        MessageConsumer consumer = session.createConsumer(queue);

        System.out.println("Receiving as MapMessage");
        MapMessage m = (MapMessage) consumer.receive();
        System.out.println(m);
        System.out.println("==========================================");
        System.out.println("Printing map contents:");
        Enumeration keys = m.getMapNames();
        while (keys.hasMoreElements()) {
            String key = (String) keys.nextElement();
            System.out.println(key + " => " + m.getObject(key));
        }

        connection.close();
    }
}
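
The matching producer is not part of this excerpt. A minimal sketch, again reusing the consumer's broker URL and address string; the entry names and values are illustrative assumptions.

package com.neohope.qpid.test;

import org.apache.qpid.client.AMQAnyDestination;
import org.apache.qpid.client.AMQConnection;

import javax.jms.*;

public class MqProducerMap {
    public static void main(String[] args) throws Exception {
        Connection connection =
                new AMQConnection("amqp://guest:guest@test/?brokerlist='tcp://localhost:5672'");
        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination queue = new AMQAnyDestination("ADDR:message_queue; {create: always}");
        MessageProducer producer = session.createProducer(queue);

        // named, typed entries; the consumer above walks them via getMapNames()
        MapMessage m = session.createMapMessage();
        m.setInt("id", 1);
        m.setString("name", "neohope");
        m.setDouble("score", 99.5);
        producer.send(m);

        connection.close();
    }
}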

QPID Communication Code 02

  • TextMessage
  • ListMessage
  • MapMessage
  • StreamMessage

1. MqConsumerList.java

package com.neohope.qpid.test;

import org.apache.qpid.client.AMQAnyDestination;
import org.apache.qpid.client.AMQConnection;
import org.apache.qpid.jms.ListMessage;

import javax.jms.*;
import java.util.Enumeration;
import java.util.Iterator;

public class MqConsumerList {
    public static void main(String[] args) throws Exception {
        Connection connection =
                new AMQConnection("amqp://guest:guest@test/?brokerlist='tcp://localhost:5672'");

        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination queue = new AMQAnyDestination("ADDR:message_queue; {create: always}");
        MessageConsumer consumer = session.createConsumer(queue);

        System.out.println("Receiving as ListMessage");
        ListMessage m = (ListMessage) consumer.receive();
        System.out.println(m);
        System.out.println("==========================================");
        System.out.println("Printing list contents:");
        Iterator i = m.iterator();
        while (i.hasNext())
            System.out.println(i.next());

        connection.close();
    }
}
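
The matching producer is not part of this excerpt. ListMessage is a Qpid extension rather than standard JMS, so the sketch below assumes the client exposes createListMessage() on its AMQSession implementation and add(...) on ListMessage; verify these against the client version in use. The sample values are illustrative.

package com.neohope.qpid.test;

import org.apache.qpid.client.AMQAnyDestination;
import org.apache.qpid.client.AMQConnection;
import org.apache.qpid.client.AMQSession;
import org.apache.qpid.jms.ListMessage;

import javax.jms.*;

public class MqProducerList {
    public static void main(String[] args) throws Exception {
        Connection connection =
                new AMQConnection("amqp://guest:guest@test/?brokerlist='tcp://localhost:5672'");
        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination queue = new AMQAnyDestination("ADDR:message_queue; {create: always}");
        MessageProducer producer = session.createProducer(queue);

        // createListMessage() is a Qpid extension, hence the AMQSession cast
        ListMessage m = ((AMQSession) session).createListMessage();
        m.add("first");
        m.add(2);
        m.add(3.0);
        producer.send(m);

        connection.close();
    }
}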

QPID Communication Code 01

  • TextMessage
  • ListMessage
  • MapMessage
  • StreamMessage

1. MqConsumerText.java

package com.neohope.qpid.test;

import javax.jms.*;
import javax.naming.Context;
import javax.naming.InitialContext;
import java.util.Properties;

public class MqConsumerText {

    public static void main(String[] args)
    {
        try
        {
            Properties properties = new Properties();
            properties.setProperty("java.naming.factory.initial", "org.apache.qpid.jndi.PropertiesFileInitialContextFactory");
            properties.setProperty("connectionfactory.qpidConnectionfactory","amqp://guest:guest@clientid/?brokerlist='tcp://localhost:5672'");
            properties.setProperty("destination.topicExchange","amq.topic");

            Context context = new InitialContext(properties);

            ConnectionFactory connectionFactory = (ConnectionFactory) context.lookup("qpidConnectionfactory");
            Connection connection = connectionFactory.createConnection();
            connection.start();

            Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
            Destination destination = (Destination) context.lookup("topicExchange");

            MessageConsumer messageConsumer = session.createConsumer(destination);
            TextMessage message = (TextMessage)messageConsumer.receive();
            System.out.println(message.getText());

            connection.close();
            context.close();
        }
        catch (Exception ex)
        {
            ex.printStackTrace();
        }
    }
}
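
The matching producer is not part of this excerpt. A minimal sketch using the same JNDI properties as the consumer above; the message text is an illustrative assumption.

package com.neohope.qpid.test;

import javax.jms.*;
import javax.naming.Context;
import javax.naming.InitialContext;
import java.util.Properties;

public class MqProducerText {

    public static void main(String[] args)
    {
        try
        {
            Properties properties = new Properties();
            properties.setProperty("java.naming.factory.initial", "org.apache.qpid.jndi.PropertiesFileInitialContextFactory");
            properties.setProperty("connectionfactory.qpidConnectionfactory", "amqp://guest:guest@clientid/?brokerlist='tcp://localhost:5672'");
            properties.setProperty("destination.topicExchange", "amq.topic");

            Context context = new InitialContext(properties);

            ConnectionFactory connectionFactory = (ConnectionFactory) context.lookup("qpidConnectionfactory");
            Connection connection = connectionFactory.createConnection();
            connection.start();

            Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
            Destination destination = (Destination) context.lookup("topicExchange");

            // publish a single text message to amq.topic
            MessageProducer messageProducer = session.createProducer(destination);
            messageProducer.send(session.createTextMessage("Hello QPID!"));

            connection.close();
            context.close();
        }
        catch (Exception ex)
        {
            ex.printStackTrace();
        }
    }
}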

Kafka Communication Code 03

  • Producer
  • Consumer
  • GroupConsumer

1. MqConsumerGroup.java

package com.neohope.kafka.test;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class MqConsumerGroup {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;
    private ConsumerThread[] m_Threads;

    public MqConsumerGroup(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null) consumer.shutdown();
        if (executor == null) return;

        executor.shutdown();
        try {
            if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                System.out.println("Timed out waiting for consumer threads to shut down, exiting uncleanly");
            }
        } catch (InterruptedException e) {
            System.out.println("Interrupted during shutdown, exiting uncleanly");
        }
    }

    public void run(int a_numThreads) throws InterruptedException {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, Integer.valueOf(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // now launch all the threads
        //
        executor = Executors.newFixedThreadPool(a_numThreads);

        // now create an object to consume the messages
        //
        m_Threads = new ConsumerThread[a_numThreads];
        int threadNumber = 0;
        for (final KafkaStream stream : streams) {
            m_Threads[threadNumber] = new ConsumerThread(stream, threadNumber);
            executor.submit(m_Threads[threadNumber]);
            threadNumber++;
        }
    }

    public void waitForEnd(int a_numThreads) throws InterruptedException {
        // poll until any worker thread reports that it has seen the end marker
        boolean bEnd = false;
        while (!bEnd) {
            Thread.sleep(200);
            for (int threadNumber = 0; threadNumber < a_numThreads; threadNumber++) {
                if (m_Threads[threadNumber].m_end) {
                    bEnd = true;
                }
            }
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");

        return new ConsumerConfig(props);
    }

    public static void main(String[] args) throws InterruptedException {
        String zooKeeper = "localhost:2181";
        String groupId = "group1";
        String topic = "neoTopic";
        int threads = 2;

        MqConsumerGroup mqcGroup = new MqConsumerGroup(zooKeeper, groupId, topic);
        mqcGroup.run(threads);
        mqcGroup.waitForEnd(threads);
        mqcGroup.shutdown();
    }
}
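
MqConsumerGroup references a ConsumerThread class that is not shown in this excerpt. The sketch below is one way to satisfy the constructor signature and the public m_end flag used above: it drains its KafkaStream and raises m_end when it sees the -=END=- end marker used throughout this series. The original implementation may differ.

package com.neohope.kafka.test;

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.message.MessageAndMetadata;

public class ConsumerThread implements Runnable {
    private final KafkaStream<byte[], byte[]> m_stream;
    private final int m_threadNumber;
    public volatile boolean m_end = false;

    public ConsumerThread(KafkaStream<byte[], byte[]> a_stream, int a_threadNumber) {
        m_stream = a_stream;
        m_threadNumber = a_threadNumber;
    }

    public void run() {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> msg = it.next();
            String key = msg.key() == null ? null : new String(msg.key());
            String value = new String(msg.message());
            System.out.println("Thread " + m_threadNumber + ": key=" + key + " value=" + value);
            if ("-=END=-".equals(key)) {
                m_end = true; // lets waitForEnd() in MqConsumerGroup return
                break;
            }
        }
    }
}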

Kafka Communication Code 02

  • Producer
  • Consumer
  • GroupConsumer

1. MqConsumer.java

package com.neohope.kafka.test;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MqConsumer {
    private List<String> m_replicaBrokers = new ArrayList<String>();

    public void run(String a_topic, int a_partition, List<String> a_seedBrokers, int a_port) throws Exception {
        // find the meta data about the topic and partition we are interested in
        //
        PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
        if (metadata == null) {
            System.out.println("Can't find metadata for Topic and Partition. Exiting");
            return;
        }
        if (metadata.leader() == null) {
            System.out.println("Can't find Leader for Topic and Partition. Exiting");
            return;
        }
        String leadBroker = metadata.leader().host();
        String clientName = "Client_" + a_topic + "_" + a_partition;

        SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
        long readOffset = getLastOffset(consumer,a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

        boolean bEnd = false;
        while (!bEnd) {
            if (consumer == null) {
                consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
            }
            FetchRequest req = new FetchRequestBuilder()
                    .clientId(clientName)
                    .addFetch(a_topic, a_partition, readOffset, 100000) // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
                    .build();
            FetchResponse fetchResponse = consumer.fetch(req);

            if (fetchResponse.hasError()) {
                // Something went wrong!
                short code = fetchResponse.errorCode(a_topic, a_partition);
                System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
                if (code == ErrorMapping.OffsetOutOfRangeCode())  {
                    // We asked for an invalid offset. For simple case ask for the last element to reset
                    readOffset = getLastOffset(consumer,a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                    continue;
                }
                consumer.close();
                consumer = null;
                leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
                continue;
            }

            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
                long currentOffset = messageAndOffset.offset();

                if (currentOffset < readOffset) {
                    System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                    continue;
                }
                readOffset = messageAndOffset.nextOffset();
                // the producers in this series always send a key; an unkeyed
                // message would make message().key() return null here
                ByteBuffer key = messageAndOffset.message().key();
                byte[] bytesKey = new byte[key.limit()];
                key.get(bytesKey);
                String szKey = new String(bytesKey, "UTF-8");
                if (szKey.equals("-=END=-")) bEnd = true;

                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytesPayload = new byte[payload.limit()];
                payload.get(bytesPayload);
                String szPayload = new String(bytesPayload, "UTF-8");

                String offset = String.valueOf(messageAndOffset.offset());

                System.out.println("offset=" + offset+ " key=" + szKey +" value="+szPaylaod);
            }

            Thread.sleep(1000);
        }
        if (consumer != null) consumer.close();
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                     long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition) );
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port) throws Exception {
        for (int i = 0; i < 3; i++) {
            boolean goToSleep = false;
            PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
            if (metadata == null) {
                goToSleep = true;
            } else if (metadata.leader() == null) {
                goToSleep = true;
            } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                // first time through if the leader hasn't changed give ZooKeeper a second to recover
                // second time, assume the broker did recover before failover, or it was a non-Broker issue
                //
                goToSleep = true;
            } else {
                return metadata.leader().host();
            }
            if (goToSleep) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        System.out.println("Unable to find new leader after Broker failure. Exiting");
        throw new Exception("Unable to find new leader after Broker failure. Exiting");
    }

    private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
        PartitionMetadata returnMetaData = null;
        loop:
        for (String seed : a_seedBrokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        if (part.partitionId() == a_partition) {
                            returnMetaData = part;
                            break loop;
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic
                        + ", " + a_partition + "] Reason: " + e);
            } finally {
                if (consumer != null) consumer.close();
            }
        }
        if (returnMetaData != null) {
            m_replicaBrokers.clear();
            for (kafka.cluster.BrokerEndPoint replica : returnMetaData.replicas()) {
                m_replicaBrokers.add(replica.host());
            }
        }
        return returnMetaData;
    }

    public static void main(String args[]) {
        MqConsumer mqc = new MqConsumer();

        List<String> seeds = new ArrayList<String>();
        seeds.add("localhost");

        String topic = "neoTopic";
        int partition = 0;
        int port = 9092;

        try {
            mqc.run(topic, partition, seeds, port);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

Kafka Communication Code 01

  • Producer
  • Consumer
  • GroupConsumer

1. MqProducer.java

package com.neohope.kafka.test;

import java.util.*;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MqProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("metadata.broker.list", "localhost:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("partitioner.class", "com.neohope.kafka.test.SimplePartitioner");
        props.put("request.required.acks", "1");

        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);
        for (int i = 0; i < 100; i++) {
            KeyedMessage<String, String> data = new KeyedMessage<String, String>("neoTopic", "key"+i, "value"+i);
            producer.send(data);
        }
        KeyedMessage<String, String> data = new KeyedMessage<String, String>("neoTopic", "-=END=-", "-=END=-");
        producer.send(data);

        producer.close();
    }
}
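
The producer configuration points partitioner.class at com.neohope.kafka.test.SimplePartitioner, which is not shown in this excerpt. With the 0.8.x producer API a custom partitioner implements kafka.producer.Partitioner and must provide a constructor taking VerifiableProperties; the hashing scheme below is an assumption.

package com.neohope.kafka.test;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class SimplePartitioner implements Partitioner {
    // the producer instantiates the partitioner reflectively and
    // requires this constructor signature
    public SimplePartitioner(VerifiableProperties props) {
    }

    public int partition(Object key, int numPartitions) {
        // route by key hash so equal keys always land on the same partition;
        // masking keeps the result non-negative
        return (key.hashCode() & 0x7fffffff) % numPartitions;
    }
}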

ActiveMQ Communication Code 04

  • JMS-style invocation
  • Queue-based invocation
  • Topic-based invocation
  • Request/response invocation

1. TestMsg.java

package com.neohope.ActiveMQ.test.beans;

public class TestMsg implements java.io.Serializable{
    private static final long serialVersionUID = 12345678L;

    public TestMsg(int taskId, String taskInfo, int taskLevel) {
        this.taskId = taskId;
        this.taskInfo = taskInfo;
        this.taskLevel = taskLevel;
    }

    public int taskId;
    public String taskInfo;
    public int taskLevel;
}
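
The rest of this post is not included in the excerpt. As an illustration of the request/response pattern listed above, a client can send TestMsg as an ObjectMessage and use a temporary queue as the reply address; the broker URL, queue name, and class name below are assumptions.

package com.neohope.ActiveMQ.test;

import com.neohope.ActiveMQ.test.beans.TestMsg;
import org.apache.activemq.ActiveMQConnectionFactory;

import javax.jms.*;

public class ReqRspClient {
    public static void main(String[] args) throws Exception {
        // note: ActiveMQ 5.12.2+ also requires the payload's package to be
        // trusted (ActiveMQConnectionFactory.setTrustedPackages) before
        // ObjectMessage bodies can be deserialized
        ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616");
        Connection connection = factory.createConnection();
        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Destination requestQueue = session.createQueue("RequestQueue");

        // a temporary queue serves as the private reply-to address
        Destination replyQueue = session.createTemporaryQueue();

        MessageProducer producer = session.createProducer(requestQueue);
        ObjectMessage request = session.createObjectMessage(new TestMsg(1, "do something", 1));
        request.setJMSReplyTo(replyQueue);
        producer.send(request);

        // wait up to 5 seconds for the server's answer
        MessageConsumer consumer = session.createConsumer(replyQueue);
        Message reply = consumer.receive(5000);
        System.out.println(reply);

        connection.close();
    }
}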

ActiveMQ Communication Code 03

  • JMS-style invocation
  • Queue-based invocation
  • Topic-based invocation
  • Request/response invocation

1. TestMsg.java

package com.neohope.ActiveMQ.test.beans;

public class TestMsg implements java.io.Serializable{
    private static final long serialVersionUID = 12345678L;

    public TestMsg(int taskId, String taskInfo, int taskLevel) {
        this.taskId = taskId;
        this.taskInfo = taskInfo;
        this.taskLevel = taskLevel;
    }

    public int taskId;
    public String taskInfo;
    public int taskLevel;
}
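
The rest of this post is not included in the excerpt. As an illustration of the topic pattern listed above, a publisher can broadcast TestMsg to all active subscribers; the broker URL, topic name, and class name below are assumptions.

package com.neohope.ActiveMQ.test;

import com.neohope.ActiveMQ.test.beans.TestMsg;
import org.apache.activemq.ActiveMQConnectionFactory;

import javax.jms.*;

public class TopicPublishClient {
    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616");
        Connection connection = factory.createConnection();
        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Topic topic = session.createTopic("TestTopic");

        // every subscriber active on TestTopic receives its own copy
        MessageProducer producer = session.createProducer(topic);
        producer.send(session.createObjectMessage(new TestMsg(1, "broadcast", 1)));

        connection.close();
    }
}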

ActiveMQ Communication Code 02

  • JMS-style invocation
  • Queue-based invocation
  • Topic-based invocation
  • Request/response invocation

1. TestMsg.java

package com.neohope.ActiveMQ.test.beans;

public class TestMsg implements java.io.Serializable{
    private static final long serialVersionUID = 12345678L;

    public TestMsg(int taskId, String taskInfo, int taskLevel) {
        this.taskId = taskId;
        this.taskInfo = taskInfo;
        this.taskLevel = taskLevel;
    }

    public int taskId;
    public String taskInfo;
    public int taskLevel;
}
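
The rest of this post is not included in the excerpt. As an illustration of the queue pattern listed above, a sender can deliver TestMsg point-to-point; the broker URL, queue name, and class name below are assumptions.

package com.neohope.ActiveMQ.test;

import com.neohope.ActiveMQ.test.beans.TestMsg;
import org.apache.activemq.ActiveMQConnectionFactory;

import javax.jms.*;

public class QueueSendClient {
    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616");
        Connection connection = factory.createConnection();
        connection.start();

        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Queue queue = session.createQueue("TestQueue");

        // point-to-point: exactly one consumer receives each message
        MessageProducer producer = session.createProducer(queue);
        producer.send(session.createObjectMessage(new TestMsg(1, "task", 1)));

        connection.close();
    }
}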
