Storm's message acknowledgment mechanism and reliability
The worker process dies
Killing a worker process on a node (e.g. kill 2509) does not harm the topology: the worker is restarted (by its supervisor, or reassigned to another node) and continues executing the topology's tasks.
supervisor程序死掉
supervisor程序kill掉 對work程序沒有影響 因為他們是互相獨立的!!
The nimbus process dies (an HA problem)
If nimbus dies, topologies that are already running keep running, but nothing new can be submitted and failed workers can no longer be reassigned, so nimbus is a single point of failure. (Hadoop 2 has HA; this generation of Storm does not.)
A node goes down (the same situation as the supervisor dying; the dead node's tasks are reassigned to other nodes)
The ack/fail message acknowledgment mechanism
For every tuple the spout emits, the bolt must confirm whether the data was received, and that result is fed back to the spout's ack/fail callbacks. An example follows:
import java.util.Map;
import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;
public class ClusterStormTopologyAck {
public static class DataSourceSpout extends BaseRichSpout{
private Map conf;
private TopologyContext context;
private SpoutOutputCollector collector;
/**
 * Called once, when this spout instance is initialized.
 */
public void open(Map conf, TopologyContext context,
SpoutOutputCollector collector) {
this.conf = conf;
this.context = context;
this.collector = collector;
}
int i = 0;
/**
 * Called over and over in an endless loop (the spout's "heartbeat").
 */
public void nextTuple() {
System.err.println("spout :"+i);
//Values is just a list of the emitted values.
//emit(values, msgId): the msgId (here i-1) identifies this tuple
//in the ack()/fail() callbacks below.
this.collector.emit(new Values(i++), i-1);
Utils.sleep(1000);
}
/**
 * Declares the names of the output fields.
 */
public void declareOutputFields(OutputFieldsDeclarer declarer) {
//Fields is just a list of field names.
declarer.declare(new Fields("num"));
}
@Override
public void ack(Object msgId) {
System.out.println("執行ACK:"+msgId);
}
@Override
public void fail(Object msgId) {
System.out.println("執行FAIL:"+msgId);
//TODO--
//this.collector.emit(tuple);
}
}
public static class SumBolt extends BaseRichBolt{
private Map stormConf;
private TopologyContext context;
private OutputCollector collector;
/**
 * Called only once, when the bolt is initialized.
 */
public void prepare(Map stormConf, TopologyContext context,
OutputCollector collector) {
this.stormConf = stormConf;
this.context = context;
this.collector = collector;
}
int sum = 0;
/**
 * Called in a loop for every tuple sent by the upstream component (spout or bolt).
 */
public void execute(Tuple input) {
//input.getInteger(0) would read the same field by position.
Integer count = input.getIntegerByField("num");
try{
//process the tuple: accumulate the running sum
sum += count;
System.out.println("sum = " + sum);
//tell the spout this tuple was fully processed
this.collector.ack(input);
}catch(Exception e){
//tell the spout this tuple failed and should be replayed
this.collector.fail(input);
}
/* Alternatively, fail a range of tuples on purpose to watch fail() fire:
if(count > 10 && count < 20){
this.collector.fail(input);
}else{
this.collector.ack(input);
}*/
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
}
}
public static void main(String[] args) {
TopologyBuilder builder = new TopologyBuilder();
String SPOUT_NAME = DataSourceSpout.class.getSimpleName();
String BOLT_NAME = SumBolt.class.getSimpleName();
builder.setSpout(SPOUT_NAME, new DataSourceSpout());
builder.setBolt(BOLT_NAME, new SumBolt()).shuffleGrouping(SPOUT_NAME);
Config config = new Config();
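//Acking is enabled by default; setNumAckers sizes the pool of acker
//executors that track each tuple tree (setting it to 0 disables acking).
config.setNumAckers(1);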
try {
StormSubmitter.submitTopology(ClusterStormTopologyAck.class.getSimpleName(), config, builder.createTopology());
} catch (AlreadyAliveException e) {
e.printStackTrace();
} catch (InvalidTopologyException e) {
e.printStackTrace();
}
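//For a quick test without a cluster, replace StormSubmitter with a
//LocalCluster (backtype.storm.LocalCluster), which runs in-process:
//LocalCluster cluster = new LocalCluster();
//cluster.submitTopology("ack-demo", config, builder.createTopology());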
}
}
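The fail() callback above only logs the failure. To actually get at-least-once delivery, a spout has to cache every tuple it emits, keyed by msgId, drop the entry on ack, and re-emit it on fail. The following is a minimal sketch of such a replaying spout against the same backtype.storm API; the class name ReliableDataSourceSpout and the pending map are illustrative and not part of the original example.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

public class ReliableDataSourceSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    // tuples emitted but not yet acked, keyed by msgId
    private Map<Object, Values> pending;
    private int i = 0;

    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
        this.pending = new ConcurrentHashMap<Object, Values>();
    }

    public void nextTuple() {
        Values values = new Values(i);
        pending.put(i, values);        // remember the tuple until it is acked
        collector.emit(values, i);     // anchor it with msgId = i
        i++;
        Utils.sleep(1000);
    }

    @Override
    public void ack(Object msgId) {
        pending.remove(msgId);         // fully processed, forget it
    }

    @Override
    public void fail(Object msgId) {
        Values values = pending.get(msgId);
        if (values != null) {
            collector.emit(values, msgId); // replay the failed tuple
        }
    }

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("num"));
    }
}

This pending-map pattern is essentially what production-grade spouts do as well: track what is in flight, forget it once acked, and replay it when the tuple tree fails or times out.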