While setting up Hadoop 2.0 today, running hadoop fs -put on a file threw an error. I found the following fix online and am reposting it here. Already solved.

 

Reposted from: http://lykke.iteye.com/blog/1320558

Exception in thread "main" java.io.IOException: Bad connect ack with firstBadLink 192.168.1.14:50010
        at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.createBlockOutputStream(DFSClient.java:2903)
        at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2826)
        at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:2102)
        at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2288)

This error is reported when running hadoop put to upload a file.

The exception is thrown inside DFSClient, in createBlockOutputStream:

 


    // connects to the first datanode in the pipeline
    // Returns true if success, otherwise return failure.
    //
    private boolean createBlockOutputStream(DatanodeInfo[] nodes, String client,
                    boolean recoveryFlag) {
      String firstBadLink = "";
      if (LOG.isDebugEnabled()) {
        for (int i = 0; i < nodes.length; i++) {
          LOG.debug("pipeline = " + nodes[i].getName());
        }
      }

      // persist blocks on namenode on next flush
      persistBlocks = true;

      try {
        LOG.debug("Connecting to " + nodes[0].getName());
        InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
        s = socketFactory.createSocket();
        int timeoutValue = 3000 * nodes.length + socketTimeout;
        NetUtils.connect(s, target, timeoutValue);
        s.setSoTimeout(timeoutValue);
        s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
        LOG.debug("Send buf size " + s.getSendBufferSize());
        long writeTimeout = HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length +
                            datanodeWriteTimeout;

        //
        // Xmit header info to datanode
        //
        DataOutputStream out = new DataOutputStream(
            new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout), 
                                     DataNode.SMALL_BUFFER_SIZE));
        blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));

        out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
        out.write( DataTransferProtocol.OP_WRITE_BLOCK );
        out.writeLong( block.getBlockId() );
        out.writeLong( block.getGenerationStamp() );
        out.writeInt( nodes.length );
        out.writeBoolean( recoveryFlag );       // recovery flag
        Text.writeString( out, client );
        out.writeBoolean(false); // Not sending src node information
        out.writeInt( nodes.length - 1 );
        for (int i = 1; i < nodes.length; i++) {
          nodes[i].write(out);
        }
        checksum.writeHeader( out );
        out.flush();

        // receive ack for connect
        firstBadLink = Text.readString(blockReplyStream);
        if (firstBadLink.length() != 0) {
          throw new IOException("Bad connect ack with firstBadLink " + firstBadLink);
        }

        blockStream = out;
        return true;     // success

      } catch (IOException ie) {
        // ... (exception handling elided in the original post)
      }
    }

This means the client never received a proper connect ack for the write pipeline: the ack string read back (firstBadLink) names the first datanode that could not be reached, and a non-empty value triggers the IOException above. I resolved it with two changes:


1) /etc/init.d/iptables stop   -> stopped the firewall
2) Set SELINUX=disabled in /etc/selinux/config   -> disabled SELinux (takes effect after a reboot, or run setenforce 0 for the current session)
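
Before re-running the put, you can verify that the datanode's data-transfer port is actually reachable from the client. The sketch below is not from the original post; it assumes the datanode address from the error message above (192.168.1.14:50010, the default data-transfer port) and simply mimics the connect-with-timeout that createBlockOutputStream performs via NetUtils.connect():

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class DataNodePortCheck {
      public static void main(String[] args) {
        // Datanode address from the error message above -- substitute your own.
        String host = "192.168.1.14";
        int port = 50010;            // default HDFS datanode data-transfer port

        try (Socket s = new Socket()) {
          // Same connect-with-timeout pattern the DFSClient uses.
          s.connect(new InetSocketAddress(host, port), 3000);
          System.out.println("OK: " + host + ":" + port + " is reachable");
        } catch (IOException e) {
          // A timeout or "connection refused" here usually means iptables is
          // still blocking the port, or the datanode process is not running.
          System.out.println("FAILED: " + host + ":" + port + " -> " + e);
        }
      }
    }

If this connects after stopping iptables but failed before, the firewall was the culprit; run it against each datanode in the pipeline to find any remaining bad link.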

In general, this kind of Hadoop ack error is most often caused by a firewall that was never turned off on the datanodes.