Envoy source version: v1.23.1

source/common/network/connection_impl.cc, line 66

ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket,
                               TransportSocketPtr&& transport_socket,
                               StreamInfo::StreamInfo& stream_info, bool connected)
    : ConnectionImplBase(dispatcher, next_global_id_++),
      transport_socket_(std::move(transport_socket)), socket_(std::move(socket)),
      stream_info_(stream_info), filter_manager_(*this, *socket_),
      write_buffer_(dispatcher.getWatermarkFactory().createBuffer(
          [this]() -> void { this->onWriteBufferLowWatermark(); },
          [this]() -> void { this->onWriteBufferHighWatermark(); },
          []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })),
      read_buffer_(dispatcher.getWatermarkFactory().createBuffer(
          [this]() -> void { this->onReadBufferLowWatermark(); },
          [this]() -> void { this->onReadBufferHighWatermark(); },
          []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })),
      write_buffer_above_high_watermark_(false), detect_early_close_(true),
      enable_half_close_(false), read_end_stream_raised_(false), read_end_stream_(false),
      write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false),
      transport_wants_read_(false) {

  if (!connected) { // socket not yet connected: mark the connection as connecting
    connecting_ = true;
  }

  Event::FileTriggerType trigger = Event::PlatformDefaultTriggerType; // platform-default trigger type (edge-triggered except on Windows)

  // We never ask for both early close and read at the same time. If we are reading, we want to
  // consume all available data.
  socket_->ioHandle().initializeFileEvent(
      dispatcher_, [this](uint32_t events) -> void { onFileEvent(events); }, trigger,
      Event::FileReadyType::Read | Event::FileReadyType::Write); // register the file event callback for read and write readiness

  transport_socket_->setTransportSocketCallbacks(*this); // register transport socket callbacks

  // TODO(soulxu): generate the connection id inside the addressProvider directly,
  // then we don't need a setter or any of the optional stuff.
  socket_->connectionInfoProvider().setConnectionID(id()); // set the connection id
  socket_->connectionInfoProvider().setSslConnection(transport_socket_->ssl()); // set the SSL connection info
}
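
The two buffers above come from the dispatcher's watermark buffer factory: each one takes a low-watermark and a high-watermark callback (the overflow callback is still a TODO). The callbacks fire on threshold crossings, not on every add/drain. As a rough illustration of that pattern (a minimal sketch with a hypothetical WatermarkBuffer class, not Envoy's actual Buffer implementation):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Sketch of a watermark buffer: fires the high-watermark callback when the
// size first exceeds high_, and the low-watermark callback when it drops back
// to or below low_. Illustration only.
class WatermarkBuffer {
public:
  WatermarkBuffer(std::function<void()> below_low, std::function<void()> above_high,
                  uint64_t low, uint64_t high)
      : below_low_(std::move(below_low)), above_high_(std::move(above_high)), low_(low),
        high_(high) {}

  void add(const std::string& data) {
    size_ += data.size();
    if (!above_high_watermark_ && size_ > high_) {
      above_high_watermark_ = true;
      above_high_(); // in Envoy this typically leads to readDisable(true) upstream of the buffer
    }
  }

  void drain(uint64_t n) {
    size_ -= std::min(n, size_);
    if (above_high_watermark_ && size_ <= low_) {
      above_high_watermark_ = false;
      below_low_(); // typically resumes reading
    }
  }

private:
  std::function<void()> below_low_;
  std::function<void()> above_high_;
  const uint64_t low_;
  const uint64_t high_;
  uint64_t size_{0};
  bool above_high_watermark_{false};
};

int main() {
  WatermarkBuffer buf([] { std::cout << "low watermark\n"; },
                      [] { std::cout << "high watermark\n"; },
                      /*low=*/4, /*high=*/8);
  buf.add("0123456789"); // 10 bytes > 8: fires the high-watermark callback once
  buf.drain(7);          // 3 bytes <= 4: fires the low-watermark callback
}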

source/common/network/connection_impl.cc, line 561

void ConnectionImpl::onFileEvent(uint32_t events) { // file event callback
  ScopeTrackerScopeState scope(this, this->dispatcher_);
  ENVOY_CONN_LOG(trace, "socket event: {}", *this, events);

  if (immediate_error_event_ == ConnectionEvent::LocalClose ||
      immediate_error_event_ == ConnectionEvent::RemoteClose) { // a local/remote close error is pending
    if (bind_error_) { // the error came from a failed bind
      ENVOY_CONN_LOG(debug, "raising bind error", *this);
      // Update stats here, rather than on bind failure, to give the caller a chance to
      // setConnectionStats.
      if (connection_stats_ && connection_stats_->bind_errors_) {
        connection_stats_->bind_errors_->inc(); // bump the bind-error counter
      }
    } else {
      ENVOY_CONN_LOG(debug, "raising immediate error", *this);
    }
    closeSocket(immediate_error_event_); // close the socket
    return;
  }

  if (events & Event::FileReadyType::Closed) { // remote early-close event
    // We never ask for both early close and read at the same time. If we are reading, we want to
    // consume all available data.
    ASSERT(!(events & Event::FileReadyType::Read));
    ENVOY_CONN_LOG(debug, "remote early close", *this);
    closeSocket(ConnectionEvent::RemoteClose); // close the socket
    return;
  }

  if (events & Event::FileReadyType::Write) { // write-ready event
    onWriteReady(); // handle the write event
  }

  // It's possible for a write event callback to close the socket (which will cause fd_ to be -1).
  // In this case ignore read event processing.
  if (ioHandle().isOpen() && (events & Event::FileReadyType::Read)) { // read-ready event and the socket is still open
    onReadReady(); // handle the read event
  }
}

source/common/network/connection_impl.cc, line 239

void ConnectionImpl::closeSocket(ConnectionEvent close_type) { // close the socket
  if (!ConnectionImpl::ioHandle().isOpen()) { // already closed
    return;
  }

  // No need for a delayed close (if pending) now that the socket is being closed.
  if (delayed_close_timer_) { // a delayed-close timer is pending
    delayed_close_timer_->disableTimer(); // disable the timer
    delayed_close_timer_ = nullptr;
  }

  ENVOY_CONN_LOG(debug, "closing socket: {}", *this, static_cast<uint32_t>(close_type));
  transport_socket_->closeSocket(close_type); // let the transport socket close first

  // Drain input and output buffers.
  updateReadBufferStats(0, 0); // update read-buffer stats
  updateWriteBufferStats(0, 0); // update write-buffer stats

  // As the socket closes, drain any remaining data.
  // The data won't be written out at this point, and where there are reference
  // counted buffer fragments, it helps avoid lifetime issues with the
  // connection outlasting the subscriber.
  write_buffer_->drain(write_buffer_->length()); // drain the write buffer

  connection_stats_.reset(); // reset connection stats

  socket_->close(); // close the underlying socket

  // Call the base class directly as close() is called in the destructor.
  ConnectionImpl::raiseEvent(close_type); // raise the close event
}

source/common/network/connection_impl.cc, line 673

void ConnectionImpl::onWriteReady() { // handle write-ready
  ENVOY_CONN_LOG(trace, "write ready", *this);

  if (connecting_) { // the connect() is still in progress
    int error;
    socklen_t error_size = sizeof(error);
    RELEASE_ASSERT(
        socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).return_value_ == 0, ""); // fetch the pending socket error

    if (error == 0) { // no error: the connect completed
      ENVOY_CONN_LOG_EVENT(debug, "connection_connected", "connected", *this);
      connecting_ = false; // no longer connecting
      onConnected(); // run the connected callbacks
      transport_socket_->onConnected(); // notify the transport socket
      // It's possible that we closed during the connect callback.
      if (state() != State::Open) { // the connection was closed by a callback
        ENVOY_CONN_LOG_EVENT(debug, "connection_closed_callback", "close during connected callback",
                             *this);
        return;
      }
    } else {
      setFailureReason(absl::StrCat("delayed connect error: ", error)); // record the failure reason
      ENVOY_CONN_LOG_EVENT(debug, "connection_error", "{}", *this, transportFailureReason());
      closeSocket(ConnectionEvent::RemoteClose); // close the connection
      return;
    }
  }

  IoResult result = transport_socket_->doWrite(*write_buffer_, write_end_stream_); // write the buffered data
  ASSERT(!result.end_stream_read_); // The interface guarantees that only read operations set this.
  uint64_t new_buffer_size = write_buffer_->length(); // remaining write-buffer length
  updateWriteBufferStats(result.bytes_processed_, new_buffer_size); // update write-buffer stats

  // NOTE: If the delayed_close_timer_ is set, it must only trigger after a delayed_close_timeout_
  // period of inactivity from the last write event. Therefore, the timer must be reset to its
  // original timeout value unless the socket is going to be closed as a result of the doWrite().

  if (result.action_ == PostIoAction::Close) { // the transport asked to close
    // It is possible (though unlikely) for the connection to have already been closed during the
    // write callback. This can happen if we manage to complete the SSL handshake in the write
    // callback, raise a connected event, and close the connection.
    closeSocket(ConnectionEvent::RemoteClose); // close the socket
  } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) {
    ENVOY_CONN_LOG(debug, "write flush complete", *this);
    if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) { // close after flush-and-wait
      ASSERT(delayed_close_timer_ != nullptr && delayed_close_timer_->enabled());
      if (result.bytes_processed_ > 0) {
        delayed_close_timer_->enableTimer(delayed_close_timeout_); // re-arm the delayed-close timer
      }
    } else {
      ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush);
      closeConnectionImmediately(); // close the connection immediately
    }
  } else {
    ASSERT(result.action_ == PostIoAction::KeepOpen);
    ASSERT(!delayed_close_timer_ || delayed_close_timer_->enabled());
    if (delayed_close_timer_ != nullptr && result.bytes_processed_ > 0) { // timer pending and bytes were written
      delayed_close_timer_->enableTimer(delayed_close_timeout_); // re-arm the delayed-close timer
    }
    if (result.bytes_processed_ > 0) { // bytes were written
      auto it = bytes_sent_callbacks_.begin(); // iterate over the bytes-sent callbacks
      while (it != bytes_sent_callbacks_.end()) {
        if ((*it)(result.bytes_processed_)) { // the callback returned true: keep it
          // move to the next callback.
          it++;
        } else {
          // remove the current callback.
          it = bytes_sent_callbacks_.erase(it); // erase and advance
        }

        // If a callback closes the socket, stop iterating.
        if (!ioHandle().isOpen()) { // the socket was closed by a callback
          return;
        }
      }
    }
  }
}
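
The bytes-sent callback loop above is the classic erase-while-iterating pattern on a std::list: a callback returning true stays registered, false unregisters it, and erase() hands back the next valid iterator. A standalone sketch of just that pattern:

#include <cstdint>
#include <functional>
#include <iostream>
#include <list>

using BytesSentCb = std::function<bool(uint64_t)>;

int main() {
  std::list<BytesSentCb> callbacks;
  callbacks.push_back([](uint64_t n) {
    std::cout << "persistent callback: " << n << " bytes\n";
    return true; // keep me registered
  });
  callbacks.push_back([](uint64_t n) {
    std::cout << "one-shot callback: " << n << " bytes\n";
    return false; // remove me after the first delivery
  });

  for (int round = 0; round < 2; ++round) { // the one-shot fires only in round 0
    auto it = callbacks.begin();
    while (it != callbacks.end()) {
      if ((*it)(42)) {
        ++it; // keep: advance to the next callback
      } else {
        it = callbacks.erase(it); // remove: erase() returns the next iterator
      }
    }
  }
}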

source/common/network/connection_impl.cc, line 601

void ConnectionImpl::onReadReady() { // handle read-ready
  ENVOY_CONN_LOG(trace, "read ready. dispatch_buffered_data={}", *this,
                 static_cast<int>(dispatch_buffered_data_));
  const bool latched_dispatch_buffered_data = dispatch_buffered_data_;
  dispatch_buffered_data_ = false;

  ASSERT(!connecting_);

  // We get here while read disabled in two ways.
  // 1) There was a call to setTransportSocketIsReadable(), for example if a raw buffer socket ceded
  //    due to shouldDrainReadBuffer(). In this case we defer the event until the socket is read
  //    enabled.
  // 2) The consumer of connection data called readDisable(true), and instead of reading from the
  //    socket we simply need to dispatch already read data.
  if (read_disable_count_ != 0) {
    // Do not clear transport_wants_read_ when returning early; the early return skips the transport
    // socket doRead call.
    if (latched_dispatch_buffered_data && filterChainWantsData()) {
      onRead(read_buffer_->length()); // dispatch the already-buffered data
    }
    return;
  }

  // Clear transport_wants_read_ just before the call to doRead. This is the only way to ensure that
  // the transport socket read resumption happens as requested; onReadReady() returns early without
  // reading from the transport if the read buffer is above high watermark at the start of the
  // method.
  transport_wants_read_ = false;
  IoResult result = transport_socket_->doRead(*read_buffer_); // read from the transport
  uint64_t new_buffer_size = read_buffer_->length();
  updateReadBufferStats(result.bytes_processed_, new_buffer_size); // update read-buffer stats

  // If this connection doesn't have half-close semantics, translate end_stream into
  // a connection close.
  if ((!enable_half_close_ && result.end_stream_read_)) { // half-close disabled and end of stream was read
    result.end_stream_read_ = false;
    result.action_ = PostIoAction::Close;
  }

  read_end_stream_ |= result.end_stream_read_;
  if (result.bytes_processed_ != 0 || result.end_stream_read_ ||
      (latched_dispatch_buffered_data && read_buffer_->length() > 0)) {
    // Skip onRead if no bytes were processed unless we explicitly want to force onRead for
    // buffered data. For instance, skip onRead if the connection was closed without producing
    // more data.
    onRead(new_buffer_size); // dispatch the read data
  }

  // The read callback may have already closed the connection.
  if (result.action_ == PostIoAction::Close || bothSidesHalfClosed()) {
    ENVOY_CONN_LOG(debug, "remote close", *this);
    closeSocket(ConnectionEvent::RemoteClose); // close the socket
  }
}

source/common/network/raw_buffer_socket.cc, line 16

IoResult RawBufferSocket::doRead(Buffer::Instance& buffer) {
  PostIoAction action = PostIoAction::KeepOpen;
  uint64_t bytes_read = 0;
  bool end_stream = false;
  do {
    Api::IoCallUint64Result result = callbacks_->ioHandle().read(buffer, absl::nullopt); // read via the IO handle

    if (result.ok()) { // the read succeeded
      ENVOY_CONN_LOG(trace, "read returns: {}", callbacks_->connection(), result.return_value_);
      if (result.return_value_ == 0) { // a return value of 0 means EOF
        // Remote close.
        end_stream = true; // mark end of stream
        break;
      }
      bytes_read += result.return_value_; // accumulate the bytes read
      if (callbacks_->shouldDrainReadBuffer()) { // the read buffer should be drained first
        callbacks_->setTransportSocketIsReadable(); // cede now; resume later via an injected read event
        break;
      }
    } else {
      // Remote error (might be no data).
      ENVOY_CONN_LOG(trace, "read error: {}", callbacks_->connection(),
                     result.err_->getErrorDetails());
      if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { // a hard error (anything but EAGAIN)
        action = PostIoAction::Close; // request a close
      }
      break;
    }
  } while (true);

  return {action, bytes_read, end_stream};
}
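
doRead() loops until the handle reports Again (EAGAIN/EWOULDBLOCK), the peer closes (a 0-byte read), or the connection cedes via shouldDrainReadBuffer(). With edge-triggered events this draining is required, because readiness only fires again on a state transition. The same loop against the raw POSIX API, assuming a nonblocking fd:

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

// Drain a nonblocking fd until EAGAIN or EOF.
// Returns false on EOF or a hard error (analogous to PostIoAction::Close).
bool drainFd(int fd) {
  char buf[4096];
  while (true) {
    const ssize_t n = ::read(fd, buf, sizeof(buf));
    if (n > 0) {
      std::printf("read %zd bytes\n", n);
      continue; // keep draining; an edge-triggered event will not re-fire otherwise
    }
    if (n == 0) {
      return false; // EOF: remote close, analogous to end_stream = true
    }
    if (errno == EAGAIN || errno == EWOULDBLOCK) {
      return true; // kernel buffer drained; wait for the next event
    }
    return false; // hard error
  }
}

int main() {
  int fds[2];
  if (::pipe(fds) != 0) return 1;
  ::fcntl(fds[0], F_SETFL, O_NONBLOCK);
  ::write(fds[1], "hello", 5);
  drainFd(fds[0]); // prints "read 5 bytes", then returns true on EAGAIN
}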

IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) {
  PostIoAction action;
  uint64_t bytes_written = 0;
  ASSERT(!shutdown_ || buffer.length() == 0);
  do {
    if (buffer.length() == 0) {
      if (end_stream && !shutdown_) { // end of stream and not yet shut down
        // Ignore the result. This can only fail if the connection failed. In that case, the
        // error will be detected on the next read, and dealt with appropriately.
        callbacks_->ioHandle().shutdown(ENVOY_SHUT_WR); // half-close the write side
        shutdown_ = true;
      }
      action = PostIoAction::KeepOpen; // keep the connection open
      break;
    }
    Api::IoCallUint64Result result = callbacks_->ioHandle().write(buffer); // write via the IO handle

    if (result.ok()) { // the write succeeded
      ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.return_value_);
      bytes_written += result.return_value_; // accumulate the bytes written
    } else {
      ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(),
                     result.err_->getErrorDetails());
      if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { // EAGAIN: retry on the next write event
        action = PostIoAction::KeepOpen;
      } else {
        action = PostIoAction::Close;
      }
      break;
    }
  } while (true);

  return {action, bytes_written, false};
}
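
When the buffer is empty and end_stream is set, doWrite() half-closes the socket with shutdown(ENVOY_SHUT_WR): the write side is closed while the read side stays usable, which is what enable_half_close_ relies on. A minimal POSIX demonstration over a socketpair:

#include <cstdio>
#include <sys/socket.h>
#include <unistd.h>

int main() {
  int sv[2];
  if (::socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) return 1;

  ::write(sv[0], "bye", 3);
  ::shutdown(sv[0], SHUT_WR); // half-close: no more writes from sv[0]

  char buf[16];
  ssize_t n = ::read(sv[1], buf, sizeof(buf)); // still receives "bye"
  std::printf("peer read %zd bytes\n", n);
  n = ::read(sv[1], buf, sizeof(buf)); // then sees EOF (read returns 0)
  std::printf("peer read %zd bytes (EOF)\n", n);

  ::write(sv[1], "ack", 3); // the reverse direction still works
  n = ::read(sv[0], buf, sizeof(buf));
  std::printf("initiator read %zd bytes\n", n);
}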

source/common/network/io_socket_handle_impl.cc, line 94

Api::IoCallUint64Result IoSocketHandleImpl::readv(uint64_t max_length, Buffer::RawSlice* slices,
                                                  uint64_t num_slice) {
  absl::FixedArray<iovec> iov(num_slice); // iovec array, one entry per slice
  uint64_t num_slices_to_read = 0;
  uint64_t num_bytes_to_read = 0;
  for (; num_slices_to_read < num_slice && num_bytes_to_read < max_length; num_slices_to_read++) {
    iov[num_slices_to_read].iov_base = slices[num_slices_to_read].mem_; // point at the slice memory
    const size_t slice_length = std::min(slices[num_slices_to_read].len_,
                                         static_cast<size_t>(max_length - num_bytes_to_read)); // clamp so the total never exceeds max_length
    iov[num_slices_to_read].iov_len = slice_length; // set the slice length
    num_bytes_to_read += slice_length; // accumulate the bytes to read
  }
  ASSERT(num_bytes_to_read <= max_length);
  auto result = sysCallResultToIoCallResult(Api::OsSysCallsSingleton::get().readv(
      fd_, iov.begin(), static_cast<int>(num_slices_to_read))); // issue the readv(2) system call
  return result;
}
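
readv() fills several non-contiguous slices in a single system call, and the loop above also clamps the last slice so the total never exceeds max_length. The same scatter read with the raw POSIX API:

#include <cstdio>
#include <sys/uio.h>
#include <unistd.h>

int main() {
  int fds[2];
  if (::pipe(fds) != 0) return 1;
  ::write(fds[1], "helloworld", 10);

  char a[5], b[5];
  iovec iov[2];
  iov[0].iov_base = a; iov[0].iov_len = sizeof(a); // first slice
  iov[1].iov_base = b; iov[1].iov_len = sizeof(b); // second slice

  const ssize_t n = ::readv(fds[0], iov, 2); // one syscall, two buffers filled
  std::printf("readv: %zd bytes, a=%.5s b=%.5s\n", n, a, b);
}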

Api::IoCallUint64Result IoSocketHandleImpl::read(Buffer::Instance& buffer,
                                                 absl::optional<uint64_t> max_length_opt) { // read into the buffer
  const uint64_t max_length = max_length_opt.value_or(UINT64_MAX); // default: no length cap
  if (max_length == 0) { // nothing to read
    return Api::ioCallUint64ResultNoError();
  }
  Buffer::Reservation reservation = buffer.reserveForRead(); // reserve writable slices in the buffer
  Api::IoCallUint64Result result = readv(std::min(reservation.length(), max_length),
                                         reservation.slices(), reservation.numSlices()); // scatter-read into the reservation
  uint64_t bytes_to_commit = result.ok() ? result.return_value_ : 0; // bytes actually read
  ASSERT(bytes_to_commit <= max_length);
  reservation.commit(bytes_to_commit); // commit the filled portion
  return result;
}

Api::IoCallUint64Result IoSocketHandleImpl::writev(const Buffer::RawSlice* slices,
                                                   uint64_t num_slice) {
  absl::FixedArray<iovec> iov(num_slice); // iovec array
  uint64_t num_slices_to_write = 0;
  for (uint64_t i = 0; i < num_slice; i++) { // walk the slices
    if (slices[i].mem_ != nullptr && slices[i].len_ != 0) { // skip null or empty slices
      iov[num_slices_to_write].iov_base = slices[i].mem_; // set the memory
      iov[num_slices_to_write].iov_len = slices[i].len_; // set the length
      num_slices_to_write++; // one more slice to write
    }
  }
  if (num_slices_to_write == 0) { // nothing to write
    return Api::ioCallUint64ResultNoError();
  }
  auto result = sysCallResultToIoCallResult(
      Api::OsSysCallsSingleton::get().writev(fd_, iov.begin(), num_slices_to_write)); // issue the writev(2) system call
  return result;
}

Api::IoCallUint64Result IoSocketHandleImpl::write(Buffer::Instance& buffer) { // write the buffer
  constexpr uint64_t MaxSlices = 16;
  Buffer::RawSliceVector slices = buffer.getRawSlices(MaxSlices); // grab up to 16 slices
  Api::IoCallUint64Result result = writev(slices.begin(), slices.size()); // gathered write
  if (result.ok() && result.return_value_ > 0) { // some bytes were written
    buffer.drain(static_cast<uint64_t>(result.return_value_)); // drain the written bytes from the buffer
  }
  return result;
}
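
writev() mirrors this on the output side: null or empty slices are filtered out before the iovec array is handed to the kernel. A condensed, standalone version of that filtering (the RawSlice struct here is a stand-in for Buffer::RawSlice):

#include <cstdio>
#include <sys/uio.h>
#include <unistd.h>
#include <vector>

struct RawSlice {
  void* mem_;
  size_t len_;
};

// Build an iovec array from slices, skipping null/empty entries, then issue a
// single gathered write, as IoSocketHandleImpl::writev does.
ssize_t gatherWrite(int fd, const std::vector<RawSlice>& slices) {
  std::vector<iovec> iov;
  for (const RawSlice& s : slices) {
    if (s.mem_ != nullptr && s.len_ != 0) {
      iov.push_back({s.mem_, s.len_});
    }
  }
  if (iov.empty()) return 0; // nothing to write
  return ::writev(fd, iov.data(), static_cast<int>(iov.size()));
}

int main() {
  int fds[2];
  if (::pipe(fds) != 0) return 1;
  char h[] = "hello", w[] = "world";
  const ssize_t n = gatherWrite(fds[1], {{h, 5}, {nullptr, 0}, {w, 5}});
  std::printf("wrote %zd bytes\n", n); // 10: the empty slice was skipped
}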

source/common/network/connection_impl.cc, line 312

void ConnectionImpl::onRead(uint64_t read_buffer_size) { // dispatch read data
  ASSERT(dispatcher_.isThreadSafe());
  if (inDelayedClose() || !filterChainWantsData()) { // in delayed close, or the filter chain does not want data
    return;
  }
  ASSERT(ioHandle().isOpen());

  if (read_buffer_size == 0 && !read_end_stream_) { // nothing buffered and the stream has not ended
    return;
  }

  if (read_end_stream_) { // the read stream has ended
    // read() on a raw socket will repeatedly return 0 (EOF) once EOF has
    // occurred, so filter out the repeats so that filters don't have
    // to handle repeats.
    //
    // I don't know of any cases where this actually happens (we should stop
    // reading the socket after EOF), but this check guards against any bugs
    // in ConnectionImpl or strangeness in the OS events (epoll, kqueue, etc)
    // and maintains the guarantee for filters.
    if (read_end_stream_raised_) { // end_stream was already raised: suppress repeats
      // No further data can be delivered after end_stream
      ASSERT(read_buffer_size == 0);
      return;
    }
    read_end_stream_raised_ = true; // raise end_stream exactly once
  }

  filter_manager_.onRead(); // hand the data to the filter manager
}

source/common/network/filter_manager_impl.cc, line 86

void FilterManagerImpl::onRead() {
  ASSERT(!upstream_filters_.empty());
  onContinueReading(nullptr, connection_); // start from the first read filter
}

source/common/network/filter_manager_impl.cc, line 48

void FilterManagerImpl::onContinueReading(ActiveReadFilter* filter,
                                          ReadBufferSource& buffer_source) {
  // Filter could return status == FilterStatus::StopIteration immediately, close the connection and
  // use callback to call this function.
  if (connection_.state() != Connection::State::Open) { // the connection is no longer open
    return;
  }

  std::list<ActiveReadFilterPtr>::iterator entry; // iterator into the read filter list
  if (!filter) { // no filter given: start from the beginning
    connection_.streamInfo().addBytesReceived(buffer_source.getReadBuffer().buffer.length()); // account received bytes in the stream info
    entry = upstream_filters_.begin(); // start at the first filter
  } else {
    entry = std::next(filter->entry()); // resume from the filter after the caller
  }

  for (; entry != upstream_filters_.end(); entry++) { // walk the remaining filters
    if (!(*entry)->filter_) { // no filter attached
      continue;
    }
    if (!(*entry)->initialized_) { // the filter is not yet initialized
      (*entry)->initialized_ = true; // mark it initialized
      FilterStatus status = (*entry)->filter_->onNewConnection(); // deliver the new-connection event
      if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) { // the filter stopped iteration or the connection closed
        return;
      }
    }

    StreamBuffer read_buffer = buffer_source.getReadBuffer(); // fetch the read buffer
    if (read_buffer.buffer.length() > 0 || read_buffer.end_stream) { // there is data, or the stream ended
      FilterStatus status = (*entry)->filter_->onData(read_buffer.buffer, read_buffer.end_stream); // deliver the data
      if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) { // the filter stopped iteration or the connection closed
        return;
      }
    }
  }
}
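
A read filter chain is an ordered list; any filter can return StopIteration to pause delivery (e.g. until an upstream is ready), and a later onContinueReading() resumes from the next entry. A stripped-down model of that control flow, with hypothetical filter names:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

enum class FilterStatus { Continue, StopIteration };

struct ReadFilter {
  virtual ~ReadFilter() = default;
  virtual FilterStatus onData(std::string& data) = 0;
};

struct LimiterFilter : ReadFilter { // stands in for e.g. the connection_limit filter
  bool rejected_{false};
  FilterStatus onData(std::string&) override {
    return rejected_ ? FilterStatus::StopIteration : FilterStatus::Continue;
  }
};

struct EchoFilter : ReadFilter { // terminal filter, stands in for e.g. tcp_proxy
  FilterStatus onData(std::string& data) override {
    std::cout << "echo: " << data << "\n";
    return FilterStatus::Continue;
  }
};

// Walk the chain in order; StopIteration halts delivery to later filters.
void onRead(std::vector<std::unique_ptr<ReadFilter>>& chain, std::string data) {
  for (auto& filter : chain) {
    if (filter->onData(data) == FilterStatus::StopIteration) {
      return; // a resumption would continue from the *next* filter
    }
  }
}

int main() {
  std::vector<std::unique_ptr<ReadFilter>> chain;
  chain.push_back(std::make_unique<LimiterFilter>());
  chain.push_back(std::make_unique<EchoFilter>());
  onRead(chain, "payload"); // limiter passes, so this prints "echo: payload"
}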

source/extensions/filters/network/connection_limit/connection_limit.cc, line 61

Network::FilterStatus Filter::onNewConnection() {
  if (!config_->enabled()) { // the filter is disabled via runtime
    ENVOY_CONN_LOG(trace, "connection_limit: runtime disabled", read_callbacks_->connection());
    return Network::FilterStatus::Continue;
  }

  config_->stats().active_connections_.inc(); // bump the active-connections gauge

  if (!config_->incrementConnectionWithinLimit()) { // the connection limit has been reached
    config_->stats().limited_connections_.inc(); // bump the limited-connections counter
    ENVOY_CONN_LOG(trace, "connection_limit: connection limiting connection",
                   read_callbacks_->connection());

    // Set is_rejected_ is true, so that onData() will return StopIteration during the delay time.
    is_rejected_ = true; // mark this connection as rejected
    // The close() will trigger onEvent() with close event, increment the active connections count.
    config_->incrementConnection(); // account the connection anyway

    // Delay rejection provides a better DoS protection for Envoy.
    absl::optional<std::chrono::milliseconds> duration = config_->delay(); // configured rejection delay
    if (duration.has_value() && duration.value() > std::chrono::milliseconds(0)) { // a positive delay is configured
      delay_timer_ = read_callbacks_->connection().dispatcher().createTimer([this]() -> void {
        resetTimerState();
        read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); // close after the delay
      }); // create the delay timer
      delay_timer_->enableTimer(duration.value()); // arm the timer
    } else {
      read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); // close immediately
    }
    return Network::FilterStatus::StopIteration; // stop the filter chain
  }

  return Network::FilterStatus::Continue; // continue the filter chain
}

source/extensions/filters/network/connection_limit/connection_limit.cc, line 54

Network::FilterStatus Filter::onData(Buffer::Instance&, bool) { // data callback
  if (is_rejected_) { // this connection was rejected
    return Network::FilterStatus::StopIteration; // swallow data until the delayed close fires
  }
  return Network::FilterStatus::Continue; // continue the filter chain
}

source/common/tcp_proxy/tcp_proxy.cc, line 574

Network::FilterStatus Filter::onData(Buffer::Instance& data, bool end_stream) { // tcp_proxy data callback
  ENVOY_CONN_LOG(trace, "downstream connection received {} bytes, end_stream={}",
                 read_callbacks_->connection(), data.length(), end_stream);
  getStreamInfo().getDownstreamBytesMeter()->addWireBytesReceived(data.length()); // account received wire bytes
  if (upstream_) { // an upstream is attached
    getStreamInfo().getUpstreamBytesMeter()->addWireBytesSent(data.length()); // account sent wire bytes
    upstream_->encodeData(data, end_stream); // forward the data upstream
  }
  // The upstream should consume all of the data.
  // Before there is an upstream the connection should be readDisabled. If the upstream is
  // destroyed, there should be no further reads as well.
  ASSERT(0 == data.length());
  resetIdleTimer(); // TODO(ggreenway) PERF: do we need to reset timer on both send and receive?
  return Network::FilterStatus::StopIteration; // stop the filter chain
}

source/common/tcp_proxy/upstream.cc, line 38

void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { // forward data upstream
  upstream_conn_data_->connection().write(data, end_stream); // write to the upstream connection
}

source/common/network/connection_impl.cc, line 445

void ConnectionImpl::write(Buffer::Instance& data, bool end_stream) { // public write entry point: route through the filter chain
  write(data, end_stream, true);
}

source/common/network/connection_impl.cc, line 449

void ConnectionImpl::write(Buffer::Instance& data, bool end_stream, bool through_filter_chain) {
  ASSERT(!end_stream || enable_half_close_);
  ASSERT(dispatcher_.isThreadSafe());

  if (write_end_stream_) { // end_stream was already written
    // It is an API violation to write more data after writing end_stream, but a duplicate
    // end_stream with no data is harmless. This catches misuse of the API that could result in data
    // being lost.
    ASSERT(data.length() == 0 && end_stream);

    return;
  }

  if (through_filter_chain) { // run the write filter chain first
    // NOTE: This is kind of a hack, but currently we don't support restart/continue on the write
    //       path, so we just pass around the buffer passed to us in this function. If we ever
    //       support buffer/restart/continue on the write path this needs to get more complicated.
    current_write_buffer_ = &data; // stash the caller's buffer for the filters
    current_write_end_stream_ = end_stream; // stash the end_stream flag
    FilterStatus status = filter_manager_.onWrite(); // run the write filters
    current_write_buffer_ = nullptr;

    if (FilterStatus::StopIteration == status) { // a write filter stopped iteration
      return;
    }
  }

  write_end_stream_ = end_stream; // latch the write end-of-stream state
  if (data.length() > 0 || end_stream) { // there is data, or the stream is ending
    ENVOY_CONN_LOG(trace, "writing {} bytes, end_stream {}", *this, data.length(), end_stream);
    // TODO(mattklein123): All data currently gets moved from the source buffer to the write buffer.
    // This can lead to inefficient behavior if writing a bunch of small chunks. In this case, it
    // would likely be more efficient to copy data below a certain size. VERY IMPORTANT: If this is
    // ever changed, read the comment in SslSocket::doWrite() VERY carefully. That code assumes that
    // we never change existing write_buffer_ chain elements between calls to SSL_write(). That code
    // might need to change if we ever copy here.
    write_buffer_->move(data);

    // Activating a write event before the socket is connected has the side-effect of tricking
    // doWriteReady into thinking the socket is connected. On macOS, the underlying write may fail
    // with a connection error if a call to write(2) occurs before the connection is completed.
    if (!connecting_) { // only once the socket has connected
      ioHandle().activateFileEvents(Event::FileReadyType::Write); // inject a write event to flush the buffer
    }
  }
}

source/common/network/filter_manager_impl.cc, line 91

FilterStatus FilterManagerImpl::onWrite() { return onWrite(nullptr, connection_); } // start from the first write filter

FilterStatus FilterManagerImpl::onWrite(ActiveWriteFilter* filter,
                                        WriteBufferSource& buffer_source) {
  // Filter could return status == FilterStatus::StopIteration immediately, close the connection and
  // use callback to call this function.
  if (connection_.state() != Connection::State::Open) { // the connection is no longer open
    return FilterStatus::StopIteration;
  }

  std::list<ActiveWriteFilterPtr>::iterator entry; // iterator into the write filter list
  if (!filter) { // no filter given: start from the beginning
    entry = downstream_filters_.begin(); // start at the first downstream filter
  } else {
    entry = std::next(filter->entry()); // resume from the filter after the caller
  }

  for (; entry != downstream_filters_.end(); entry++) { // walk the remaining filters
    StreamBuffer write_buffer = buffer_source.getWriteBuffer(); // fetch the write buffer
    FilterStatus status = (*entry)->filter_->onWrite(write_buffer.buffer, write_buffer.end_stream); // deliver the write data
    if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) { // the filter stopped iteration or the connection closed
      return FilterStatus::StopIteration;
    }
  }

  // Report the final bytes written to the wire
  connection_.streamInfo().addBytesSent(buffer_source.getWriteBuffer().buffer.length()); // account sent bytes in the stream info
  return FilterStatus::Continue;
}

source/common/network/io_socket_handle_impl.cc, line 590

void IoSocketHandleImpl::activateFileEvents(uint32_t events) { // inject (activate) file events
  if (file_event_) {
    file_event_->activate(events); // activate the requested events
  } else {
    ENVOY_BUG(false, "Null file_event_");
  }
}

source/common/event/file_event_impl.cc, line 35

void FileEventImpl::activate(uint32_t events) {
  ASSERT(dispatcher_.isThreadSafe());

  // events is not empty.
  ASSERT(events != 0);
  // Only supported event types are set.
  ASSERT((events & (FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed)) == events);

  // Schedule the activation callback so it runs as part of the next loop iteration if it is not
  // already scheduled.
  if (injected_activation_events_ == 0) {
    ASSERT(!activation_cb_->enabled());
    activation_cb_->scheduleCallbackNextIteration();
  }
  ASSERT(activation_cb_->enabled());

  // Merge new events with pending injected events.
  injected_activation_events_ |= events;
}
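
activate() does not run the event callback synchronously: it ORs the requested events into injected_activation_events_ and schedules a single callback for the next loop iteration, so several activate() calls within one iteration collapse into one delivery. The merging logic in isolation (a sketch, not the FileEventImpl API):

#include <cstdint>
#include <cstdio>
#include <functional>

// Event-activation coalescing: repeated activate() calls within one loop
// iteration merge their bitmasks and trigger the callback exactly once.
struct InjectedEvents {
  uint32_t pending{0};
  bool scheduled{false};
  std::function<void(uint32_t)> on_events;

  void activate(uint32_t events) {
    if (pending == 0) {
      scheduled = true; // schedule delivery for the next loop iteration
    }
    pending |= events; // merge with already-pending injected events
  }

  void runLoopIteration() { // stand-in for the dispatcher's next iteration
    if (!scheduled) return;
    const uint32_t events = pending;
    pending = 0;
    scheduled = false;
    on_events(events);
  }
};

int main() {
  constexpr uint32_t Read = 1, Write = 2;
  InjectedEvents ev;
  ev.on_events = [](uint32_t e) { std::printf("fired once, events=0x%x\n", e); };
  ev.activate(Write);
  ev.activate(Read);     // merged; no second callback is scheduled
  ev.runLoopIteration(); // prints: fired once, events=0x3
}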

source/common/event/schedulable_cb_impl.cc, line 32

void SchedulableCallbackImpl::scheduleCallbackNextIteration() {
  if (enabled()) { // already scheduled
    return;
  }
  // libevent computes the list of timers to move to the work list after polling for fd events, but
  // iteration through the work list starts. Zero delay timers added while iterating through the
  // work list execute on the next iteration of the event loop.
  const timeval zero_tv{};
  event_add(&raw_event_, &zero_tv); // register with libevent using a zero timeout
}

source/common/tcp_proxy/tcp_proxy.cc, line 590

Network::FilterStatus Filter::onNewConnection() { // tcp_proxy new-connection callback
  if (config_->maxDownstreamConnectionDuration()) { // a max downstream connection duration is configured
    connection_duration_timer_ = read_callbacks_->connection().dispatcher().createTimer(
        [this]() -> void { onMaxDownstreamConnectionDuration(); }); // create the duration timer
    connection_duration_timer_->enableTimer(config_->maxDownstreamConnectionDuration().value()); // arm it
  }

  ASSERT(upstream_ == nullptr);
  route_ = pickRoute(); // pick the route
  return establishUpstreamConnection(); // establish the upstream connection
}

source/common/tcp_proxy/tcp_proxy.h, line 426

  virtual RouteConstSharedPtr pickRoute() { // pick the route
    return config_->getRouteFromEntries(read_callbacks_->connection());
  }

source/common/tcp_proxy/tcp_proxy.cc, line 161

RouteConstSharedPtr Config::getRouteFromEntries(Network::Connection& connection) {
  if (weighted_clusters_.empty()) { // no weighted clusters configured
    return getRegularRouteFromEntries(connection); // use the regular route lookup
  }
  return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_,
                                          random_generator_.random(), false); // pick a weighted cluster
}

source/common/tcp_proxy/tcp_proxy.cc, line 144

RouteConstSharedPtr Config::getRegularRouteFromEntries(Network::Connection& connection) {
  // First check the per-connection filter state to see if we need to route to a
  // pre-selected cluster.
  if (const auto* per_connection_cluster =
          connection.streamInfo().filterState()->getDataReadOnly<PerConnectionCluster>(
              PerConnectionCluster::key()); // look up the per-connection cluster
      per_connection_cluster != nullptr) { // a cluster was pre-selected
    return std::make_shared<const SimpleRouteImpl>(*this, per_connection_cluster->value()); // build a route for it
  }

  if (default_route_ != nullptr) { // otherwise fall back to the default route
    return default_route_;
  }

  // no match, no more routes to try
  return nullptr;
}

envoy/stream_info/filter_state.h, line 134

  template <typename T> const T* getDataReadOnly(absl::string_view data_name) const { // typed read-only lookup
    return dynamic_cast<const T*>(getDataReadOnlyGeneric(data_name));
  }

source/common/stream_info/filter_state_impl.cc, line 50

const FilterState::Object*
FilterStateImpl::getDataReadOnlyGeneric(absl::string_view data_name) const {
  const auto it = data_storage_.find(data_name); // look the name up in this scope

  if (it == data_storage_.end()) { // not found locally
    if (parent_) { // a parent scope exists
      return parent_->getDataReadOnlyGeneric(data_name); // recurse into the parent
    }
    return nullptr;
  }

  const FilterStateImpl::FilterObject* current = it->second.get(); // the stored filter object
  return current->data_.get(); // return the underlying data
}
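
A lookup that misses locally falls through to the parent FilterState, which is how connection-scoped data (like PerConnectionCluster above) stays visible from narrower scopes. The recursive-lookup shape in a generic form (hypothetical ScopedStore, not the FilterState API):

#include <iostream>
#include <map>
#include <string>

// A scoped key/value store where a miss falls through to the parent scope,
// mirroring FilterStateImpl::getDataReadOnlyGeneric().
struct ScopedStore {
  const ScopedStore* parent{nullptr};
  std::map<std::string, std::string> data;

  const std::string* find(const std::string& key) const {
    const auto it = data.find(key);
    if (it == data.end()) {
      return parent ? parent->find(key) : nullptr; // recurse up the chain
    }
    return &it->second;
  }
};

int main() {
  ScopedStore connection_scope;
  connection_scope.data["cluster"] = "backend-a";
  ScopedStore request_scope;
  request_scope.parent = &connection_scope;

  if (const std::string* v = request_scope.find("cluster")) {
    std::cout << "found in parent scope: " << *v << "\n"; // backend-a
  }
}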

source/common/common/utility.h, line 492

  template <typename WeightedClusterEntry>
  static const WeightedClusterEntry&
  pickCluster(const std::vector<WeightedClusterEntry>& weighted_clusters,
              const uint64_t total_cluster_weight, const uint64_t random_value,
              const bool ignore_overflow) {
    uint64_t selected_value = random_value % total_cluster_weight; // map the random value into [0, total_cluster_weight)
    uint64_t begin = 0;
    uint64_t end = 0;

    // Find the right cluster to route to based on the interval in which
    // the selected value falls. The intervals are determined as
    // [0, cluster1_weight), [cluster1_weight, cluster1_weight+cluster2_weight),..
    for (const WeightedClusterEntry& cluster : weighted_clusters) {
      end = begin + cluster->clusterWeight();
      if (!ignore_overflow) {
        // end > total_cluster_weight: This case can only occur with Runtimes,
        // when the user specifies invalid weights such that
        // sum(weights) > total_cluster_weight.
        ASSERT(end <= total_cluster_weight);
      }

      if (selected_value >= begin && selected_value < end) { // the selected value falls in this cluster's interval
        return cluster;
      }
      begin = end;
    }

    PANIC("unexpectedly reached");
  }
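
pickCluster() lays the clusters out on adjacent half-open intervals covering [0, total_cluster_weight) and returns the one whose interval contains random % total. For weights {a:3, b:1} the intervals are [0,3) and [3,4), so a is chosen with probability 3/4. A standalone version with plain structs:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

struct WeightedCluster {
  std::string name;
  uint64_t weight;
};

// Interval-based weighted selection: [0,w1), [w1,w1+w2), ...
const WeightedCluster& pickCluster(const std::vector<WeightedCluster>& clusters,
                                   uint64_t total_weight, uint64_t random_value) {
  const uint64_t selected = random_value % total_weight;
  uint64_t begin = 0;
  for (const WeightedCluster& c : clusters) {
    const uint64_t end = begin + c.weight;
    if (selected >= begin && selected < end) {
      return c;
    }
    begin = end;
  }
  std::abort(); // unreachable if the weights sum to total_weight
}

int main() {
  const std::vector<WeightedCluster> clusters{{"a", 3}, {"b", 1}};
  for (uint64_t r = 0; r < 4; ++r) { // r in [0,3) -> a, r == 3 -> b
    std::printf("random=%llu -> %s\n", static_cast<unsigned long long>(r),
                pickCluster(clusters, 4, r).name.c_str());
  }
}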

source/common/tcp_proxy/tcp_proxy.cc, line 342

Network::FilterStatus Filter::establishUpstreamConnection() { // establish the upstream connection
  const std::string& cluster_name = route_ ? route_->clusterName() : EMPTY_STRING; // the upstream cluster name
  Upstream::ThreadLocalCluster* thread_local_cluster =
      cluster_manager_.getThreadLocalCluster(cluster_name); // look up the thread-local cluster

  if (!thread_local_cluster) { // the cluster is not known on this thread
    auto odcds = config_->onDemandCds(); // on-demand CDS handle
    if (!odcds.has_value()) { // on-demand discovery is not configured
      // No ODCDS? It means that on-demand discovery is disabled.
      ENVOY_CONN_LOG(debug, "Cluster not found {} and no on demand cluster set.",
                     read_callbacks_->connection(), cluster_name);
      config_->stats().downstream_cx_no_route_.inc(); // bump the no-route counter
      getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoClusterFound); // set the response flag
      onInitFailure(UpstreamFailureReason::NoRoute); // handle the failure
    } else {
      ASSERT(!cluster_discovery_handle_);
      auto callback = std::make_unique<Upstream::ClusterDiscoveryCallback>( // on-demand CDS completion callback
          [this](Upstream::ClusterDiscoveryStatus cluster_status) {
            onClusterDiscoveryCompletion(cluster_status);
          });
      config_->onDemandStats().on_demand_cluster_attempt_.inc(); // bump the attempt counter
      cluster_discovery_handle_ = odcds->requestOnDemandClusterDiscovery( // kick off on-demand discovery
          cluster_name, std::move(callback), config_->odcdsTimeout());
    }
    return Network::FilterStatus::StopIteration;
  }

  ENVOY_CONN_LOG(debug, "Creating connection to cluster {}", read_callbacks_->connection(),
                 cluster_name);

  const Upstream::ClusterInfoConstSharedPtr& cluster = thread_local_cluster->info(); // cluster info
  getStreamInfo().setUpstreamClusterInfo(cluster); // record it in the stream info

  // Check this here because the TCP conn pool will queue our request waiting for a connection that
  // will never be released.
  if (!cluster->resourceManager(Upstream::ResourcePriority::Default).connections().canCreate()) { // the connection circuit breaker is open
    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); // set the response flag
    cluster->stats().upstream_cx_overflow_.inc(); // bump the overflow counter
    onInitFailure(UpstreamFailureReason::ResourceLimitExceeded); // handle the failure
    return Network::FilterStatus::StopIteration;
  }

  const uint32_t max_connect_attempts = config_->maxConnectAttempts(); // configured retry budget
  if (connect_attempts_ >= max_connect_attempts) { // the retry budget is exhausted
    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded); // set the response flag
    cluster->stats().upstream_cx_connect_attempts_exceeded_.inc(); // bump the counter
    onInitFailure(UpstreamFailureReason::ConnectFailed); // handle the failure
    return Network::FilterStatus::StopIteration;
  }

  auto& downstream_connection = read_callbacks_->connection(); // the downstream connection
  auto& filter_state = downstream_connection.streamInfo().filterState(); // its filter state
  if (!filter_state->hasData<Network::ProxyProtocolFilterState>(
          Network::ProxyProtocolFilterState::key())) { // no proxy-protocol state yet
    filter_state->setData(
        Network::ProxyProtocolFilterState::key(),
        std::make_shared<Network::ProxyProtocolFilterState>(Network::ProxyProtocolData{
            downstream_connection.connectionInfoProvider().remoteAddress(),
            downstream_connection.connectionInfoProvider().localAddress()}),
        StreamInfo::FilterState::StateType::ReadOnly,
        StreamInfo::FilterState::LifeSpan::Connection); // store the downstream addresses
  }
  transport_socket_options_ =
      Network::TransportSocketOptionsUtility::fromFilterState(*filter_state); // derive the transport socket options

  if (auto typed_state = filter_state->getDataReadOnly<Network::UpstreamSocketOptionsFilterState>(
          Network::UpstreamSocketOptionsFilterState::key()); // look up upstream socket options
      typed_state != nullptr) {
    auto downstream_options = typed_state->value(); // the downstream-provided options
    if (!upstream_options_) { // no upstream options yet
      upstream_options_ = std::make_shared<Network::Socket::Options>(); // create them
    }
    Network::Socket::appendOptions(upstream_options_, downstream_options); // append the downstream options
  }

  if (!maybeTunnel(*thread_local_cluster)) {
    // Either cluster is unknown or there are no healthy hosts. tcpConnPool() increments
    // cluster->stats().upstream_cx_none_healthy in the latter case.
    getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream); // set the response flag
    onInitFailure(UpstreamFailureReason::NoHealthyUpstream); // handle the failure
  }
  return Network::FilterStatus::StopIteration;
}

source/common/tcp_proxy/tcp_proxy.cc, line 426

void Filter::onClusterDiscoveryCompletion(Upstream::ClusterDiscoveryStatus cluster_status) {
  // Clear the cluster_discovery_handle_ before calling establishUpstreamConnection since we may
  // request cluster again.
  cluster_discovery_handle_.reset(); // drop the discovery handle
  const std::string& cluster_name = route_ ? route_->clusterName() : EMPTY_STRING; // the requested cluster name
  switch (cluster_status) { // inspect the discovery status
  case Upstream::ClusterDiscoveryStatus::Missing: // the cluster does not exist
    ENVOY_CONN_LOG(debug, "On demand cluster {} is missing", read_callbacks_->connection(),
                   cluster_name);
    config_->onDemandStats().on_demand_cluster_missing_.inc(); // bump the missing counter
    break;
  case Upstream::ClusterDiscoveryStatus::Timeout: // discovery timed out
    ENVOY_CONN_LOG(debug, "On demand cluster {} was not found before timeout.",
                   read_callbacks_->connection(), cluster_name);
    config_->onDemandStats().on_demand_cluster_timeout_.inc(); // bump the timeout counter
    break;
  case Upstream::ClusterDiscoveryStatus::Available: // the cluster is now available
    // cluster_discovery_handle_ would have been cancelled if the downstream were closed.
    ASSERT(!downstream_closed_);
    ENVOY_CONN_LOG(debug, "On demand cluster {} is found. Establishing connection.",
                   read_callbacks_->connection(), cluster_name);
    config_->onDemandStats().on_demand_cluster_success_.inc(); // bump the success counter
    establishUpstreamConnection(); // retry the connection establishment
    return;
  }
  // Failure path.
  config_->stats().downstream_cx_no_route_.inc(); // bump the no-route counter
  getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoClusterFound); // set the response flag
  onInitFailure(UpstreamFailureReason::NoRoute); // handle the failure
}

source/common/tcp_proxy/tcp_proxy.cc, line 457

bool Filter::maybeTunnel(Upstream::ThreadLocalCluster& cluster) {
  GenericConnPoolFactory* factory = nullptr;
  if (cluster.info()->upstreamConfig().has_value()) { // an explicit upstream config is present
    factory = Envoy::Config::Utility::getFactory<GenericConnPoolFactory>(
        cluster.info()->upstreamConfig().value()); // factory from the typed config
  } else {
    factory = Envoy::Config::Utility::getFactoryByName<GenericConnPoolFactory>(
        "envoy.filters.connection_pools.tcp.generic"); // default generic factory, looked up by name
  }
  if (!factory) { // no factory found
    return false;
  }

  generic_conn_pool_ = factory->createGenericConnPool(cluster, config_->tunnelingConfigHelper(),
                                                      this, *upstream_callbacks_); // create the generic connection pool
  if (generic_conn_pool_) { // the pool was created successfully
    connecting_ = true; // mark as connecting
    connect_attempts_++; // count this attempt
    getStreamInfo().setAttemptCount(connect_attempts_); // record the attempt count
    generic_conn_pool_->newStream(*this); // request a new stream from the pool
    // Because we never return open connections to the pool, this either has a handle waiting on
    // connection completion, or onPoolFailure has been invoked. Either way, stop iteration.
    return true;
  }
  return false;
}
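
getFactoryByName() resolves the factory from a registry keyed by name; a typed upstreamConfig, when present, takes precedence. The registry idea reduced to a sketch (a hypothetical registry map, not Envoy's actual factory registration machinery):

#include <iostream>
#include <map>
#include <memory>
#include <string>

struct ConnPoolFactory {
  virtual ~ConnPoolFactory() = default;
  virtual std::string name() const = 0;
};

struct GenericTcpFactory : ConnPoolFactory {
  std::string name() const override { return "envoy.filters.connection_pools.tcp.generic"; }
};

// A name-keyed factory registry in the spirit of Envoy's factory registration.
std::map<std::string, std::unique_ptr<ConnPoolFactory>>& registry() {
  static std::map<std::string, std::unique_ptr<ConnPoolFactory>> r;
  return r;
}

ConnPoolFactory* getFactoryByName(const std::string& name) {
  const auto it = registry().find(name);
  return it == registry().end() ? nullptr : it->second.get();
}

int main() {
  auto f = std::make_unique<GenericTcpFactory>();
  const std::string key = f->name();
  registry().emplace(key, std::move(f));

  if (ConnPoolFactory* factory = getFactoryByName("envoy.filters.connection_pools.tcp.generic")) {
    std::cout << "resolved factory: " << factory->name() << "\n";
  }
}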

source/extensions/upstreams/tcp/generic/config.cc, line 14

TcpProxy::GenericConnPoolPtr GenericConnPoolFactory::createGenericConnPool(
    Upstream::ThreadLocalCluster& thread_local_cluster,
    TcpProxy::TunnelingConfigHelperOptConstRef config, Upstream::LoadBalancerContext* context,
    Envoy::Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks) const {
  if (config.has_value()) { // a tunneling config is present: use an HTTP pool
    Http::CodecType pool_type; // pool codec type
    if ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2) != 0) { // the cluster supports HTTP/2
      pool_type = Http::CodecType::HTTP2;
    } else if ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP3) !=
               0) { // the cluster supports HTTP/3
      pool_type = Http::CodecType::HTTP3;
    } else { // otherwise HTTP/1.1
      pool_type = Http::CodecType::HTTP1;
    }
    auto ret = std::make_unique<TcpProxy::HttpConnPool>(thread_local_cluster, context, *config,
                                                        upstream_callbacks, pool_type); // create the HTTP connection pool
    return (ret->valid() ? std::move(ret) : nullptr);
  }
  auto ret =
      std::make_unique<TcpProxy::TcpConnPool>(thread_local_cluster, context, upstream_callbacks); // create the TCP connection pool
  return (ret->valid() ? std::move(ret) : nullptr);
}

source/common/tcp_proxy/upstream.cc, line 163

void TcpConnPool::newStream(GenericConnectionPoolCallbacks& callbacks) { // request a new upstream stream
  callbacks_ = &callbacks;
  // Given this function is reentrant, make sure we only reset the upstream_handle_ if given a
  // valid connection handle. If newConnection fails inline it may result in attempting to
  // select a new host, and a recursive call to establishUpstreamConnection. In this case the
  // first call to newConnection will return null and the inner call will persist.
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_data_.value().newConnection(*this); // request a new connection
  if (handle) { // the request is pending on a connecting client
    ASSERT(upstream_handle_ == nullptr);
    upstream_handle_ = handle; // keep the cancellable handle
  }
}

envoy/upstream/thread_local_cluster.h, line 55

  Envoy::Tcp::ConnectionPool::Cancellable*
  newConnection(Envoy::Tcp::ConnectionPool::Callbacks& callbacks) {
    on_new_connection_(); // notify the new-connection hook
    return pool_->newConnection(callbacks); // delegate to the underlying pool
  }

source/common/tcp/conn_pool.h, line 180

  ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override {
    TcpAttachContext context(&callbacks);
    // TLS early data over TCP is not supported yet.
    return newStreamImpl(context, /*can_send_early_data=*/false); // create a new stream
  }

source/common/conn_pool/conn_pool_base.cc, line 254

ConnectionPool::Cancellable* ConnPoolImplBase::newStreamImpl(AttachContext& context,
                                                             bool can_send_early_data) {
  ASSERT(!is_draining_for_deletion_);
  ASSERT(!deferred_deleting_);

  ASSERT(static_cast<ssize_t>(connecting_stream_capacity_) ==
         connectingCapacity(connecting_clients_) +
             connectingCapacity(early_data_clients_)); // O(n) debug check.
  if (!ready_clients_.empty()) { // a fully connected client is available
    ActiveClient& client = *ready_clients_.front(); // take the first ready client
    ENVOY_CONN_LOG(debug, "using existing fully connected connection", client);
    attachStreamToClient(client, context); // attach the stream to it
    // Even if there's a ready client, we may want to preconnect to handle the next incoming stream.
    tryCreateNewConnections(); // maybe preconnect
    return nullptr;
  }

  if (can_send_early_data && !early_data_clients_.empty()) { // early data is allowed and a 0-RTT client exists
    ActiveClient& client = *early_data_clients_.front();
    ENVOY_CONN_LOG(debug, "using existing early data ready connection", client);
    attachStreamToClient(client, context); // attach the stream to it
    // Even if there's an available client, we may want to preconnect to handle the next
    // incoming stream.
    tryCreateNewConnections(); // maybe preconnect
    return nullptr;
  }

  if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { // the pending-request circuit breaker is open
    ENVOY_LOG(debug, "max pending streams overflow");
    onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow,
                  context); // fail the stream with Overflow
    host_->cluster().stats().upstream_rq_pending_overflow_.inc(); // bump the overflow counter
    return nullptr;
  }

  ConnectionPool::Cancellable* pending = newPendingStream(context, can_send_early_data); // queue a new pending stream
  ENVOY_LOG(debug, "trying to create new connection");
  ENVOY_LOG(trace, fmt::format("{}", *this));

  auto old_capacity = connecting_stream_capacity_;
  // This must come after newPendingStream() because this function uses the
  // length of pending_streams_ to determine if a new connection is needed.
  const ConnectionResult result = tryCreateNewConnections();//尝试创建连接
  // If there is not enough connecting capacity, the only reason to not
  // increase capacity is if the connection limits are exceeded.
  ENVOY_BUG(pending_streams_.size() <= connecting_stream_capacity_ ||
                connecting_stream_capacity_ > old_capacity ||
                (result == ConnectionResult::NoConnectionRateLimited ||
                 result == ConnectionResult::FailedToCreateConnection),
            fmt::format("Failed to create expected connection: {}", *this));
  if (result == ConnectionResult::FailedToCreateConnection) { // connection creation failed
    // This currently only happens for HTTP/3 if secrets aren't yet loaded.
    // Trigger connection failure.
    pending->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); // cancel the pending stream
    onPoolFailure(nullptr, absl::string_view(),
                  ConnectionPool::PoolFailureReason::LocalConnectionFailure, context); // fail the stream locally
    return nullptr;
  }
  return pending;
}

source/common/conn_pool/conn_pool_base.cc, line 166

void ConnPoolImplBase::attachStreamToClient(Envoy::ConnectionPool::ActiveClient& client,
                                            AttachContext& context) {
  ASSERT(client.readyForStream());

  if (client.state() == Envoy::ConnectionPool::ActiveClient::State::ReadyForEarlyData) {
    host_->cluster().stats().upstream_rq_0rtt_.inc(); // bump the 0-RTT counter
  }

  if (enforceMaxRequests() && !host_->cluster().resourceManager(priority_).requests().canCreate()) { // the max-requests circuit breaker is open
    ENVOY_LOG(debug, "max streams overflow");
    onPoolFailure(client.real_host_description_, absl::string_view(),
                  ConnectionPool::PoolFailureReason::Overflow, context); // fail the stream with Overflow
    host_->cluster().stats().upstream_rq_pending_overflow_.inc(); // bump the overflow counter
    return;
  }
  ENVOY_CONN_LOG(debug, "creating stream", client);

  // Latch capacity before updating remaining streams.
  uint64_t capacity = client.currentUnusedCapacity(); // latch the client's unused capacity
  client.remaining_streams_--;
  if (client.remaining_streams_ == 0) { // the client hit its lifetime stream limit
    ENVOY_CONN_LOG(debug, "maximum streams per connection, start draining", client);
    host_->cluster().stats().upstream_cx_max_requests_.inc(); // bump the max-requests counter
    transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::Draining); // start draining the client
  } else if (capacity == 1) { // this stream consumes the last capacity
    // As soon as the new stream is created, the client will be maxed out.
    transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::Busy); // mark the client busy
  }

  // Decrement the capacity, as there's one less stream available for serving.
  // For HTTP/3, the capacity is updated in newStreamEncoder.
  if (trackStreamCapacity()) { // this pool tracks stream capacity
    decrConnectingAndConnectedStreamCapacity(1, client);
  }
  // Track the new active stream.
  state_.incrActiveStreams(1); // one more active stream (cluster-wide view)
  num_active_streams_++; // one more active stream (this pool)
  host_->stats().rq_total_.inc(); // host request stats
  host_->stats().rq_active_.inc();
  host_->cluster().stats().upstream_rq_total_.inc(); // cluster request stats
  host_->cluster().stats().upstream_rq_active_.inc();
  host_->cluster().resourceManager(priority_).requests().inc(); // account against the request circuit breaker

  onPoolReady(client, context); // hand the stream to the pool-ready handler
}

source/common/tcp/conn_pool.h, line 205

  void onPoolReady(Envoy::ConnectionPool::ActiveClient& client,
                   Envoy::ConnectionPool::AttachContext& context) override {
    ActiveTcpClient* tcp_client = static_cast<ActiveTcpClient*>(&client); // downcast to the TCP client
    tcp_client->readEnableIfNew(); // enable reads on first use
    auto* callbacks = typedContext<TcpAttachContext>(context).callbacks_; // the caller's callbacks
    std::unique_ptr<Envoy::Tcp::ConnectionPool::ConnectionData> connection_data =
        std::make_unique<ActiveTcpClient::TcpConnectionData>(*tcp_client, *tcp_client->connection_); // wrap the connection
    callbacks->onPoolReady(std::move(connection_data), tcp_client->real_host_description_); // invoke the callback
  }

source/common/tcp_proxy/upstream.cc, line 183

void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,
                              Upstream::HostDescriptionConstSharedPtr host) {
  upstream_handle_ = nullptr;
  Tcp::ConnectionPool::ConnectionData* latched_data = conn_data.get(); // latch before the move
  Network::Connection& connection = conn_data->connection(); // the upstream connection

  auto upstream = std::make_unique<TcpUpstream>(std::move(conn_data), upstream_callbacks_); // build the TcpUpstream
  callbacks_->onGenericPoolReady(
      &connection.streamInfo(), std::move(upstream), host,
      latched_data->connection().connectionInfoProvider().localAddress(),
      latched_data->connection().streamInfo().downstreamAddressProvider().sslConnection()); // notify the tcp_proxy filter
}

source/common/tcp_proxy/tcp_proxy.cc, line 506

void Filter::onGenericPoolReady(StreamInfo::StreamInfo* info,
                                std::unique_ptr<GenericUpstream>&& upstream,
                                Upstream::HostDescriptionConstSharedPtr& host,
                                const Network::Address::InstanceConstSharedPtr& local_address,
                                Ssl::ConnectionInfoConstSharedPtr ssl_info) {
  upstream_ = std::move(upstream); // take ownership of the upstream
  generic_conn_pool_.reset(); // drop the generic connection pool
  read_callbacks_->upstreamHost(host);
  StreamInfo::UpstreamInfo& upstream_info = *getStreamInfo().upstreamInfo(); // the upstream info
  upstream_info.setUpstreamHost(host); // record the host
  upstream_info.setUpstreamLocalAddress(local_address); // record the local address
  upstream_info.setUpstreamSslConnection(ssl_info); // record the SSL info
  onUpstreamConnection(); // run the upstream-connected logic
  read_callbacks_->continueReading(); // resume the downstream read filter chain
  if (info) { // upstream stream info is available
    upstream_info.setUpstreamFilterState(info->filterState()); // propagate its filter state
  }
}

source/common/tcp_proxy/tcp_proxy.cc, line 678

void Filter::onUpstreamConnection() {
  connecting_ = false; // no longer connecting
  // Re-enable downstream reads now that the upstream connection is established
  // so we have a place to send downstream data to.
  read_callbacks_->connection().readDisable(false); // re-enable downstream reads

  read_callbacks_->upstreamHost()->outlierDetector().putResult(
      Upstream::Outlier::Result::LocalOriginConnectSuccessFinal); // report success to the outlier detector

  ENVOY_CONN_LOG(debug, "TCP:onUpstreamEvent(), requestedServerName: {}",
                 read_callbacks_->connection(),
                 getStreamInfo().downstreamAddressProvider().requestedServerName());

  if (config_->idleTimeout()) { // an idle timeout is configured
    // The idle_timer_ can be moved to a Drainer, so related callbacks call into
    // the UpstreamCallbacks, which has the same lifetime as the timer, and can dispatch
    // the call to either TcpProxy or to Drainer, depending on the current state.
    idle_timer_ = read_callbacks_->connection().dispatcher().createTimer(
        [upstream_callbacks = upstream_callbacks_]() { upstream_callbacks->onIdleTimeout(); }); // create the idle timer
    resetIdleTimer(); // arm the idle timer
    read_callbacks_->connection().addBytesSentCallback([this](uint64_t) {
      resetIdleTimer();
      return true;
    }); // reset the timer on downstream sends
    if (upstream_) { // an upstream is attached
      upstream_->addBytesSentCallback([upstream_callbacks = upstream_callbacks_](uint64_t) -> bool {
        upstream_callbacks->onBytesSent();
        return true;
      }); // reset the timer on upstream sends
    }
  }
}

source/common/conn_pool/conn_pool_base.cc, line 109

ConnPoolImplBase::ConnectionResult ConnPoolImplBase::tryCreateNewConnections() {
  ASSERT(!is_draining_for_deletion_);
  ConnPoolImplBase::ConnectionResult result;
  // Somewhat arbitrarily cap the number of connections preconnected due to new
  // incoming connections. The preconnect ratio is capped at 3, so in steady
  // state, no more than 3 connections should be preconnected. If hosts go
  // unhealthy, and connections are not immediately preconnected, it could be that
  // many connections are desired when the host becomes healthy again, but
  // overwhelming it with connections is not desirable.
  for (int i = 0; i < 3; ++i) {
    result = tryCreateNewConnection(); // try to create one connection
    if (result != ConnectionResult::CreatedNewConnection) { // stop once an attempt does not create a connection
      break;
    }
  }
  return result;
}

ConnPoolImplBase::ConnectionResult
ConnPoolImplBase::tryCreateNewConnection(float global_preconnect_ratio) {
  // There are already enough Connecting connections for the number of queued streams.
  if (!shouldCreateNewConnection(global_preconnect_ratio)) { // no new connection is needed
    ENVOY_LOG(trace, "not creating a new connection, shouldCreateNewConnection returned false.");
    return ConnectionResult::ShouldNotConnect;
  }

  const bool can_create_connection = host_->canCreateConnection(priority_); // circuit-breaker check

  if (!can_create_connection) { // over the connection limit
    host_->cluster().stats().upstream_cx_overflow_.inc(); // bump the overflow counter
  }
  // If we are at the connection circuit-breaker limit due to other upstreams having
  // too many open connections, and this upstream has no connections, always create one, to
  // prevent pending streams being queued to this upstream with no way to be processed.
  if (can_create_connection || (ready_clients_.empty() && busy_clients_.empty() &&
                                connecting_clients_.empty() && early_data_clients_.empty())) {
    ENVOY_LOG(debug, "creating a new connection (connecting={})", connecting_clients_.size());
    ActiveClientPtr client = instantiateActiveClient(); // instantiate the active client
    if (client.get() == nullptr) { // creation failed
      ENVOY_LOG(trace, "connection creation failed");
      return ConnectionResult::FailedToCreateConnection;
    }
    ASSERT(client->state() == ActiveClient::State::Connecting);
    ASSERT(std::numeric_limits<uint64_t>::max() - connecting_stream_capacity_ >=
           static_cast<uint64_t>(client->currentUnusedCapacity()));
    ASSERT(client->real_host_description_);
    // Increase the connecting capacity to reflect the streams this connection can serve.
    incrConnectingAndConnectedStreamCapacity(client->currentUnusedCapacity(), *client); // add the client's capacity to the connecting total
    LinkedList::moveIntoList(std::move(client), owningList(client->state()));
    return can_create_connection ? ConnectionResult::CreatedNewConnection
                                 : ConnectionResult::CreatedButRateLimited;
  } else {
    ENVOY_LOG(trace, "not creating a new connection: connection constrained");
    return ConnectionResult::NoConnectionRateLimited;
  }
}

source/common/http/conn_manager_impl.cc, line 419

Network::FilterStatus ConnectionManagerImpl::onNewConnection() {
  if (!read_callbacks_->connection().streamInfo().protocol()) { // no protocol set on the connection
    // For Non-QUIC traffic, continue passing data to filters.
    return Network::FilterStatus::Continue;
  }
  // Only QUIC connection's stream_info_ specifies protocol.
  Buffer::OwnedImpl dummy;
  createCodec(dummy); // create the codec (QUIC only at this point)
  ASSERT(codec_->protocol() == Protocol::Http3);
  // Stop iterating through each filters for QUIC. Currently a QUIC connection
  // only supports one filter, HCM, and bypasses the onData() interface. Because
  // QUICHE already handles de-multiplexing.
  return Network::FilterStatus::StopIteration;
}

source/common/http/conn_manager_impl.cc, line 375

Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool) {
  if (!codec_) { // no codec yet
    // Http3 codec should have been instantiated by now.
    createCodec(data); // create the codec, sniffing the protocol if configured as AUTO
  }

  bool redispatch;
  do {
    redispatch = false;

    const Status status = codec_->dispatch(data); // dispatch the data to the codec

    if (isBufferFloodError(status) || isInboundFramesWithEmptyPayloadError(status)) {
      handleCodecError(status.message()); // handle the codec error
      return Network::FilterStatus::StopIteration;
    } else if (isCodecProtocolError(status)) {
      stats_.named_.downstream_cx_protocol_error_.inc(); // bump the protocol-error counter
      handleCodecError(status.message()); // handle the codec error
      return Network::FilterStatus::StopIteration;
    }
    ASSERT(status.ok());

    // Processing incoming data may release outbound data so check for closure here as well.
    checkForDeferredClose(false); // check for a deferred close

    // The HTTP/1 codec will pause dispatch after a single message is complete. We want to
    // either redispatch if there are no streams and we have more data. If we have a single
    // complete non-WebSocket stream but have not responded yet we will pause socket reads
    // to apply back pressure.
    if (codec_->protocol() < Protocol::Http2) { // HTTP/1.x only: protocols below HTTP/2
      if (read_callbacks_->connection().state() == Network::Connection::State::Open &&
          data.length() > 0 && streams_.empty()) {
        redispatch = true;
      }
    }
  } while (redispatch);

  if (!read_callbacks_->connection().streamInfo().protocol()) { // protocol not recorded yet
    read_callbacks_->connection().streamInfo().protocol(codec_->protocol()); // record it now
  }

  return Network::FilterStatus::StopIteration;
}
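
For HTTP/1.x the codec pauses after each complete message, so the loop above redispatches while leftover data remains and no stream is in flight; that is what serves pipelined requests while still applying backpressure when a response is pending. The loop skeleton in abstract form (a toy codec, only to show the control flow):

#include <cstdio>
#include <string>

// Toy HTTP/1-style codec: consumes one ';'-terminated "message" per dispatch().
struct ToyCodec {
  int active_streams{0};
  void dispatch(std::string& data) {
    const auto pos = data.find(';');
    if (pos == std::string::npos) return;
    std::printf("dispatched message: %s\n", data.substr(0, pos).c_str());
    data.erase(0, pos + 1);
    active_streams = 0; // model an immediately completed response
  }
};

int main() {
  ToyCodec codec;
  std::string data = "req1;req2;req3;";
  bool redispatch;
  do {
    redispatch = false;
    codec.dispatch(data);
    // Redispatch only if more data is buffered and no stream is in flight.
    if (!data.empty() && codec.active_streams == 0) {
      redispatch = true;
    }
  } while (redispatch);
}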

source/common/http/conn_manager_impl.cc, line 354

void ConnectionManagerImpl::createCodec(Buffer::Instance& data) {
  ASSERT(!codec_);
  codec_ = config_.createCodec(read_callbacks_->connection(), data, *this); // create the codec from configuration

  switch (codec_->protocol()) { // bump the per-protocol connection stats
  case Protocol::Http3:
    stats_.named_.downstream_cx_http3_total_.inc();
    stats_.named_.downstream_cx_http3_active_.inc();
    break;
  case Protocol::Http2:
    stats_.named_.downstream_cx_http2_total_.inc();
    stats_.named_.downstream_cx_http2_active_.inc();
    break;
  case Protocol::Http11:
  case Protocol::Http10:
    stats_.named_.downstream_cx_http1_total_.inc();
    stats_.named_.downstream_cx_http1_active_.inc();
    break;
  }
}

source/extensions/filters/network/http_connection_manager/config.cc, line 694

Http::ServerConnectionPtr
HttpConnectionManagerConfig::createCodec(Network::Connection& connection,
                                         const Buffer::Instance& data,
                                         Http::ServerConnectionCallbacks& callbacks) {
  switch (codec_type_) { // switch on the configured codec type
  case CodecType::HTTP1: {
    return std::make_unique<Http::Http1::ServerConnectionImpl>(
        connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()),
        callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(),
        headersWithUnderscoresAction()); // create the HTTP/1.1 server connection
  }
  case CodecType::HTTP2: {
    return std::make_unique<Http::Http2::ServerConnectionImpl>(
        connection, callbacks,
        Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()),
        context_.api().randomGenerator(), http2_options_, maxRequestHeadersKb(),
        maxRequestHeadersCount(), headersWithUnderscoresAction()); // create the HTTP/2 server connection
  }
  case CodecType::HTTP3:
#ifdef ENVOY_ENABLE_QUIC
    return std::make_unique<Quic::QuicHttpServerConnectionImpl>(
        dynamic_cast<Quic::EnvoyQuicServerSession&>(connection), callbacks,
        Http::Http3::CodecStats::atomicGet(http3_codec_stats_, context_.scope()), http3_options_,
        maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); // create the HTTP/3 server connection
#else
    // Should be blocked by configuration checking at an earlier point.
    PANIC("unexpected");
#endif
  case CodecType::AUTO: // auto-detect
    return Http::ConnectionManagerUtility::autoCreateCodec(
        connection, data, callbacks, context_.scope(), context_.api().randomGenerator(),
        http1_codec_stats_, http2_codec_stats_, http1_settings_, http2_options_,
        maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); // sniff the protocol and create the codec
  }
  PANIC_DUE_TO_CORRUPT_ENUM;
}

source/common/http/http2/codec_impl.cc line 2237

ServerConnectionImpl::ServerConnectionImpl(
    Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats,
    Random::RandomGenerator& random_generator,
    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,
    const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count,
    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction
        headers_with_underscores_action)
    : ConnectionImpl(connection, stats, random_generator, http2_options, max_request_headers_kb,
                     max_request_headers_count),
      callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) {
  Http2Options h2_options(http2_options, max_request_headers_kb);

  if (use_new_codec_wrapper_) { // use the new codec wrapper
    auto visitor = std::make_unique<http2::adapter::CallbackVisitor>(
        http2::adapter::Perspective::kServer, *http2_callbacks_.callbacks(), base()); // callback visitor
    if (use_oghttp2_library_) { // use the oghttp2 library
      visitor_ = std::move(visitor);
      adapter_ = http2::adapter::OgHttp2Adapter::Create(*visitor_, h2_options.ogOptions()); // create the adapter
    } else {
      auto adapter =
          http2::adapter::NgHttp2Adapter::CreateServerAdapter(*visitor, h2_options.options()); // create the nghttp2 adapter
      auto stream_close_listener = [p = adapter.get()](http2::adapter::Http2StreamId stream_id) {
        p->RemoveStream(stream_id);
      }; // stream close listener
      visitor->set_stream_close_listener(std::move(stream_close_listener)); // install the listener
      visitor_ = std::move(visitor);
      adapter_ = std::move(adapter);
    }
  } else {
    nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(),
                                h2_options.options());
  }
  sendSettings(http2_options, false); // send initial SETTINGS
  allow_metadata_ = http2_options.allow_metadata(); // record whether METADATA frames are allowed
}

source/common/http/http2/codec_impl.cc line 935

ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats,
                               Random::RandomGenerator& random_generator,
                               const envoy::config::core::v3::Http2ProtocolOptions& http2_options,
                               const uint32_t max_headers_kb, const uint32_t max_headers_count)
    : use_new_codec_wrapper_(
          Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http2_new_codec_wrapper")),
      use_oghttp2_library_(
          Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http2_use_oghttp2")),
      http2_callbacks_(use_new_codec_wrapper_), stats_(stats), connection_(connection),
      max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count),
      per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()),
      stream_error_on_invalid_http_messaging_(
          http2_options.override_stream_error_on_invalid_http_message().value()),
      protocol_constraints_(stats, http2_options), dispatching_(false), raised_goaway_(false),
      delay_keepalive_timeout_(Runtime::runtimeFeatureEnabled(
          "envoy.reloadable_features.http2_delay_keepalive_timeout")),
      random_(random_generator),
      last_received_data_time_(connection_.dispatcher().timeSource().monotonicTime()) {
  // This library can only be used with the wrapper API enabled.
  ASSERT(!use_oghttp2_library_ || use_new_codec_wrapper_);

  if (http2_options.has_connection_keepalive()) { // HTTP/2 connection keepalive configured
    keepalive_interval_ = std::chrono::milliseconds(
        PROTOBUF_GET_MS_OR_DEFAULT(http2_options.connection_keepalive(), interval, 0)); // PING interval
    keepalive_timeout_ = std::chrono::milliseconds(
        PROTOBUF_GET_MS_REQUIRED(http2_options.connection_keepalive(), timeout)); // PING ACK timeout
    keepalive_interval_jitter_percent_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(
        http2_options.connection_keepalive(), interval_jitter, 15.0); // interval jitter percent

    if (keepalive_interval_.count() > 0) {
      keepalive_send_timer_ = connection.dispatcher().createTimer([this]() { sendKeepalive(); }); // PING send timer
    }
    keepalive_timeout_timer_ =
        connection.dispatcher().createTimer([this]() { onKeepaliveResponseTimeout(); }); // PING timeout timer

    // This call schedules the initial interval, with jitter.
    onKeepaliveResponse(); // arm the first interval
  }
}
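
The interval_jitter read above exists to desynchronize keepalive PINGs across many connections. A sketch of computing one jittered interval; jitteredInterval() and the uniform distribution are assumptions for illustration, only the 15% default comes from the code above:

#include <chrono>
#include <iostream>
#include <random>

std::chrono::milliseconds jitteredInterval(std::chrono::milliseconds base,
                                           double jitter_percent,
                                           std::mt19937_64& rng) {
  // Add a random offset in [0, base * jitter_percent / 100).
  const double max_jitter_ms = base.count() * jitter_percent / 100.0;
  std::uniform_real_distribution<double> dist(0.0, max_jitter_ms);
  return base + std::chrono::milliseconds(static_cast<long long>(dist(rng)));
}

int main() {
  std::mt19937_64 rng{std::random_device{}()};
  for (int i = 0; i < 3; ++i) {
    const auto next = jitteredInterval(std::chrono::milliseconds(30000), 15.0, rng);
    std::cout << "next PING in " << next.count() << " ms\n";
  }
}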

source/common/http/http2/codec_impl.cc line 1839

ConnectionImpl::Http2Callbacks::Http2Callbacks(bool use_new_codec_wrapper) { // nghttp2 callback registration
  nghttp2_session_callbacks_new(&callbacks_);
  nghttp2_session_callbacks_set_send_callback(
      callbacks_,
      [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t {
        return static_cast<ConnectionImpl*>(user_data)->onSend(data, length);
      }); // send callback

  nghttp2_session_callbacks_set_send_data_callback(
      callbacks_,
      [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length,
         nghttp2_data_source* source, void*) -> int {
        ASSERT(frame->data.padlen == 0);
        static_cast<StreamImpl*>(source->ptr)->onDataSourceSend(framehd, length);
        return 0;
      }); // send-data callback

  nghttp2_session_callbacks_set_on_begin_headers_callback(
      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
        auto status = static_cast<ConnectionImpl*>(user_data)->onBeginHeaders(frame);
        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(
            std::move(status));
      }); // begin-headers callback

  nghttp2_session_callbacks_set_on_header_callback(
      callbacks_,
      [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length,
         const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int {
        // TODO PERF: Can reference count here to avoid copies.
        HeaderString name;
        name.setCopy(reinterpret_cast<const char*>(raw_name), name_length);
        HeaderString value;
        value.setCopy(reinterpret_cast<const char*>(raw_value), value_length);
        return static_cast<ConnectionImpl*>(user_data)->onHeader(frame, std::move(name),
                                                                 std::move(value));
      }); // header callback

  nghttp2_session_callbacks_set_on_data_chunk_recv_callback(
      callbacks_,
      [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len,
         void* user_data) -> int {
        return static_cast<ConnectionImpl*>(user_data)->onData(stream_id, data, len);
      }); // data-chunk-received callback

  nghttp2_session_callbacks_set_on_begin_frame_callback(
      callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int {
        auto status = static_cast<ConnectionImpl*>(user_data)->onBeforeFrameReceived(hd);
        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(
            std::move(status));
      }); // begin-frame callback

  nghttp2_session_callbacks_set_on_frame_recv_callback(
      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
        auto status = static_cast<ConnectionImpl*>(user_data)->onFrameReceived(frame);
        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(
            std::move(status));
      }); // frame-received callback

  nghttp2_session_callbacks_set_on_stream_close_callback(
      callbacks_,
      [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int {
        auto status = static_cast<ConnectionImpl*>(user_data)->onStreamClose(stream_id, error_code);
        return static_cast<ConnectionImpl*>(user_data)->setAndCheckNghttp2CallbackStatus(
            std::move(status));
      }); // stream-close callback

  nghttp2_session_callbacks_set_on_frame_send_callback(
      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
        return static_cast<ConnectionImpl*>(user_data)->onFrameSend(frame);
      }); // frame-sent callback

  nghttp2_session_callbacks_set_before_frame_send_callback(
      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
        return static_cast<ConnectionImpl*>(user_data)->onBeforeFrameSend(frame);
      }); // before-frame-send callback

  nghttp2_session_callbacks_set_on_frame_not_send_callback(
      callbacks_, [](nghttp2_session*, const nghttp2_frame*, int, void*) -> int {
        // We used to always return failure here but it looks now this can get called if the other
        // side sends GOAWAY and we are trying to send a SETTINGS ACK. Just ignore this for now.
        return 0;
      }); // frame-not-sent callback

  nghttp2_session_callbacks_set_on_invalid_frame_recv_callback(
      callbacks_,
      [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int {
        return static_cast<ConnectionImpl*>(user_data)->onInvalidFrame(frame->hd.stream_id,
                                                                       error_code);
      }); // invalid-frame-received callback

  nghttp2_session_callbacks_set_on_extension_chunk_recv_callback(
      callbacks_,
      [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len,
         void* user_data) -> int {
        ASSERT(hd->length >= len);
        return static_cast<ConnectionImpl*>(user_data)->onMetadataReceived(hd->stream_id, data,
                                                                           len);
      }); // extension-chunk-received callback

  nghttp2_session_callbacks_set_unpack_extension_callback(
      callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int {
        return static_cast<ConnectionImpl*>(user_data)->onMetadataFrameComplete(
            hd->stream_id, hd->flags == END_METADATA_FLAG);
      }); // unpack-extension callback

  // The new codec does not use the pack_extension callback.
  if (!use_new_codec_wrapper) {
    nghttp2_session_callbacks_set_pack_extension_callback(
        callbacks_,
        [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame,
           void* user_data) -> ssize_t {
          ASSERT(frame->hd.length <= len);
          return static_cast<ConnectionImpl*>(user_data)->packMetadata(frame->hd.stream_id, buf,
                                                                       len);
        }); // pack-extension callback
  }

  nghttp2_session_callbacks_set_error_callback2(
      callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int {
        return static_cast<ConnectionImpl*>(user_data)->onError(absl::string_view(msg, len));
      }); // error callback
}
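
Every registration above follows one pattern: a captureless lambda decays to a plain C function pointer, and the owning C++ object is recovered by static_cast-ing the opaque user_data that nghttp2 hands back. A self-contained sketch of that trampoline, with CLibrary standing in (hypothetically) for nghttp2_session_callbacks:

#include <iostream>

using c_callback = int (*)(const char* data, void* user_data);

struct CLibrary { // stand-in for the C-side callback table
  c_callback on_data = nullptr;
  void* user_data = nullptr;
  void deliver(const char* data) {
    if (on_data != nullptr) {
      on_data(data, user_data);
    }
  }
};

class ConnectionLike {
public:
  int onData(const char* data) {
    std::cout << "got: " << data << "\n";
    return 0;
  }
};

int main() {
  ConnectionLike conn;
  CLibrary lib;
  lib.user_data = &conn;
  // Captureless lambda decays to a function pointer, exactly like the
  // nghttp2_session_callbacks_set_* registrations above.
  lib.on_data = [](const char* data, void* user_data) -> int {
    return static_cast<ConnectionLike*>(user_data)->onData(data);
  };
  lib.deliver("HEADERS frame");
}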

source/common/http/http2/codec_impl.cc line 1449

ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) {
  ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length);
  Buffer::OwnedImpl buffer;
  addOutboundFrameFragment(buffer, data, length); // append the outbound frame fragment

  // While the buffer is transient the fragment it contains will be moved into the
  // write_buffer_ of the underlying connection_ by the write method below.
  // This creates lifetime dependency between the write_buffer_ of the underlying connection
  // and the codec object. Specifically the write_buffer_ MUST be either fully drained or
  // deleted before the codec object is deleted. This is presently guaranteed by the
  // destruction order of the Network::ConnectionImpl object where write_buffer_ is
  // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl.
  connection_.write(buffer, false); // write the bytes
  return length;
}

source/common/http/http2/codec_impl.cc line 1437

void ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data,
                                              size_t length) {
  // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the
  // onBeforeFrameSend callback is not called for DATA frames.
  bool is_outbound_flood_monitored_control_frame = false;
  std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); // take the flag and clear the member
  auto releasor =
      protocol_constraints_.incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); // count the outbound frame; returns a releasor
  output.add(data, length); // append the bytes
  output.addDrainTracker(releasor); // attach the drain tracker
}
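
The releasor returned by incrementOutboundFrameCount() is attached as a drain tracker, so the outbound-frame counter is decremented only once those bytes actually leave the buffer, not when they are queued. A sketch of the idea; SimpleBuffer and Fragment are hypothetical, not Envoy's Buffer API:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Fragment {
  std::string bytes;
  std::function<void()> on_drain;
};

class SimpleBuffer {
public:
  void add(std::string bytes, std::function<void()> on_drain) {
    fragments_.push_back({std::move(bytes), std::move(on_drain)});
  }
  void drainAll() {
    for (auto& f : fragments_) {
      std::cout << "wrote " << f.bytes.size() << " bytes\n";
      if (f.on_drain) {
        f.on_drain(); // the releasor runs here, after the write
      }
    }
    fragments_.clear();
  }

private:
  std::vector<Fragment> fragments_;
};

int main() {
  int outbound_frames = 0;
  SimpleBuffer buf;
  ++outbound_frames; // analogue of incrementOutboundFrameCount()
  buf.add("frame-payload", [&outbound_frames]() { --outbound_frames; });
  buf.drainAll();
  std::cout << "outbound frames in flight: " << outbound_frames << "\n"; // 0
}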

source/common/network/connection_impl.cc line 445

void ConnectionImpl::write(Buffer::Instance& data, bool end_stream) { // write data
  write(data, end_stream, true);
}

void ConnectionImpl::write(Buffer::Instance& data, bool end_stream, bool through_filter_chain) {
  ASSERT(!end_stream || enable_half_close_);
  ASSERT(dispatcher_.isThreadSafe());

  if (write_end_stream_) { // end_stream was already written
    // It is an API violation to write more data after writing end_stream, but a duplicate
    // end_stream with no data is harmless. This catches misuse of the API that could result in data
    // being lost.
    ASSERT(data.length() == 0 && end_stream);

    return;
  }

  if (through_filter_chain) { // run through the filter chain
    // NOTE: This is kind of a hack, but currently we don't support restart/continue on the write
    //       path, so we just pass around the buffer passed to us in this function. If we ever
    //       support buffer/restart/continue on the write path this needs to get more complicated.
    current_write_buffer_ = &data;
    current_write_end_stream_ = end_stream;
    FilterStatus status = filter_manager_.onWrite(); // let the filter manager process the write
    current_write_buffer_ = nullptr;

    if (FilterStatus::StopIteration == status) {
      return;
    }
  }

  write_end_stream_ = end_stream;
  if (data.length() > 0 || end_stream) { // data present or end of stream
    ENVOY_CONN_LOG(trace, "writing {} bytes, end_stream {}", *this, data.length(), end_stream);
    // TODO(mattklein123): All data currently gets moved from the source buffer to the write buffer.
    // This can lead to inefficient behavior if writing a bunch of small chunks. In this case, it
    // would likely be more efficient to copy data below a certain size. VERY IMPORTANT: If this is
    // ever changed, read the comment in SslSocket::doWrite() VERY carefully. That code assumes that
    // we never change existing write_buffer_ chain elements between calls to SSL_write(). That code
    // might need to change if we ever copy here.
    write_buffer_->move(data);

    // Activating a write event before the socket is connected has the side-effect of tricking
    // doWriteReady into thinking the socket is connected. On macOS, the underlying write may fail
    // with a connection error if a call to write(2) occurs before the connection is completed.
    if (!connecting_) {
      ioHandle().activateFileEvents(Event::FileReadyType::Write); // activate the write file event
    }
  }
}

source/common/http/http2/codec_impl.cc line 704

void ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) {
  // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we
  // "just know" that the frame header is 9 bytes.
  // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback

  parent_.protocol_constraints_.incrementOutboundDataFrameCount(); // count the outbound DATA frame

  Buffer::OwnedImpl output;
  parent_.addOutboundFrameFragment(output, framehd, H2_FRAME_HEADER_SIZE); // append the 9-byte frame header
  if (!parent_.protocol_constraints_.checkOutboundFrameLimits().ok()) { // outbound frame limit exceeded
    ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue",
                   parent_.connection_);
    setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood);
  }

  parent_.stats_.pending_send_bytes_.sub(length); // update stats
  output.move(*pending_send_data_, length); // move the payload
  parent_.connection_.write(output, false); // write out
}

source/common/http/http2/codec_impl.cc line 2185

Status ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) {
  // The client code explicitly does not currently support push promise.
  RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, "");
  RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE ||
                     frame->headers.cat == NGHTTP2_HCAT_HEADERS,
                 "");
  RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->headers.padlen));
  if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) {
    StreamImpl* stream = getStream(frame->hd.stream_id); // look up the stream
    stream->allocTrailers(); // allocate trailers
  }

  return okStatus();
}

source/common/http/http2/codec_impl.cc line 2305

int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name,
                                   HeaderString&& value) {
  // For a server connection, we should never get push promise frames.
  ASSERT(frame->hd.type == NGHTTP2_HEADERS);
  ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS);
  return saveHeader(frame, std::move(name), std::move(value)); // save the header
}

source/common/http/http2/codec_impl.cc line 1583

int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name,
                               HeaderString&& value) {
  StreamImpl* stream = getStream(frame->hd.stream_id); // look up the stream
  if (!stream) { // stream not found
    // We have seen 1 or 2 crashes where we get a headers callback but there is no associated
    // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2
    // code it looks possible that inflate_header_block() can safely inflate headers for an already
    // closed stream, but will still call the headers callback. Since that seems possible, we should
    // ignore this case here.
    // TODO(mattklein123): Figure out a test case that can hit this.
    stats_.headers_cb_no_stream_.inc(); // bump stats
    return 0;
  }

  // TODO(10646): Switch to use HeaderUtility::checkHeaderNameForUnderscores().
  auto should_return = checkHeaderNameForUnderscores(name.getStringView()); // check the name for underscores
  if (should_return) {
    stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore); // record error details
    name.clear(); // clear the name
    value.clear(); // clear the value
    return should_return.value();
  }

  stream->saveHeader(std::move(name), std::move(value)); // save the header

  if (stream->headers().byteSize() > max_headers_kb_ * 1024 ||
      stream->headers().size() > max_headers_count_) { // header bytes or count exceed the limits
    stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers); // record error details
    stats_.header_overflow_.inc(); // bump stats
    // This will cause the library to reset/close the stream.
    return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
  } else {
    return 0;
  }
}
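
The check at the end is cumulative: every saved header grows the running byte and count totals until one of the two budgets is exceeded. A standalone sketch of the same check (acceptHeader() and HeaderLimits are illustrative, not Envoy's HeaderMap API):

#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct HeaderLimits {
  uint32_t max_headers_kb = 60;
  uint32_t max_headers_count = 100;
};

// Returns false once the cumulative size or count exceeds the limits,
// mirroring the byteSize()/size() comparison in saveHeader() above.
bool acceptHeader(std::vector<std::pair<std::string, std::string>>& headers,
                  uint64_t& byte_size, std::string name, std::string value,
                  const HeaderLimits& limits) {
  byte_size += name.size() + value.size();
  headers.emplace_back(std::move(name), std::move(value));
  return byte_size <= limits.max_headers_kb * 1024 &&
         headers.size() <= limits.max_headers_count;
}

int main() {
  std::vector<std::pair<std::string, std::string>> headers;
  uint64_t byte_size = 0;
  HeaderLimits limits{1, 2}; // 1 KiB, 2 headers
  std::cout << acceptHeader(headers, byte_size, ":path", "/", limits) << "\n"; // 1
  std::cout << acceptHeader(headers, byte_size, "x-big",
                            std::string(2048, 'a'), limits) << "\n";           // 0: over 1 KiB
}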

source/common/http/http2/codec_impl.cc line 1114

int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) {
  ASSERT(connection_.state() == Network::Connection::State::Open);
  StreamImpl* stream = getStream(stream_id); // look up the stream
  // If this results in buffering too much data, the watermark buffer will call
  // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_
  stream->pending_recv_data_->add(data, len); // buffer the received data
  // Update the window to the peer unless some consumer of this stream's data has hit a flow control
  // limit and disabled reads on this stream
  if (stream->shouldAllowPeerAdditionalStreamWindow()) { // OK to grant the peer more window
    if (use_new_codec_wrapper_) { // new codec wrapper
      adapter_->MarkDataConsumedForStream(stream_id, len); // mark the data consumed
    } else {
      nghttp2_session_consume(session_, stream_id, len); // mark the data consumed
    }
  } else {
    stream->unconsumed_bytes_ += len; // defer the window update
  }
  return 0;
}
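
The consume/unconsumed split above is how read flow control reaches the peer: consuming bytes returns window (inviting more data), while a backed-up stream withholds the update and remembers the byte count so it can be released later in one go. A sketch with a stand-in Session (hypothetical, in place of nghttp2/the adapter):

#include <cstddef>
#include <iostream>

struct Session { // stand-in for nghttp2_session_consume()
  size_t peer_window_credit = 0;
  void consume(size_t len) { peer_window_credit += len; }
};

struct Stream {
  bool reads_enabled = true;
  size_t unconsumed_bytes = 0;
};

void onData(Session& session, Stream& stream, size_t len) {
  if (stream.reads_enabled) {
    session.consume(len); // window goes back, peer may send more
  } else {
    stream.unconsumed_bytes += len; // hold the window update for later
  }
}

void resumeReads(Session& session, Stream& stream) {
  stream.reads_enabled = true;
  session.consume(stream.unconsumed_bytes); // release everything deferred
  stream.unconsumed_bytes = 0;
}

int main() {
  Session session;
  Stream stream;
  stream.reads_enabled = false; // consumer hit a watermark
  onData(session, stream, 4096);
  onData(session, stream, 4096);
  resumeReads(session, stream);
  std::cout << "window credit returned: " << session.peer_window_credit << "\n"; // 8192
}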

source/common/http/http2/codec_impl.cc line 1179

Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) {
  ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}, stream_id={}", connection_,
                 static_cast<uint64_t>(hd->type), static_cast<uint64_t>(hd->flags), hd->stream_id);
  ASSERT(connection_.state() == Network::Connection::State::Open);

  current_stream_id_ = hd->stream_id; // record the current stream id
  // Track all the frames without padding here, since this is the only callback we receive
  // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.).
  // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived().
  auto status = okStatus();
  if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) {
    status = trackInboundFrames(hd, 0); // track the inbound frame
  }

  return status;
}

source/common/http/http2/codec_impl.cc line 1206

Status ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) {
  ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast<uint64_t>(frame->hd.type));
  ASSERT(connection_.state() == Network::Connection::State::Open);

  // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS
  // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders()
  // and CONTINUATION frames in onBeforeFrameReceived().
  ASSERT(frame->hd.type != NGHTTP2_CONTINUATION);

  if ((frame->hd.type == NGHTTP2_PING) && (frame->ping.hd.flags & NGHTTP2_FLAG_ACK)) { // PING frame with the ACK flag
    // The ``opaque_data`` should be exactly what was sent in the ping, which
    // was the current time when the ping was sent. This can be useful while debugging
    // to match the ping and ack.
    uint64_t data;
    safeMemcpy(&data, &(frame->ping.opaque_data)); // copy out the opaque data
    ENVOY_CONN_LOG(trace, "recv PING ACK {}", connection_, data);

    onKeepaliveResponse(); // handle the keepalive response
    return okStatus();
  }

  // In slow networks, HOL blocking can prevent the ping response from coming in a reasonable
  // amount of time. To avoid HOL blocking influence, if we receive *any* frame extend the
  // timeout for another timeout period. This will still timeout the connection if there is no
  // activity, but if there is frame activity we assume the connection is still healthy and the
  // PING ACK may be delayed behind other frames.
  if (delay_keepalive_timeout_ && keepalive_timeout_timer_ != nullptr &&
      keepalive_timeout_timer_->enabled()) {
    keepalive_timeout_timer_->enableTimer(keepalive_timeout_); // re-arm the keepalive timeout timer
  }

  if (frame->hd.type == NGHTTP2_DATA) { // DATA frame
    RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->data.padlen));
  }

  // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown
  // notifications are the same as a normal GOAWAY.
  // TODO: handle multiple GOAWAY frames.
  if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { // GOAWAY frame, not yet raised
    ASSERT(frame->hd.stream_id == 0);
    raised_goaway_ = true; // mark GOAWAY as raised
    callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); // notify GOAWAY
    return okStatus();
  }

  if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { // SETTINGS frame with no flags
    onSettings(frame->settings); // handle the settings
  }

  StreamImpl* stream = getStream(frame->hd.stream_id); // look up the stream
  if (!stream) { // no such stream
    return okStatus();
  }

  // Track bytes sent and received.
  if (frame->hd.type != METADATA_FRAME_TYPE) { // anything but a METADATA frame
    stream->bytes_meter_->addWireBytesReceived(frame->hd.length + H2_FRAME_HEADER_SIZE); // count received wire bytes
  }
  if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_CONTINUATION) { // HEADERS or CONTINUATION frame
    stream->bytes_meter_->addHeaderBytesReceived(frame->hd.length + H2_FRAME_HEADER_SIZE); // count received header bytes
  }

  switch (frame->hd.type) { // switch on frame type
  case NGHTTP2_HEADERS: { // HEADERS
    stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; // record remote end of stream
    if (!stream->cookies_.empty()) { // buffered cookie crumbs present
      HeaderString key(Headers::get().Cookie);
      stream->headers().addViaMove(std::move(key), std::move(stream->cookies_)); // reassemble the cookie header
    }

    switch (frame->headers.cat) { // switch on the headers category
    case NGHTTP2_HCAT_RESPONSE:
    case NGHTTP2_HCAT_REQUEST: {
      stream->decodeHeaders(); // decode headers
      break;
    }

    case NGHTTP2_HCAT_HEADERS: {
      // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers
      // if local is not complete.
      if (!stream->deferred_reset_) { // no deferred reset pending
        const bool is_server_session = use_new_codec_wrapper_
                                           ? adapter_->IsServerSession()
                                           : nghttp2_session_check_server_session(session_); // server-side session?
        if (is_server_session || stream->received_noninformational_headers_) {
          ASSERT(stream->remote_end_stream_);
          stream->decodeTrailers(); // decode trailers
        } else {
          // We're a client session and still waiting for non-informational headers.
          stream->decodeHeaders(); // decode headers
        }
      }
      break;
    }

    default:
      // We do not currently support push.
      ENVOY_BUG(false, "push not supported");
    }

    break;
  }
  case NGHTTP2_DATA: {
    stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; // record remote end of stream
    stream->decodeData(); // decode data
    break;
  }
  case NGHTTP2_RST_STREAM: {
    ENVOY_CONN_LOG(trace, "remote reset: {}", connection_, frame->rst_stream.error_code);
    stats_.rx_reset_.inc(); // bump stats
    break;
  }
  }

  return okStatus();
}
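
The PING ACK branch at the top of onFrameReceived() leans on the fact that the 8-byte opaque payload is echoed back verbatim: pack the send time into the PING and the ACK carries it home, so matching PING to ACK (or measuring RTT) needs no extra state. A standalone sketch of that round trip:

#include <chrono>
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  uint8_t opaque_data[8];

  // Send side: pack the current time into the PING payload.
  const uint64_t sent_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
      std::chrono::steady_clock::now().time_since_epoch()).count();
  static_assert(sizeof(sent_ns) == sizeof(opaque_data), "PING payload is 8 bytes");
  std::memcpy(opaque_data, &sent_ns, sizeof(sent_ns));

  // ACK side: the peer echoes opaque_data unchanged; recover and log it,
  // mirroring the safeMemcpy() in the callback above.
  uint64_t echoed;
  std::memcpy(&echoed, opaque_data, sizeof(echoed));
  std::cout << "recv PING ACK " << echoed << "\n";
}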

source/common/http/http2/codec_impl.cc line 1465

Status ConnectionImpl::onStreamClose(StreamImpl* stream, uint32_t error_code) {
  if (stream) { // stream exists
    const int32_t stream_id = stream->stream_id_; // stream id

    // Consume buffered on stream_close.
    if (stream->stream_manager_.buffered_on_stream_close_) { // a close was buffered
      stream->stream_manager_.buffered_on_stream_close_ = false; // clear the buffered-close flag
      stats_.deferred_stream_close_.dec(); // update stats
    }

    ENVOY_CONN_LOG(debug, "stream {} closed: {}", connection_, stream_id, error_code);

    if (!stream->remote_end_stream_ || !stream->local_end_stream_) { // one side is still open
      StreamResetReason reason; // stream reset reason
      if (stream->reset_due_to_messaging_error_) { // reset caused by a messaging error
        // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand
        // the flow of resets. I.e., did the reset originate locally? Was it remote? Here,
        // we attempt to track cases in which we sent a reset locally due to an invalid frame
        // received from the remote. We only do that in two cases currently (HTTP messaging layer
        // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict
        // about). In other cases we treat invalid frames as a protocol error and just kill
        // the connection.

        // Get ClientConnectionImpl or ServerConnectionImpl specific stream reset reason,
        // depending whether the connection is upstream or downstream.
        reason = getMessagingErrorResetReason(); // derive the reason
      } else {
        if (error_code == NGHTTP2_REFUSED_STREAM) { // stream refused
          reason = StreamResetReason::RemoteRefusedStreamReset; // set the reset reason
          stream->setDetails(Http2ResponseCodeDetails::get().remote_refused); // record error details
        } else {
          if (error_code == NGHTTP2_CONNECT_ERROR) { // CONNECT error
            reason = StreamResetReason::ConnectError;
          } else {
            reason = StreamResetReason::RemoteReset;
          }
          stream->setDetails(Http2ResponseCodeDetails::get().remote_reset); // record error details
        }
      }

      stream->runResetCallbacks(reason); // run the reset callbacks

    } else if (stream->defer_processing_backedup_streams_ && !stream->reset_reason_.has_value() &&
               stream->stream_manager_.hasBufferedBodyOrTrailers()) {
      ASSERT(error_code == NGHTTP2_NO_ERROR);
      ENVOY_CONN_LOG(debug, "buffered onStreamClose for stream: {}", connection_, stream_id);
      // Buffer the call, rely on the stream->process_buffered_data_callback_
      // to end up invoking.
      stream->stream_manager_.buffered_on_stream_close_ = true; // buffer the close
      stats_.deferred_stream_close_.inc(); // update stats
      return okStatus();
    }

    stream->destroy(); // destroy the stream
    current_stream_id_.reset(); // clear the current stream id
    // TODO(antoniovicente) Test coverage for onCloseStream before deferred reset handling happens.
    pending_deferred_reset_streams_.erase(stream->stream_id_); // drop any pending deferred reset

    connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); // deferred delete via the dispatcher
    // Any unconsumed data must be consumed before the stream is deleted.
    // nghttp2 does not appear to track this internally, and any stream deleted
    // with outstanding window will contribute to a slow connection-window leak.
    if (use_new_codec_wrapper_) { // new codec wrapper
      adapter_->MarkDataConsumedForStream(stream_id, stream->unconsumed_bytes_); // consume the remaining data
      stream->unconsumed_bytes_ = 0; // reset unconsumed bytes
      adapter_->SetStreamUserData(stream->stream_id_, nullptr); // clear the stream user data
    } else {
      nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_); // consume the remaining data
      stream->unconsumed_bytes_ = 0; // reset unconsumed bytes
      nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr); // clear the stream user data
    }
  }

  return okStatus();
}

Status ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) {
  return onStreamClose(getStream(stream_id), error_code);
}

source/common/http/http2/codec_impl.cc line 1323

int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) {
  // The nghttp2 library does not cleanly give us a way to determine whether we received invalid
  // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not.
  // In all cases however it will attempt to send a GOAWAY frame with an error status. If we see
  // an outgoing frame of this type, we will return an error code so that we can abort execution.
  ENVOY_CONN_LOG(trace, "sent frame type={}, stream_id={}, length={}", connection_,
                 static_cast<uint64_t>(frame->hd.type), frame->hd.stream_id, frame->hd.length);
  StreamImpl* stream = getStream(frame->hd.stream_id); // look up the stream
  if (stream != nullptr) { // stream exists
    if (frame->hd.type != METADATA_FRAME_TYPE) {
      stream->bytes_meter_->addWireBytesSent(frame->hd.length + H2_FRAME_HEADER_SIZE); // count wire bytes sent
    }
    if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_CONTINUATION) {
      stream->bytes_meter_->addHeaderBytesSent(frame->hd.length + H2_FRAME_HEADER_SIZE); // count header bytes sent
    }
  }
  switch (frame->hd.type) { // switch on frame type
  case NGHTTP2_GOAWAY: { // GOAWAY
    ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code);
    if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { // error code present
      // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting.
      // As such, it is not reliable to call sendPendingFrames() again after this and we assume
      // that the connection is going to get torn down immediately. One byproduct of this is that
      // we need to cancel all pending flush stream timeouts since they can race with connection
      // teardown. As part of the work to remove exceptions we should aim to clean up all of this
      // error handling logic and only handle this type of case at the end of dispatch.
      for (auto& stream : active_streams_) { // iterate over active streams
        stream->disarmStreamIdleTimer(); // disarm the stream idle timer
      }
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
    break;
  }

  case NGHTTP2_RST_STREAM: { // RST_STREAM
    ENVOY_CONN_LOG(debug, "sent reset code={}", connection_, frame->rst_stream.error_code);
    stats_.tx_reset_.inc(); // bump stats
    break;
  }

  case NGHTTP2_HEADERS:
  case NGHTTP2_DATA: {
    StreamImpl* stream = getStream(frame->hd.stream_id); // look up the stream
    stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; // record local end-stream sent
    break;
  }
  }

  return 0;
}

source/common/http/http2/codec_impl.cc line 1425

int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) {
  ENVOY_CONN_LOG(trace, "about to send frame type={}, flags={}", connection_,
                 static_cast<uint64_t>(frame->hd.type), static_cast<uint64_t>(frame->hd.flags));
  ASSERT(!is_outbound_flood_monitored_control_frame_);
  // Flag flood monitored outbound control frames.
  is_outbound_flood_monitored_control_frame_ =
      ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) &&
       frame->hd.flags & NGHTTP2_FLAG_ACK) ||
      frame->hd.type == NGHTTP2_RST_STREAM;
  return 0;
}
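
The flag set here feeds the outbound flood accounting: PING/SETTINGS ACKs and RST_STREAM are charged against a tighter control-frame budget than ordinary frames. A sketch of that bookkeeping; FrameCounters and its limits are illustrative stand-ins, not Envoy's ProtocolConstraints API:

#include <cstdint>
#include <iostream>

struct FrameCounters {
  uint32_t outbound_frames = 0;
  uint32_t outbound_control_frames = 0;
  static constexpr uint32_t kMaxOutbound = 10000;       // assumed budget
  static constexpr uint32_t kMaxOutboundControl = 1000; // assumed budget

  // Returns false when either budget is exceeded, i.e. an outbound flood.
  bool record(bool flood_monitored_control_frame) {
    ++outbound_frames;
    if (flood_monitored_control_frame) {
      ++outbound_control_frames;
    }
    return outbound_frames <= kMaxOutbound &&
           outbound_control_frames <= kMaxOutboundControl;
  }
};

int main() {
  FrameCounters counters;
  // A PING ACK is flood-monitored; a HEADERS frame is not.
  bool ok = counters.record(/*flood_monitored_control_frame=*/true);
  ok = counters.record(false) && ok;
  std::cout << (ok ? "within limits" : "outbound frame flood") << "\n";
}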

source/common/http/http2/codec_impl.cc line 1379

int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) {
  ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code),
                 stream_id);

  // Set details of error_code in the stream whenever we have one.
  StreamImpl* stream = getStreamUnchecked(stream_id); // look up the stream (unchecked)
  if (stream != nullptr) { // stream exists
    stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); // record error details
  }

  switch (error_code) { // switch on the error code
  case NGHTTP2_ERR_REFUSED_STREAM: // stream refused

    stats_.stream_refused_errors_.inc(); // bump stats
    return 0;

  case NGHTTP2_ERR_HTTP_HEADER:
  case NGHTTP2_ERR_HTTP_MESSAGING:
    stats_.rx_messaging_error_.inc(); // bump stats
    if (stream_error_on_invalid_http_messaging_) {
      // The stream is about to be closed due to an invalid header or messaging. Don't kill the
      // entire connection if one stream has bad headers or messaging.
      if (stream != nullptr) {
        // See comment below in onStreamClose() for why we do this.
        stream->reset_due_to_messaging_error_ = true; // record the reset cause
      }
      return 0;
    }
    break;

  case NGHTTP2_ERR_FLOW_CONTROL:
  case NGHTTP2_ERR_PROTO:
  case NGHTTP2_ERR_STREAM_CLOSED:
    // Known error conditions that should trigger connection close.
    break;

  default:
    // Unknown error conditions. Trigger ENVOY_BUG and connection close.
    ENVOY_BUG(false, absl::StrCat("Unexpected error_code: ", error_code));
    break;
  }

  // Cause dispatch to return with an error code.
  return NGHTTP2_ERR_CALLBACK_FAILURE;
}

source/common/http/http2/codec_impl.cc line 1545

int ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) {
  ENVOY_CONN_LOG(trace, "recv {} bytes METADATA", connection_, len);

  StreamImpl* stream = getStream(stream_id); // look up the stream
  if (!stream || stream->remote_end_stream_) { // no stream, or remote already ended
    return 0;
  }

  bool success = stream->getMetadataDecoder().receiveMetadata(data, len); // feed the metadata decoder
  return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;
}

source/common/http/http2/codec_impl.cc line 1557

int ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) {
  ENVOY_CONN_LOG(trace, "recv METADATA frame on stream {}, end_metadata: {}", connection_,
                 stream_id, end_metadata);

  StreamImpl* stream = getStream(stream_id); // look up the stream
  if (!stream || stream->remote_end_stream_) { // no stream, or remote already ended
    return 0;
  }

  bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata); // finish the metadata frame
  return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE;
}

source/common/http/http2/codec_impl.cc line 1570

ssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) {
  ASSERT(use_new_codec_wrapper_ == false);
  ENVOY_CONN_LOG(trace, "pack METADATA frame on stream {}", connection_, stream_id);

  StreamImpl* stream = getStream(stream_id); // look up the stream
  if (stream == nullptr) {
    return 0;
  }

  MetadataEncoder& encoder = stream->getMetadataEncoderOld(); // the old-style metadata encoder
  return encoder.packNextFramePayload(buf, len); // pack the next frame payload
}

source/common/http/http2/codec_impl.cc line 1374

int ConnectionImpl::onError(absl::string_view error) {
  ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error);
  return 0;
}

source/common/network/filter_manager_impl.cc line 91

FilterStatus FilterManagerImpl::onWrite() { return onWrite(nullptr, connection_); }

FilterStatus FilterManagerImpl::onWrite(ActiveWriteFilter* filter,
                                        WriteBufferSource& buffer_source) {
  // Filter could return status == FilterStatus::StopIteration immediately, close the connection and
  // use callback to call this function.
  if (connection_.state() != Connection::State::Open) { // connection not open
    return FilterStatus::StopIteration;
  }

  std::list<ActiveWriteFilterPtr>::iterator entry; // write filter iterator
  if (!filter) { // no starting filter
    entry = downstream_filters_.begin(); // start at the first downstream filter
  } else {
    entry = std::next(filter->entry()); // resume from the next filter
  }

  for (; entry != downstream_filters_.end(); entry++) { // iterate over the remaining filters
    StreamBuffer write_buffer = buffer_source.getWriteBuffer(); // fetch the write buffer
    FilterStatus status = (*entry)->filter_->onWrite(write_buffer.buffer, write_buffer.end_stream); // invoke the filter's onWrite
    if (status == FilterStatus::StopIteration || connection_.state() != Connection::State::Open) { // the filter stopped iteration, or the connection is no longer open
      return FilterStatus::StopIteration;
    }
  }

  // Report the final bytes written to the wire
  connection_.streamInfo().addBytesSent(buffer_source.getWriteBuffer().buffer.length()); // record bytes sent on the stream info
  return FilterStatus::Continue;
}
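
The iteration contract mirrors the read path: each write filter sees the buffer in turn, and a StopIteration (or a connection closed from inside a filter) aborts the pass. A minimal standalone sketch with stand-in filter types:

#include <iostream>
#include <list>
#include <memory>
#include <string>

enum class FilterStatus { Continue, StopIteration };

struct WriteFilter {
  virtual ~WriteFilter() = default;
  virtual FilterStatus onWrite(std::string& data) = 0;
};

struct LoggingFilter : WriteFilter {
  FilterStatus onWrite(std::string& data) override {
    std::cout << "passing " << data.size() << " bytes\n";
    return FilterStatus::Continue;
  }
};

struct BlockingFilter : WriteFilter {
  FilterStatus onWrite(std::string&) override {
    return FilterStatus::StopIteration; // e.g. buffering until a later event
  }
};

FilterStatus runWriteFilters(std::list<std::unique_ptr<WriteFilter>>& filters,
                             std::string& data) {
  for (auto& filter : filters) {
    if (filter->onWrite(data) == FilterStatus::StopIteration) {
      return FilterStatus::StopIteration; // abort the pass, as above
    }
  }
  return FilterStatus::Continue;
}

int main() {
  std::list<std::unique_ptr<WriteFilter>> filters;
  filters.push_back(std::make_unique<LoggingFilter>());
  filters.push_back(std::make_unique<BlockingFilter>());
  std::string data = "response bytes";
  if (runWriteFilters(filters, data) == FilterStatus::StopIteration) {
    std::cout << "write paused by a filter\n";
  }
}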

source/common/http/conn_manager_utility.cc line 55

ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec(
    Network::Connection& connection, const Buffer::Instance& data,
    ServerConnectionCallbacks& callbacks, Stats::Scope& scope, Random::RandomGenerator& random,
    Http1::CodecStats::AtomicPtr& http1_codec_stats,
    Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings,
    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,
    uint32_t max_request_headers_kb, uint32_t max_request_headers_count,
    envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction
        headers_with_underscores_action) {
  if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) { // HTTP/2 negotiated
    Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope);
    return std::make_unique<Http2::ServerConnectionImpl>(
        connection, callbacks, stats, random, http2_options, max_request_headers_kb,
        max_request_headers_count, headers_with_underscores_action); // HTTP/2 server connection
  } else {
    Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope); // fall back to HTTP/1.1
    return std::make_unique<Http1::ServerConnectionImpl>(
        connection, stats, callbacks, http1_settings, max_request_headers_kb,
        max_request_headers_count, headers_with_underscores_action); // HTTP/1 server connection
  }
}
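
The selection above keys off the negotiated protocol; a reduced sketch of the same decision, with hypothetical codec types (the real determineNextProtocol() can also sniff the HTTP/2 connection preface when ALPN is absent, which is omitted here):

#include <iostream>
#include <memory>
#include <string>

struct ServerCodec {
  virtual ~ServerCodec() = default;
  virtual const char* name() const = 0;
};
struct Http1Codec : ServerCodec {
  const char* name() const override { return "HTTP/1.1"; }
};
struct Http2Codec : ServerCodec {
  const char* name() const override { return "HTTP/2"; }
};

// ALPN "h2" selects HTTP/2; everything else falls back to HTTP/1.1.
std::unique_ptr<ServerCodec> autoCreateCodec(const std::string& negotiated_alpn) {
  if (negotiated_alpn == "h2") {
    return std::make_unique<Http2Codec>();
  }
  return std::make_unique<Http1Codec>();
}

int main() {
  std::cout << autoCreateCodec("h2")->name() << "\n";       // HTTP/2
  std::cout << autoCreateCodec("http/1.1")->name() << "\n"; // HTTP/1.1
}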

(autoCreateCodec() lands in the same Http2::ServerConnectionImpl constructor shown above at source/common/http/http2/codec_impl.cc line 2237, so the listing is not repeated.)

source/common/http/http2/codec_impl.cc line 2134

ClientConnectionImpl::ClientConnectionImpl(
    Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats,
    Random::RandomGenerator& random_generator,
    const envoy::config::core::v3::Http2ProtocolOptions& http2_options,
    const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count,
    Http2SessionFactory& http2_session_factory)
    : ConnectionImpl(connection, stats, random_generator, http2_options, max_response_headers_kb,
                     max_response_headers_count),
      callbacks_(callbacks) {
  ClientHttp2Options client_http2_options(http2_options, max_response_headers_kb);
  if (use_new_codec_wrapper_) { // new codec wrapper
    if (use_oghttp2_library_) { // oghttp2 library
      adapter_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(),
                                              client_http2_options.ogOptions()); // create the adapter
    } else {
      adapter_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(),
                                              client_http2_options.options()); // create the adapter
    }
    http2_session_factory.init(base(), http2_options); // initialize via the session factory
  } else {
    session_ = http2_session_factory.createOld(http2_callbacks_.callbacks(), base(),
                                               client_http2_options.options()); // create the session
    http2_session_factory.initOld(session_, base(), http2_options); // old-style initialization
  }
  allow_metadata_ = http2_options.allow_metadata(); // record whether METADATA frames are allowed
  idle_session_requires_ping_interval_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(
      http2_options.connection_keepalive(), connection_idle_interval, 0)); // idle interval after which a PING is required
}

source/common/http/codec_client.cc line 179

NoConnectCodecClientProd::NoConnectCodecClientProd(
    CodecType type, Network::ClientConnectionPtr&& connection,
    Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher,
    Random::RandomGenerator& random_generator,
    const Network::TransportSocketOptionsConstSharedPtr& options)
    : CodecClient(type, std::move(connection), host, dispatcher) {
  switch (type) { // switch on the codec type
  case CodecType::HTTP1: {
    // If the transport socket indicates this is being proxied, inform the HTTP/1.1 codec. It will
    // send fully qualified URLs iff the underlying transport is plaintext.
    bool proxied = false;
    if (options && options->http11ProxyInfo().has_value()) {
      proxied = true;
    }
    codec_ = std::make_unique<Http1::ClientConnectionImpl>(
        *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(),
        host->cluster().maxResponseHeadersCount(), proxied); // HTTP/1 client codec
    break;
  }
  case CodecType::HTTP2: {
    codec_ = std::make_unique<Http2::ClientConnectionImpl>(
        *connection_, *this, host->cluster().http2CodecStats(), random_generator,
        host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB,
        host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); // HTTP/2 client codec
    break;
  }
  case CodecType::HTTP3: {
#ifdef ENVOY_ENABLE_QUIC
    auto& quic_session = dynamic_cast<Quic::EnvoyQuicClientSession&>(*connection_);
    codec_ = std::make_unique<Quic::QuicHttpClientConnectionImpl>(
        quic_session, *this, host->cluster().http3CodecStats(), host->cluster().http3Options(),
        Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount()); // HTTP/3 client codec
    // Initialize the session after max request header size is changed in above http client
    // connection creation.
    quic_session.Initialize(); // initialize the QUIC session
    break;
#else
    // Should be blocked by configuration checking at an earlier point.
    PANIC("unexpected");
#endif
  }
  }
}

source/common/http/http2/codec_impl.cc line 1045

Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) {
  ScopeTrackerScopeState scope(this, connection_.dispatcher());
  ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length());
  // Make sure that dispatching_ is set to false after dispatching, even when
  // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a
  // single return after exception removal (#10878)).
  Cleanup cleanup([this]() {
    dispatching_ = false;
    current_slice_ = nullptr;
    current_stream_id_.reset();
  }); // cleanup callback
  last_received_data_time_ = connection_.dispatcher().timeSource().monotonicTime(); // record the receive time
  for (const Buffer::RawSlice& slice : data.getRawSlices()) { // iterate over the raw slices
    current_slice_ = &slice; // current slice
    dispatching_ = true; // mark dispatching
    ssize_t rc;
    if (use_new_codec_wrapper_) { // new codec wrapper
      rc = adapter_->ProcessBytes(absl::string_view(static_cast<char*>(slice.mem_), slice.len_)); // process the bytes
    } else {
      rc = nghttp2_session_mem_recv(session_, static_cast<const uint8_t*>(slice.mem_), slice.len_); // process the bytes
    }
    if (!nghttp2_callback_status_.ok()) { // a callback reported an error
      return nghttp2_callback_status_;
    }
    // This error is returned when nghttp2 library detected a frame flood by one of its
    // internal mechanisms. Most flood protection is done by Envoy's codec and this error
    // should never be returned. However it is handled here in case nghttp2 has some flood
    // protections that Envoy's codec does not have.
    if (rc == NGHTTP2_ERR_FLOODED) { // nghttp2 detected a frame flood
      return bufferFloodError(
          "Flooding was detected in this HTTP/2 session, and it must be closed");
    }
    if (rc != static_cast<ssize_t>(slice.len_)) {
      return codecProtocolError(nghttp2_strerror(rc));
    }

    current_slice_ = nullptr;
    dispatching_ = false;
    current_stream_id_.reset();
  }

  ENVOY_CONN_LOG(trace, "dispatched {} bytes", connection_, data.length());
  data.drain(data.length()); // drain the input

  // Decoding incoming frames can generate outbound frames so flush pending.
  return sendPendingFrames(); // flush any pending frames
}
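
Two details of the loop above carry the error handling: a callback can fail out-of-band (nghttp2_callback_status_), and a short "bytes processed" return from the library is treated as a protocol error. A reduced sketch of the per-slice loop with a hypothetical parser:

#include <iostream>
#include <string>
#include <vector>

// Pretend parser: consumes the whole slice unless it hits a '!' (corrupt byte),
// in which case it returns how many bytes it managed to process.
long processBytes(const std::string& slice) {
  const auto bad = slice.find('!');
  return bad == std::string::npos ? static_cast<long>(slice.size())
                                  : static_cast<long>(bad);
}

int main() {
  const std::vector<std::string> slices = {"frame-1", "frame-2", "fra!me-3"};
  for (const auto& slice : slices) {
    const long rc = processBytes(slice);
    if (rc != static_cast<long>(slice.size())) { // same as the rc != slice.len_ check
      std::cout << "codec protocol error after " << rc << " bytes\n";
      return 1;
    }
    std::cout << "dispatched " << rc << " bytes\n";
  }
}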

source/common/http/http2/codec_impl.cc line 1619

Status ConnectionImpl::sendPendingFrames() {
  if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { // mid-dispatch, or the connection is closed
    return okStatus();
  }

  int rc;
  if (use_new_codec_wrapper_) { // new codec wrapper
    rc = adapter_->Send(); // send pending frames
  } else {
    rc = nghttp2_session_send(session_); // send pending frames
  }
  if (rc != 0) { // an error occurred
    ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE);
    return codecProtocolError(nghttp2_strerror(rc));
  }

  // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event,
  // so iterating through every stream to find the ones that have a deferred reset is not a big
  // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback.
  // This is only done when the reset frame is sent. Thus, it's safe to work directly with the
  // stream map.
  // NOTE: The way we handle deferred reset is essentially best effort. If we intend to do a
  //       deferred reset, we try to finish the stream, including writing any pending data frames.
  //       If we cannot do this (potentially due to not enough window), we just reset the stream.
  //       In general this behavior occurs only when we are trying to send immediate error messages
  //       to short circuit requests. In the best effort case, we complete the stream before
  //       resetting. In other cases, we just do the reset now which will blow away pending data
  //       frames and release any memory associated with the stream.
  if (!pending_deferred_reset_streams_.empty()) { // deferred resets pending
    while (!pending_deferred_reset_streams_.empty()) { // drain the pending set
      auto it = pending_deferred_reset_streams_.begin(); // take the first entry
      auto* stream = it->second; // the stream
      // Sanity check: the stream's id matches the map key.
      ASSERT(it->first == stream->stream_id_);
      pending_deferred_reset_streams_.erase(it); // erase before resetting
      ASSERT(stream->deferred_reset_);
      stream->resetStreamWorker(stream->deferred_reset_.value()); // perform the reset
    }
    RETURN_IF_ERROR(sendPendingFrames()); // flush again; bail on error
  }

  // After all pending frames have been written into the outbound buffer check if any of
  // protocol constraints had been violated.
  Status status = protocol_constraints_.checkOutboundFrameLimits(); // check outbound frame limits
  if (!status.ok()) { // limit violated
    ENVOY_CONN_LOG(debug, "error sending frames: Too many frames in the outbound queue.",
                   connection_);
  }
  return status;
}
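
Note the shape of the deferred-reset drain above: the entry is erased from pending_deferred_reset_streams_ before resetStreamWorker() runs, so a reset queued re-entrantly from inside the callback is picked up by the same while loop rather than invalidating a live iterator. A standalone sketch of that erase-then-invoke pattern:

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

int main() {
  std::map<int32_t, std::function<void()>> pending;
  pending[5] = [] { std::cout << "reset stream 5\n"; };
  pending[7] = [] { std::cout << "reset stream 7\n"; };

  while (!pending.empty()) {
    auto it = pending.begin();
    auto reset = std::move(it->second);
    pending.erase(it); // erase first, as sendPendingFrames() does
    reset();           // safe even if this queues new entries into `pending`
  }
}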