程序员人生 网站导航

nginx upstream使用及源码解析

栏目:互联网时间:2014-10-10 08:00:00

nginx upstream机制使得nginx可以成为一个反向代理服务器,nginx一方面从下游客户端接收http请求,处理请求,并根据请求发送tcp报文到上游服务器,根据上游服务器的返回报文,来向下游客户端发送请求响应报文。
upstream机制也提供了负载分担的功能,可以将请求负载分担到集群服务器的某个服务器上面。

2.1upstream的流程介绍

1分析客户端请求报文,构建发往上游服务器的请求报文。
2调用ngx_http_upstream_init开始与上游服务器建立tcp连接。
  3发送在第一步中组建的请求报文。
4接收来自上游服务器的响应头并进行解析,往下游转发。
5接收来自上游服务器的响应体,进行转发。

在这5个阶段中,upstream机制允许开发人员自己设定相应的处理方式,来达到自己的目的,这也是开发人员使用upstream的方式。


2.2upstream的使用
开发人员使用upstream机制时,主要就是设置上面五个阶段的处理回调函数。
以http反向代理为例:

ngx_http_proxy_handler(ngx_http_request_t *r) { : : : //设置http proxy使用到的upstream机制的各种方法 //设置创建请求报文的回调函数 u->create_request = ngx_http_proxy_create_request; //设置当链接失败时,需要执行的动作 u->reinit_request = ngx_http_proxy_reinit_request; //设置处理上游服务器的响应头回调函数 u->process_header = ngx_http_proxy_process_status_line; //当前无意义 u->abort_request = ngx_http_proxy_abort_request; //请求结束后会调用该方法 u->finalize_request = ngx_http_proxy_finalize_request; //设置upstream的buffer标志位,为0时,以下游网速优先, //不会使用文件缓存响应包体,为1时,有多个buffer,并且 //可以使用文件来缓存响应包体 u->buffering = plcf->upstream.buffering; //当buffering为1时会使用到该pipe结构,即下游网速优先,需要使用更多的buffer和临时文件缓存响应 u->pipe = ngx_pcalloc(r->pool, sizeof(ngx_event_pipe_t)); if (u->pipe == NULL) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } u->pipe->input_filter = ngx_event_pipe_copy_input_filter; u->accel = 1; //开始读取请求包体,读取结束后,开始调用ngx_http_upstream_init, //开始upstream的流程 rc = ngx_http_read_client_request_body(r, ngx_http_upstream_init); : : return NGX_DONE; }

2.3upstream源码解析(1.0.15版本)
2.3.1构建发往上游服务器的请求,建立与上游服务器的连接

主要函数是ngx_http_upstream_init_request,该函数会调用用户注册的请求构建函数去构建发往上游服务器的请求,同时将建立与上游服务器的连接。首先介绍两个辅助函数:

ngx_http_upstream_rd_check_broken_connection:该函数用来检查nginx与客户端之间的链路是否可用(它是读方向的包装函数,内部调用下面的ngx_http_upstream_check_broken_connection)。

ngx_http_upstream_connect:该函数用来与上游服务器之间建立连接。


static void
ngx_http_upstream_check_broken_connection(ngx_http_request_t *r,
    ngx_event_t *ev)
{
    /*
     * Check whether the connection between nginx and the downstream
     * client is still usable while an upstream request is in progress.
     * Installed (via the rd_/wr_ wrappers) as the request's read/write
     * event handler in ngx_http_upstream_init_request().
     */
    int                  n;
    char                 buf[1];
    ngx_err_t            err;
    ngx_int_t            event;
    ngx_connection_t    *c;
    ngx_http_upstream_t *u;

    c = r->connection;
    u = r->upstream;

    /* If the peer has closed the connection, recv() returns 0.
     * MSG_PEEK reads one byte without draining the receive buffer,
     * so this only probes whether the read direction still works. */
    n = recv(c->fd, buf, 1, MSG_PEEK);

    err = ngx_socket_errno;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ev->log, err,
                   "http upstream recv(): %d", n);

    /* ev->write means this fired as a write event; if the read side can
     * deliver data, or errno is NGX_EAGAIN, the link looks healthy */
    if (ev->write && (n >= 0 || err == NGX_EAGAIN)) {
        return;
    }

    if ((ngx_event_flags & NGX_USE_LEVEL_EVENT) && ev->active) {

        event = ev->write ? NGX_WRITE_EVENT : NGX_READ_EVENT;

        if (ngx_del_event(ev, event, 0) != NGX_OK) {
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }
    }

    /* data could be read from the socket: the connection is fine */
    if (n > 0) {
        return;
    }

    if (n == -1) {
        /* -1 with NGX_EAGAIN just means nothing is readable yet */
        if (err == NGX_EAGAIN) {
            return;
        }

        /* any other error is a real failure */
        ev->error = 1;

    } else { /* n == 0 */
        /* n == 0 normally means the peer closed the connection */
        err = 0;
    }

    /* mark the event and connection as finished/broken */
    ev->eof = 1;
    c->error = 1;

    if (!u->cacheable && u->peer.connection) {
        ngx_log_error(NGX_LOG_INFO, ev->log, err,
                      "client prematurely closed connection, "
                      "so upstream connection is closed too");
        ngx_http_upstream_finalize_request(r, u,
                                           NGX_HTTP_CLIENT_CLOSED_REQUEST);
        return;
    }

    ngx_log_error(NGX_LOG_INFO, ev->log, err,
                  "client prematurely closed connection");

    if (u->peer.connection == NULL) {
        ngx_http_upstream_finalize_request(r, u,
                                           NGX_HTTP_CLIENT_CLOSED_REQUEST);
    }
}

2 ngx_http_upstream_connect

static void
ngx_http_upstream_connect(ngx_http_request_t *r, ngx_http_upstream_t *u)
{
    /*
     * Establish the TCP connection to the chosen upstream server and
     * wire up the read/write handlers on the new connection.
     */
    ngx_int_t          rc;
    ngx_time_t        *tp;
    ngx_connection_t  *c;

    r->connection->log->action = "connecting to upstream";

    r->connection->single_connection = 0;

    /* close out the timing of the previous upstream attempt, if any */
    if (u->state && u->state->response_sec) {
        tp = ngx_timeofday();
        u->state->response_sec = tp->sec - u->state->response_sec;
        u->state->response_msec = tp->msec - u->state->response_msec;
    }

    u->state = ngx_array_push(r->upstream_states);
    if (u->state == NULL) {
        ngx_http_upstream_finalize_request(r, u,
                                           NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }

    ngx_memzero(u->state, sizeof(ngx_http_upstream_state_t));

    /* record the second/millisecond at which this attempt starts */
    tp = ngx_timeofday();
    u->state->response_sec = tp->sec;
    u->state->response_msec = tp->msec;

    /* start connecting to the upstream server */
    rc = ngx_event_connect_peer(&u->peer);

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http upstream connect: %i", rc);

    if (rc == NGX_ERROR) {
        ngx_http_upstream_finalize_request(r, u,
                                           NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }

    u->state->peer = u->peer.name;

    /* On NGX_BUSY / NGX_DECLINED, ngx_http_upstream_next() retries the
     * connect against other peers; only after repeated failures does it
     * call ngx_http_upstream_finalize_request() */
    if (rc == NGX_BUSY) {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "no live upstreams");
        ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_NOLIVE);
        return;
    }

    if (rc == NGX_DECLINED) {
        ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_ERROR);
        return;
    }

    /* rc == NGX_OK || rc == NGX_AGAIN */

    c = u->peer.connection;

    c->data = r;

    /* both read and write events on the nginx<->upstream connection go
     * through ngx_http_upstream_handler ... */
    c->write->handler = ngx_http_upstream_handler;
    c->read->handler = ngx_http_upstream_handler;

    /* ... which dispatches to u->write_event_handler / u->read_event_handler */
    u->write_event_handler = ngx_http_upstream_send_request_handler;
    u->read_event_handler = ngx_http_upstream_process_header;

    c->sendfile &= r->connection->sendfile;
    u->output.sendfile = c->sendfile;

    c->pool = r->pool;
    c->log = r->connection->log;
    c->read->log = c->log;
    c->write->log = c->log;

    /* init or reinit the ngx_output_chain() and ngx_chain_writer() contexts */

    u->writer.out = NULL;
    u->writer.last = &u->writer.out;
    u->writer.connection = c;
    u->writer.limit = 0;

    if (u->request_sent) {
        if (ngx_http_upstream_reinit(r, u) != NGX_OK) {
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }
    }

    if (r->request_body
        && r->request_body->buf
        && r->request_body->temp_file
        && r == r->main)
    {
        /*
         * the r->request_body->buf can be reused for one request only,
         * the subrequests should allocate their own temporary bufs
         */

        u->output.free = ngx_alloc_chain_link(r->pool);
        if (u->output.free == NULL) {
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        u->output.free->buf = r->request_body->buf;
        u->output.free->next = NULL;
        u->output.allocated = 1;

        r->request_body->buf->pos = r->request_body->buf->start;
        r->request_body->buf->last = r->request_body->buf->start;
        r->request_body->buf->tag = u->output.tag;
    }

    u->request_sent = 0;

    /* connection not established yet: arm the connect timer and wait for
     * the event that signals a completed (or failed) connect */
    if (rc == NGX_AGAIN) {
        ngx_add_timer(c->write, u->conf->connect_timeout);
        return;
    }

#if (NGX_HTTP_SSL)

    if (u->ssl && c->ssl == NULL) {
        ngx_http_upstream_ssl_init_connection(r, u, c);
        return;
    }

#endif

    /* already connected: send the request to the upstream server */
    ngx_http_upstream_send_request(r, u);
}

3 ngx_http_upstream_init_request

static void
ngx_http_upstream_init_request(ngx_http_request_t *r)
{
    /*
     * Build the request destined for the upstream server (via the
     * module-supplied create_request callback), pick or resolve the
     * upstream peer, and start connecting to it.
     */
    ngx_str_t                      *host;
    ngx_uint_t                      i;
    ngx_resolver_ctx_t             *ctx, temp;
    ngx_http_cleanup_t             *cln;
    ngx_http_upstream_t            *u;
    ngx_http_core_loc_conf_t       *clcf;
    ngx_http_upstream_srv_conf_t   *uscf, **uscfp;
    ngx_http_upstream_main_conf_t  *umcf;

    if (r->aio) {
        return;
    }

    u = r->upstream;

    u->store = (u->conf->store || u->conf->store_lengths);

    /* ignore_client_abort == 0 means nginx must watch whether the
     * nginx<->client connection stays alive */
    if (!u->store && !r->post_action && !u->conf->ignore_client_abort) {
        r->read_event_handler = ngx_http_upstream_rd_check_broken_connection;
        r->write_event_handler = ngx_http_upstream_wr_check_broken_connection;
    }

    /* NOTE(review): request_bufs appears to be (re)built inside the
     * create_request callback below; this only seeds it from the
     * already-read request body — confirm against the module used */
    if (r->request_body) {
        u->request_bufs = r->request_body->bufs;
    }

    /* invoke the module-supplied create_request callback */
    if (u->create_request(r) != NGX_OK) {
        ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }

    /* u->conf->local holds the local address used when connecting upstream */
    u->peer.local = u->conf->local;

    /* fetch the http core module configuration for this location */
    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /* set up the output-chain parameters (mainly buffers) used when
     * sending the request to the upstream server */
    u->output.alignment = clcf->directio_alignment;
    u->output.pool = r->pool;
    u->output.bufs.num = 1;
    u->output.bufs.size = clcf->client_body_buffer_size;
    /* the writer callback that actually pushes data out */
    u->output.output_filter = ngx_chain_writer;
    u->output.filter_ctx = &u->writer;

    u->writer.pool = r->pool;

    if (r->upstream_states == NULL) {

        r->upstream_states = ngx_array_create(r->pool, 1,
                                              sizeof(ngx_http_upstream_state_t));
        if (r->upstream_states == NULL) {
            ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

    } else {

        u->state = ngx_array_push(r->upstream_states);
        if (u->state == NULL) {
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        ngx_memzero(u->state, sizeof(ngx_http_upstream_state_t));
    }

    /* register ngx_http_upstream_cleanup on the request's cleanup list;
     * it runs when the request is torn down */
    cln = ngx_http_cleanup_add(r, 0);
    if (cln == NULL) {
        ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }

    /* ngx_http_upstream_cleanup mainly releases the resolver context and
     * finalizes the upstream request */
    cln->handler = ngx_http_upstream_cleanup;
    cln->data = r;
    u->cleanup = &cln->handler;

    /* u->resolved carries the connection information for the upstream
     * server; a module may fill it in directly, otherwise the address
     * comes from the configuration */
    if (u->resolved == NULL) {

        uscf = u->conf->upstream;

    } else {

        /* an explicit server address was supplied: just connect to it */
        if (u->resolved->sockaddr) {

            if (ngx_http_upstream_create_round_robin_peer(r, u->resolved)
                != NGX_OK)
            {
                ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
                return;
            }

            ngx_http_upstream_connect(r, u);

            return;
        }

        /* here, host should name an upstream {} group */
        host = &u->resolved->host;

        umcf = ngx_http_get_module_main_conf(r, ngx_http_upstream_module);

        uscfp = umcf->upstreams.elts;

        /* scan the configured upstream groups for a match */
        for (i = 0; i < umcf->upstreams.nelts; i++) {

            uscf = uscfp[i];

            if (uscf->host.len == host->len
                && ((uscf->port == 0 && u->resolved->no_port)
                    || uscf->port == u->resolved->port)
                && ngx_memcmp(uscf->host.data, host->data, host->len) == 0)
            {
                goto found;
            }
        }

        if (u->resolved->port == 0) {
            /* NOTE(review): the escaped quotes around %V were lost when this
             * listing was extracted; the original nginx source reads \"%V\" */
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "no port in upstream "%V"", host);
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        temp.name = *host;

        /* the host name must be resolved asynchronously */
        ctx = ngx_resolve_start(clcf->resolver, &temp);
        if (ctx == NULL) {
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        if (ctx == NGX_NO_RESOLVER) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "no resolver defined to resolve %V", host);
            ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY);
            return;
        }

        /* ngx_http_upstream_resolve_handler runs once resolution finishes */
        ctx->name = *host;
        ctx->type = NGX_RESOLVE_A;
        ctx->handler = ngx_http_upstream_resolve_handler;
        ctx->data = r;
        ctx->timeout = clcf->resolver_timeout;

        u->resolved->ctx = ctx;

        if (ngx_resolve_name(ctx) != NGX_OK) {
            u->resolved->ctx = NULL;
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        return;
    }

found:

    /* peer.init() picks a concrete server according to the configured
     * balancing algorithm, e.g. ngx_http_upstream_init_ip_hash_peer */
    if (uscf->peer.init(r, uscf) != NGX_OK) {
        ngx_http_upstream_finalize_request(r, u,
                                           NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }

    /* connect to the chosen upstream server */
    ngx_http_upstream_connect(r, u);
}
2.3.2往上游发送请求 

   当建立了与上游服务器的连接后,就会向上游服务器发送请求,主要函数是ngx_http_upstream_send_request。

static void
ngx_http_upstream_send_request(ngx_http_request_t *r, ngx_http_upstream_t *u)
{
    /*
     * Send the previously built request (u->request_bufs) to the
     * upstream server once the connection is established.  Re-entered
     * on subsequent write events until everything has been sent.
     */
    ngx_int_t          rc;
    ngx_connection_t  *c;

    /* peer.connection is the nginx<->upstream connection */
    c = u->peer.connection;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http upstream send request");

    if (!u->request_sent && ngx_http_upstream_test_connect(c) != NGX_OK) {
        ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_ERROR);
        return;
    }

    c->log->action = "sending request to upstream";

    /* Push the request through ngx_output_chain().  request_sent records
     * whether the request has already been (partially) sent: if so, only
     * the unsent remainder kept in u->output must go out, hence the NULL
     * chain argument. */
    rc = ngx_output_chain(&u->output,
                          u->request_sent ? NULL : u->request_bufs);

    /* mark that (at least part of) the request has been sent */
    u->request_sent = 1;

    if (rc == NGX_ERROR) {
        ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_ERROR);
        return;
    }

    /* drop any pending write timer before re-arming it below */
    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    /* NGX_AGAIN: not everything was sent; arm the send timer and wait for
     * the next write event, which re-enters this function */
    if (rc == NGX_AGAIN) {
        ngx_add_timer(c->write, u->conf->send_timeout);

        /* set the send low-water mark that wakes the write event */
        if (ngx_handle_write_event(c->write, u->conf->send_lowat) != NGX_OK) {
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        return;
    }

    /* rc == NGX_OK */

    if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) {
        if (ngx_tcp_push(c->fd) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_CRIT, c->log, ngx_socket_errno,
                          ngx_tcp_push_n " failed");
            ngx_http_upstream_finalize_request(r, u,
                                               NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        c->tcp_nopush = NGX_TCP_NOPUSH_UNSET;
    }

    /* the whole request went out: arm the read timer for the response */
    ngx_add_timer(c->read, u->conf->read_timeout);

#if 1
    /* the request has been sent; check whether the response is readable */
    if (c->read->ready) {

        /* post aio operation */

        /*
         * TODO comment
         * although we can post aio operation just in the end
         * of ngx_http_upstream_connect() CHECK IT !!!
         * it's better to do here because we postpone header buffer allocation
         */

        /* the read event is ready: start parsing the response header */
        ngx_http_upstream_process_header(r, u);
        return;
    }
#endif

    /* switch the write handler to a dummy: no more writing until the
     * response has been read */
    u->write_event_handler = ngx_http_upstream_dummy_handler;

    if (ngx_handle_write_event(c->write, 0) != NGX_OK) {
        ngx_http_upstream_finalize_request(r, u,
                                           NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }
}

2.3.3处理上游服务器返回的回应头部
往上游服务器发送完请求后,就要等待着处理服务器的回应了,首先会去处理服务器发回的响应头,处理函数是ngx_http_upstream_process_header。

static void ngx_http_upstream_process_header(ngx_http_request_t *r, ngx_http_upstream_t *u) { ssize_t n; ngx_int_t rc; ngx_connection_t *c; c = u->peer.connection; c->log->action = "reading response header from upstream"; //读事件超时 if (c->read->timedout) { ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_TIMEOUT); return; } //测试与upstream服务器的连通性 if (!u->request_sent && ngx_http_upstream_test_connect(c) != NGX_OK) { ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_ERROR); return; } //尚未实质分配数据缓冲区 if (u->buffer.start == NULL) { //分配数据缓冲区 u->buffer.start = ngx_palloc(r->pool, u->conf->buffer_size); if (u->buffer.start == NULL) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); return; } //对缓冲区描述符进行初始化 u->buffer.pos = u->buffer.start; u->buffer.last = u->buffer.start; u->buffer.end = u->buffer.start + u->conf->buffer_size; u->buffer.temporary = 1; u->buffer.tag = u->output.tag; //为收到的请求头们创建ngx_list结构,用来存贮解析到的 //请求头的名值对 if (ngx_list_init(&u->headers_in.headers, r->pool, 8, sizeof(ngx_table_elt_t)) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); return; } } //准备接受数据吧!!!狂奔的小怪兽 for ( ;; ) { n = c->recv(c, u->buffer.last, u->buffer.end - u->buffer.last); //数据尚未接收完毕 if (n == NGX_AGAIN) { if (ngx_handle_read_event(c->read, 0) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); return; } return; } //返回值为0,标志upstream服务器关闭了连接 if (n == 0) { ngx_log_error(NGX_LOG_ERR, c->log, 0, "upstream prematurely closed connection"); } if (n == NGX_ERROR || n == 0) { ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_ERROR); return; } u->buffer.last += n; //处理接收到响应头数据 rc = u->process_header(r); //响应头尚未接收完毕 if (rc == NGX_AGAIN) { //buffer已经满了,无法容纳更多的响应头部 if (u->buffer.last == u->buffer.end) { ngx_log_error(NGX_LOG_ERR, c->log, 0, "upstream sent too big header"); ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_INVALID_HEADER); return; } continue; } break; } //解析到了无效错误头,真真苦逼啊 if (rc == 
NGX_HTTP_UPSTREAM_INVALID_HEADER) { ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_INVALID_HEADER); return; } if (rc == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); return; } /* rc == NGX_OK */ //头部处理完毕,头部返回码大于300 if (u->headers_in.status_n > NGX_HTTP_SPECIAL_RESPONSE) { if (r->subrequest_in_memory) { u->buffer.last = u->buffer.pos; } if (ngx_http_upstream_test_next(r, u) == NGX_OK) { return; } //处理错误码大于300的错误情况,比如404错误, //页面没找到 if (ngx_http_upstream_intercept_errors(r, u) == NGX_OK) { return; } } //对u->headers_in中的头部进行处理过滤,把u->headers_in中的 //各个头部信息挪到r->headers_out里面,以便于发送 if (ngx_http_upstream_process_headers(r, u) != NGX_OK) { return; } //不是子请求,需要转发响应体 if (!r->subrequest_in_memory) { //调用该函数,先转发响应头,再转发响应体 ngx_http_upstream_send_response(r, u); return; } /* subrequest content in memory */ //以下为子请求的处理流程,当子请求的input_filter未设置时, //其默认的input_filter方法为ngx_http_upstream_non_buffered_filter, //即不转发收到的响应 if (u->input_filter == NULL) { u->input_filter_init = ngx_http_upstream_non_buffered_filter_init; u->input_filter = ngx_http_upstream_non_buffered_filter; u->input_filter_ctx = r; } if (u->input_filter_init(u->input_filter_ctx) == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); return; } //buffer.last和buffer.pos之间是多余的包体 n = u->buffer.last - u->buffer.pos; //下面对这段头部以外的包体进行处理 if (n) { u->buffer.last -= n; u->state->response_length += n; if (u->input_filter(u->input_filter_ctx, n) == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, NGX_ERROR); return; } //表明包体已经全部处理完毕,可以结束请求类 if (u->length == 0) { ngx_http_upstream_finalize_request(r, u, 0); return; } } //设置接收事件的处理函数 u->read_event_handler = ngx_http_upstream_process_body_in_memory; //在该函数中调用u->input_filter对后续包体进行处理, //该函数是针对子请求来说的,不转发包体,在内存中 //对包体进行处理 ngx_http_upstream_process_body_in_memory(r, u);

2.3.4处理响应包体
处理完返回的响应头就要处理响应包体了。处理响应包体比较复杂:在子请求的情况下,不用转发响应包体,处理一下就可以了;在upstream转发模式下,需要转发接收到的响应,这时有下游网速优先和上游网速优先两种情况。下游网速优先假设下游网速比上游快,因此只分配一块固定大小的buffer缓冲区去接收数据,同时进行转发;上游网速优先假设上游网速比下游快,因此需要使用多块buffer缓冲区去缓存数据,必要时还会使用临时文件来缓存接收到的数据。
ngx_http_upstream_process_body_in_memory:该函数用来处理子请求的情形,不转发响应包体。
ngx_http_upstream_send_response:该函数用来处理转发响应包体的情形,该函数会转发响应头和响应体,转发响应体时同时考虑了上游网速优先和下游网速优先两种情况。

1ngx_http_upstream_process_body_in_memory static void ngx_http_upstream_process_body_in_memory(ngx_http_request_t *r, ngx_http_upstream_t *u) { size_t size; ssize_t n; ngx_buf_t *b; ngx_event_t *rev; ngx_connection_t *c; //c是nginx与upstream上游服务器之间建立的连接 c = u->peer.connection; rev = c->read; //读事件超时,结束upstream if (rev->timedout) { return; } //u->buffer用来保存读取的数据 b = &u->buffer; for ( ;; ) { size = b->end - b->last; if (size == 0) { ngx_http_upstream_finalize_request(r, u, NGX_ERROR); return; } //读取相关数据到u->buffer中 n = c->recv(c, b->last, size); //没有数据可读了,等待下一次处理 if (n == NGX_AGAIN) { break; } //对端已经结束了该连接或者发生了错误 if (n == 0 || n == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, n); return; } //response_length记录了已接收相应的长度 u->state->response_length += n; //对接收到的数据进行处理,一般子请求会重置该方法, //未设置的话,则会默认为ngx_http_upstream_non_buffered_filter,该 //方法仅仅是设置下该buffer以便继续接收数据 if (u->input_filter(u->input_filter_ctx, n) == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, NGX_ERROR); return; } //接收方向未ready退出 if (!rev->ready) { break; } } //设置读事件 if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_ERROR); return; } if (rev->active) { ngx_add_timer(rev, u->conf->read_timeout); } else if (rev->timer_set) { ngx_del_timer(rev); } } 2 ngx_http_upstream_send_response 该函数会往客户端发送响应头及转发响应体,根据不同的设置来调用不同的包体转发。 static void ngx_http_upstream_send_response(ngx_http_request_t *r, ngx_http_upstream_t *u) { int tcp_nodelay; ssize_t n; ngx_int_t rc; ngx_event_pipe_t *p; ngx_connection_t *c; ngx_http_core_loc_conf_t *clcf; //发送回应头部,回应头部存放在request的headers_in里面, //在这里,有可能头部没有发送完毕,没关系,未发送 //完的数据在request的out链表里面放着呢,接着处理下面的 //响应包体即可 rc = ngx_http_send_header(r); if (rc == NGX_ERROR || rc > NGX_OK || r->post_action) { ngx_http_upstream_finalize_request(r, u, rc); return; } //c是客户端与nginx之间建立的连接 c = r->connection; if (r->header_only) { if (u->cacheable || u->store) { if (ngx_shutdown_socket(c->fd, NGX_WRITE_SHUTDOWN) == -1) { ngx_connection_error(c, ngx_socket_errno, 
ngx_shutdown_socket_n " failed"); } r->read_event_handler = ngx_http_request_empty_handler; r->write_event_handler = ngx_http_request_empty_handler; c->error = 1; } else { ngx_http_upstream_finalize_request(r, u, rc); return; } } //将header_sent置位,表示响应头部已经发送了 u->header_sent = 1; //请求中带有包体,且包体被保存在了临时文件里面, //现在这些临时文件没有用了,可以清理掉了,OK, //毕竟,服务器的回应都来了,应该没问题了 if (r->request_body && r->request_body->temp_file) { ngx_pool_run_cleanup_file(r->pool, r->request_body->temp_file->file.fd); r->request_body->temp_file->file.fd = NGX_INVALID_FILE; } //获得http core在该loc下的配置 clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); //u->buffering为0表示下游网速优先,不需要开辟更多的缓存区 //来存放相关回应报文 if (!u->buffering) { //未设置input_filter的话,设置默认的处理函数,input_filter是对 //在buffer中接收到的数据进行相应处理,感觉主要有两个功能 //一是把相关buffer挂到out链表,一是对内容进行过滤 if (u->input_filter == NULL) { //啥都不做 u->input_filter_init = ngx_http_upstream_non_buffered_filter_init; //该函数试图在buffer中缓存所有的数据,会操作设置ngx_buf中的 //各个字段 u->input_filter = ngx_http_upstream_non_buffered_filter; u->input_filter_ctx = r; } //设置upstream读事件的处理回调函数 u->read_event_handler = ngx_http_upstream_process_non_buffered_upstream; //设置request写事件的处理回调函数 r->write_event_handler = ngx_http_upstream_process_non_buffered_downstream; r->limit_rate = 0; //调用input_filter之前进行初始化 if (u->input_filter_init(u->input_filter_ctx) == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, 0); return; } if (clcf->tcp_nodelay && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); tcp_nodelay = 1; if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, (const void *) &tcp_nodelay, sizeof(int)) == -1) { ngx_connection_error(c, ngx_socket_errno, "setsockopt(TCP_NODELAY) failed"); ngx_http_upstream_finalize_request(r, u, 0); return; } c->tcp_nodelay = NGX_TCP_NODELAY_SET; } //buffer.last与buffer.pos之间是剩余未被处理的数据 n = u->buffer.last - u->buffer.pos; //n>0,说明buffer中有未被转发的响应包体 if (n) { //在这里设置该last是为了在input_filter中处理时,对其 //进行重置 u->buffer.last = u->buffer.pos; 
//将响应包体的长度加上n u->state->response_length += n; //在input_filter中处理此次接收到的数据 if (u->input_filter(u->input_filter_ctx, n) == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, 0); return; } //在该函数中,开始向下游客户端发送响应包体, //发送完数据还会从上游接收包体 ngx_http_upstream_process_non_buffered_downstream(r); } else { //该buffer中目前仅有头部,没有回应包体,那下次 //从头部接收就可以了 u->buffer.pos = u->buffer.start; u->buffer.last = u->buffer.start; if (ngx_http_send_special(r, NGX_HTTP_FLUSH) == NGX_ERROR) { ngx_http_upstream_finalize_request(r, u, 0); return; } //有数据可以进行处理,处理上游数据,在该函数中 //收完上游包体也会往下游发送相应。 if (u->peer.connection->read->ready) { ngx_http_upstream_process_non_buffered_upstream(r, u); } } return; } /* TODO: preallocate event_pipe bufs, look "Content-Length" */ //下面这部分是buffer为1的情况,该情况允许nginx使用更多的buffer //去缓存包体数据,或者使用文件来进行缓存 p = u->pipe; //对pipe结构进行初始化,该结构专用于上游网速优先的情况 //设置向下游发送响应的调用函数 p->output_filter = (ngx_event_pipe_output_filter_pt) ngx_http_output_filter; p->output_ctx = r; p->tag = u->output.tag; //设置可以使用的缓冲区的个数 p->bufs = u->conf->bufs; //设置busy缓冲区中待发送的响应长度触发值 p->busy_size = u->conf->busy_buffers_size; p->upstream = u->peer.connection; p->downstream = c; p->pool = r->pool; p->log = c->log; p->cache
------分隔线----------------------------
------分隔线----------------------------

最新技术推荐