Introducing application and port shared memory queues.

The goal is to minimize the number of syscalls needed to deliver a message.
Max Romanov 2020-08-11 19:20:34 +03:00
parent a82cf4ffb6
commit e227fc9e62
11 changed files with 1802 additions and 320 deletions

src/nxt_app_nncq.h  (new file, 165 lines added)

@@ -0,0 +1,165 @@
/*
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_APP_NNCQ_H_INCLUDED_
#define _NXT_APP_NNCQ_H_INCLUDED_
/* Application Numeric Naive Circular Queue */
#define NXT_APP_NNCQ_SIZE 131072
typedef uint32_t nxt_app_nncq_atomic_t;
typedef uint16_t nxt_app_nncq_cycle_t;
typedef struct {
nxt_app_nncq_atomic_t head;
nxt_app_nncq_atomic_t entries[NXT_APP_NNCQ_SIZE];
nxt_app_nncq_atomic_t tail;
} nxt_app_nncq_t;
static inline nxt_app_nncq_atomic_t
nxt_app_nncq_head(nxt_app_nncq_t const volatile *q)
{
return q->head;
}
static inline nxt_app_nncq_atomic_t
nxt_app_nncq_tail(nxt_app_nncq_t const volatile *q)
{
return q->tail;
}
static inline void
nxt_app_nncq_tail_cmp_inc(nxt_app_nncq_t volatile *q, nxt_app_nncq_atomic_t t)
{
nxt_atomic_cmp_set(&q->tail, t, t + 1);
}
static inline nxt_app_nncq_atomic_t
nxt_app_nncq_index(nxt_app_nncq_t const volatile *q, nxt_app_nncq_atomic_t i)
{
return i % NXT_APP_NNCQ_SIZE;
}
static inline nxt_app_nncq_atomic_t
nxt_app_nncq_map(nxt_app_nncq_t const volatile *q, nxt_app_nncq_atomic_t i)
{
return i % NXT_APP_NNCQ_SIZE;
}
static inline nxt_app_nncq_cycle_t
nxt_app_nncq_cycle(nxt_app_nncq_t const volatile *q, nxt_app_nncq_atomic_t i)
{
return i / NXT_APP_NNCQ_SIZE;
}
static inline nxt_app_nncq_cycle_t
nxt_app_nncq_next_cycle(nxt_app_nncq_t const volatile *q,
nxt_app_nncq_cycle_t i)
{
return i + 1;
}
static inline nxt_app_nncq_atomic_t
nxt_app_nncq_new_entry(nxt_app_nncq_t const volatile *q,
nxt_app_nncq_cycle_t cycle,
nxt_app_nncq_atomic_t i)
{
return cycle * NXT_APP_NNCQ_SIZE + (i % NXT_APP_NNCQ_SIZE);
}
static inline nxt_app_nncq_atomic_t
nxt_app_nncq_empty(nxt_app_nncq_t const volatile *q)
{
return NXT_APP_NNCQ_SIZE;
}
static void
nxt_app_nncq_init(nxt_app_nncq_t volatile *q)
{
q->head = NXT_APP_NNCQ_SIZE;
nxt_memzero((void *) q->entries,
NXT_APP_NNCQ_SIZE * sizeof(nxt_app_nncq_atomic_t));
q->tail = NXT_APP_NNCQ_SIZE;
}
static void
nxt_app_nncq_enqueue(nxt_app_nncq_t volatile *q, nxt_app_nncq_atomic_t val)
{
nxt_app_nncq_cycle_t e_cycle, t_cycle;
nxt_app_nncq_atomic_t n, t, e, j;
for ( ;; ) {
t = nxt_app_nncq_tail(q);
j = nxt_app_nncq_map(q, t);
e = q->entries[j];
e_cycle = nxt_app_nncq_cycle(q, e);
t_cycle = nxt_app_nncq_cycle(q, t);
if (e_cycle == t_cycle) {
nxt_app_nncq_tail_cmp_inc(q, t);
continue;
}
if (nxt_app_nncq_next_cycle(q, e_cycle) != t_cycle) {
continue;
}
n = nxt_app_nncq_new_entry(q, t_cycle, val);
if (nxt_atomic_cmp_set(&q->entries[j], e, n)) {
break;
}
}
nxt_app_nncq_tail_cmp_inc(q, t);
}
static nxt_app_nncq_atomic_t
nxt_app_nncq_dequeue(nxt_app_nncq_t volatile *q)
{
nxt_app_nncq_cycle_t e_cycle, h_cycle;
nxt_app_nncq_atomic_t h, j, e;
for ( ;; ) {
h = nxt_app_nncq_head(q);
j = nxt_app_nncq_map(q, h);
e = q->entries[j];
e_cycle = nxt_app_nncq_cycle(q, e);
h_cycle = nxt_app_nncq_cycle(q, h);
if (e_cycle != h_cycle) {
if (nxt_app_nncq_next_cycle(q, e_cycle) == h_cycle) {
return nxt_app_nncq_empty(q);
}
continue;
}
if (nxt_atomic_cmp_set(&q->head, h, h + 1)) {
break;
}
}
return nxt_app_nncq_index(q, e);
}
#endif /* _NXT_APP_NNCQ_H_INCLUDED_ */
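
The header above is a fixed-capacity circular queue of small integers driven entirely by compare-and-swap: each slot stores its value tagged with the ring's wrap count (the "cycle"), so producers and consumers can tell a stale slot from a fresh one without a separate occupied flag, and head/tail start at NXT_APP_NNCQ_SIZE so they begin one full cycle ahead of the zero-initialized entries. The standalone sketch below restates that bookkeeping with C11 atomics instead of the nxt_atomic_* wrappers; the ring is shrunk from 131072 to 8 slots, it runs single-threaded, and all names are illustrative, so it only shows the cycle arithmetic, not the multi-process behaviour the real queue is built for.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define QSIZE  8                              /* 131072 in the real header */

typedef struct {
    atomic_uint  head;
    atomic_uint  entries[QSIZE];
    atomic_uint  tail;
} nncq_t;

static void
nncq_enqueue(nncq_t *q, uint32_t val)         /* val must be < QSIZE */
{
    uint32_t  t, j, e, e_cycle, t_cycle;

    for ( ;; ) {
        t = atomic_load(&q->tail);
        j = t % QSIZE;
        e = atomic_load(&q->entries[j]);
        e_cycle = e / QSIZE;
        t_cycle = t / QSIZE;

        if (e_cycle == t_cycle) {
            /* Slot already filled for this cycle by a producer that has
             * not advanced the tail yet: help it along and retry. */
            atomic_compare_exchange_strong(&q->tail, &t, t + 1);
            continue;
        }

        if (e_cycle + 1 != t_cycle) {
            continue;                  /* slot not recycled for this cycle */
        }

        if (atomic_compare_exchange_strong(&q->entries[j], &e,
                                           t_cycle * QSIZE + val))
        {
            break;                     /* stored the cycle-tagged value */
        }
    }

    atomic_compare_exchange_strong(&q->tail, &t, t + 1);
}

static uint32_t
nncq_dequeue(nncq_t *q)
{
    uint32_t  h, j, e, e_cycle, h_cycle;

    for ( ;; ) {
        h = atomic_load(&q->head);
        j = h % QSIZE;
        e = atomic_load(&q->entries[j]);
        e_cycle = e / QSIZE;
        h_cycle = h / QSIZE;

        if (e_cycle != h_cycle) {
            if (e_cycle + 1 == h_cycle) {
                return QSIZE;          /* empty, cf. nxt_app_nncq_empty() */
            }
            continue;
        }

        if (atomic_compare_exchange_strong(&q->head, &h, h + 1)) {
            return e % QSIZE;          /* strip the cycle tag */
        }
    }
}

int
main(void)
{
    uint32_t       v;
    static nncq_t  q;                  /* entries zero-initialized: cycle 0 */

    atomic_store(&q.head, QSIZE);      /* as in nxt_app_nncq_init(): head   */
    atomic_store(&q.tail, QSIZE);      /* and tail start one cycle ahead    */

    nncq_enqueue(&q, 5);
    nncq_enqueue(&q, 3);

    while ((v = nncq_dequeue(&q)) != QSIZE) {
        printf("dequeued %u\n", v);    /* prints 5, then 3 */
    }

    return 0;
}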

src/nxt_app_queue.h  (new file, 119 lines added)

@@ -0,0 +1,119 @@
/*
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_APP_QUEUE_H_INCLUDED_
#define _NXT_APP_QUEUE_H_INCLUDED_
#include <nxt_app_nncq.h>
/* Using Numeric Naive Circular Queue as a backend. */
#define NXT_APP_QUEUE_SIZE NXT_APP_NNCQ_SIZE
#define NXT_APP_QUEUE_MSG_SIZE 31
typedef struct {
uint8_t size;
uint8_t data[NXT_APP_QUEUE_MSG_SIZE];
uint32_t tracking;
} nxt_app_queue_item_t;
typedef struct {
nxt_app_nncq_atomic_t nitems;
nxt_app_nncq_t free_items;
nxt_app_nncq_t queue;
nxt_app_queue_item_t items[NXT_APP_QUEUE_SIZE];
} nxt_app_queue_t;
nxt_inline void
nxt_app_queue_init(nxt_app_queue_t volatile *q)
{
nxt_app_nncq_atomic_t i;
nxt_app_nncq_init(&q->free_items);
nxt_app_nncq_init(&q->queue);
for (i = 0; i < NXT_APP_QUEUE_SIZE; i++) {
nxt_app_nncq_enqueue(&q->free_items, i);
}
q->nitems = 0;
}
nxt_inline nxt_int_t
nxt_app_queue_send(nxt_app_queue_t volatile *q, const void *p,
uint8_t size, uint32_t tracking, int *notify, uint32_t *cookie)
{
nxt_app_queue_item_t *qi;
nxt_app_nncq_atomic_t i;
i = nxt_app_nncq_dequeue(&q->free_items);
if (i == nxt_app_nncq_empty(&q->free_items)) {
return NXT_AGAIN;
}
qi = (nxt_app_queue_item_t *) &q->items[i];
qi->size = size;
nxt_memcpy(qi->data, p, size);
qi->tracking = tracking;
*cookie = i;
nxt_app_nncq_enqueue(&q->queue, i);
i = nxt_atomic_fetch_add(&q->nitems, 1);
if (notify != NULL) {
*notify = (i == 0);
}
return NXT_OK;
}
nxt_inline nxt_bool_t
nxt_app_queue_cancel(nxt_app_queue_t volatile *q, uint32_t cookie,
uint32_t tracking)
{
nxt_app_queue_item_t *qi;
qi = (nxt_app_queue_item_t *) &q->items[cookie];
return nxt_atomic_cmp_set(&qi->tracking, tracking, 0);
}
nxt_inline ssize_t
nxt_app_queue_recv(nxt_app_queue_t volatile *q, void *p, uint32_t *cookie)
{
ssize_t res;
nxt_app_queue_item_t *qi;
nxt_app_nncq_atomic_t i;
i = nxt_app_nncq_dequeue(&q->queue);
if (i == nxt_app_nncq_empty(&q->queue)) {
*cookie = 0;
return -1;
}
qi = (nxt_app_queue_item_t *) &q->items[i];
res = qi->size;
nxt_memcpy(p, qi->data, qi->size);
*cookie = i;
nxt_app_nncq_enqueue(&q->free_items, i);
nxt_atomic_fetch_add(&q->nitems, -1);
return res;
}
#endif /* _NXT_APP_QUEUE_H_INCLUDED_ */
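
nxt_app_queue_t pairs two of these rings, free_items as a slot pool and queue as the delivery order, and unlike the port queue further down it stamps every item with a tracking word and hands the slot index back to the sender as a cookie. nxt_app_queue_cancel() then tries to compare-and-swap that tracking word back to zero, so a cancelling router and a consuming application race for the item and exactly one side wins. The consumer's half lives in nxt_unit.c, whose diff is suppressed below, so the second call in this sketch is only an assumption about how that side claims an item; the CAS itself is the one in nxt_app_queue_cancel().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The tracking word of one queued item; the real nxt_app_queue_item_t
 * also carries up to 31 bytes of message data. */
static _Atomic uint32_t  tracking;

/* Both sides try the same CAS; only one of them can succeed. */
static bool
take_tracking(uint32_t stream)
{
    uint32_t  expected = stream;

    return atomic_compare_exchange_strong(&tracking, &expected, 0);
}

int
main(void)
{
    atomic_store(&tracking, 42);                      /* send(): stream #42 */

    printf("router cancel: %d\n", take_tracking(42)); /* 1: request dropped */
    printf("app claim:     %d\n", take_tracking(42)); /* 0: nothing to do   */

    return 0;
}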


@@ -98,10 +98,10 @@ nxt_http_websocket_client(nxt_task_t *task, void *obj, void *data)
b = next;
}
res = nxt_port_socket_twrite(task, req_rpc_data->app_port,
NXT_PORT_MSG_WEBSOCKET, -1,
req_rpc_data->stream,
task->thread->engine->port->id, out, NULL);
res = nxt_port_socket_write(task, req_rpc_data->app_port,
NXT_PORT_MSG_WEBSOCKET, -1,
req_rpc_data->stream,
task->thread->engine->port->id, out);
if (nxt_slow_path(res != NXT_OK)) {
// TODO: handle
}
@@ -144,10 +144,10 @@ nxt_http_websocket_error_handler(nxt_task_t *task, void *obj, void *data)
goto close_handler;
}
(void) nxt_port_socket_twrite(task, req_rpc_data->app_port,
NXT_PORT_MSG_WEBSOCKET_LAST,
-1, req_rpc_data->stream,
task->thread->engine->port->id, NULL, NULL);
(void) nxt_port_socket_write(task, req_rpc_data->app_port,
NXT_PORT_MSG_WEBSOCKET_LAST,
-1, req_rpc_data->stream,
task->thread->engine->port->id, NULL);
close_handler:


@@ -8,6 +8,7 @@
#include <nxt_runtime.h>
#include <nxt_port.h>
#include <nxt_router.h>
#include <nxt_port_queue.h>
static void nxt_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg);
@@ -68,6 +69,8 @@ nxt_port_new(nxt_task_t *task, nxt_port_id_t id, nxt_pid_t pid,
nxt_queue_init(&port->messages);
nxt_thread_mutex_create(&port->write_mutex);
port->queue_fd = -1;
} else {
nxt_mp_destroy(mp);
}
@@ -99,6 +102,16 @@ nxt_port_close(nxt_task_t *task, nxt_port_t *port)
nxt_router_app_port_close(task, port);
}
}
if (port->queue_fd != -1) {
nxt_fd_close(port->queue_fd);
port->queue_fd = -1;
}
if (port->queue != NULL) {
nxt_mem_munmap(port->queue, sizeof(nxt_port_queue_t));
port->queue = NULL;
}
}
@@ -176,6 +189,7 @@ nxt_port_quit_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg)
}
/* TODO join with process_ready and move to nxt_main_process.c */
nxt_inline void
nxt_port_send_new_port(nxt_task_t *task, nxt_runtime_t *rt,
nxt_port_t *new_port, uint32_t stream)
@@ -227,8 +241,9 @@ nxt_port_send_port(nxt_task_t *task, nxt_port_t *port, nxt_port_t *new_port,
msg->max_share = port->max_share;
msg->type = new_port->type;
return nxt_port_socket_write(task, port, NXT_PORT_MSG_NEW_PORT,
new_port->pair[1], stream, 0, b);
return nxt_port_socket_write2(task, port, NXT_PORT_MSG_NEW_PORT,
new_port->pair[1], new_port->queue_fd,
stream, 0, b);
}
@@ -279,7 +294,7 @@ nxt_port_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg)
msg->u.new_port = port;
}
/* TODO move to nxt_main_process.c */
void
nxt_port_process_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg)
{
@@ -304,6 +319,13 @@ nxt_port_process_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg)
nxt_debug(task, "process %PI ready", msg->port_msg.pid);
if (msg->fd != -1) {
port->queue_fd = msg->fd;
port->queue = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t),
PROT_READ | PROT_WRITE, MAP_SHARED, msg->fd,
0);
}
nxt_port_send_new_port(task, rt, port, msg->port_msg.stream);
}


@@ -42,6 +42,7 @@ struct nxt_port_handlers_s {
/* Request headers. */
nxt_port_handler_t req_headers;
nxt_port_handler_t req_headers_ack;
nxt_port_handler_t req_body;
/* Websocket frame. */
nxt_port_handler_t websocket_frame;
@@ -51,6 +52,8 @@ struct nxt_port_handlers_s {
nxt_port_handler_t oosm;
nxt_port_handler_t shm_ack;
nxt_port_handler_t read_queue;
nxt_port_handler_t read_socket;
};
@@ -91,12 +94,15 @@ typedef enum {
_NXT_PORT_MSG_REQ_HEADERS = nxt_port_handler_idx(req_headers),
_NXT_PORT_MSG_REQ_HEADERS_ACK = nxt_port_handler_idx(req_headers_ack),
_NXT_PORT_MSG_REQ_BODY = nxt_port_handler_idx(req_body),
_NXT_PORT_MSG_WEBSOCKET = nxt_port_handler_idx(websocket_frame),
_NXT_PORT_MSG_DATA = nxt_port_handler_idx(data),
_NXT_PORT_MSG_OOSM = nxt_port_handler_idx(oosm),
_NXT_PORT_MSG_SHM_ACK = nxt_port_handler_idx(shm_ack),
_NXT_PORT_MSG_READ_QUEUE = nxt_port_handler_idx(read_queue),
_NXT_PORT_MSG_READ_SOCKET = nxt_port_handler_idx(read_socket),
NXT_PORT_MSG_MAX = sizeof(nxt_port_handlers_t)
/ sizeof(nxt_port_handler_t),
@@ -124,6 +130,7 @@ typedef enum {
NXT_PORT_MSG_REMOVE_PID = nxt_msg_last(_NXT_PORT_MSG_REMOVE_PID),
NXT_PORT_MSG_REQ_HEADERS = _NXT_PORT_MSG_REQ_HEADERS,
NXT_PORT_MSG_REQ_BODY = _NXT_PORT_MSG_REQ_BODY,
NXT_PORT_MSG_WEBSOCKET = _NXT_PORT_MSG_WEBSOCKET,
NXT_PORT_MSG_WEBSOCKET_LAST = nxt_msg_last(_NXT_PORT_MSG_WEBSOCKET),
@@ -132,6 +139,8 @@ typedef enum {
NXT_PORT_MSG_OOSM = nxt_msg_last(_NXT_PORT_MSG_OOSM),
NXT_PORT_MSG_SHM_ACK = nxt_msg_last(_NXT_PORT_MSG_SHM_ACK),
NXT_PORT_MSG_READ_QUEUE = _NXT_PORT_MSG_READ_QUEUE,
NXT_PORT_MSG_READ_SOCKET = _NXT_PORT_MSG_READ_SOCKET,
} nxt_port_msg_type_t;
@@ -236,6 +245,12 @@ struct nxt_port_s {
nxt_atomic_t use_count;
nxt_process_type_t type;
nxt_fd_t queue_fd;
void *queue;
void *socket_msg;
int from_socket;
};
@@ -286,17 +301,17 @@ void nxt_port_write_enable(nxt_task_t *task, nxt_port_t *port);
void nxt_port_write_close(nxt_port_t *port);
void nxt_port_read_enable(nxt_task_t *task, nxt_port_t *port);
void nxt_port_read_close(nxt_port_t *port);
nxt_int_t nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port,
nxt_uint_t type, nxt_fd_t fd, uint32_t stream, nxt_port_id_t reply_port,
nxt_buf_t *b, void *tracking);
nxt_int_t nxt_port_socket_write2(nxt_task_t *task, nxt_port_t *port,
nxt_uint_t type, nxt_fd_t fd, nxt_fd_t fd2, uint32_t stream,
nxt_port_id_t reply_port, nxt_buf_t *b);
nxt_inline nxt_int_t
nxt_port_socket_write(nxt_task_t *task, nxt_port_t *port,
nxt_uint_t type, nxt_fd_t fd, uint32_t stream, nxt_port_id_t reply_port,
nxt_buf_t *b)
{
return nxt_port_socket_twrite(task, port, type, fd, stream, reply_port, b,
NULL);
return nxt_port_socket_write2(task, port, type, fd, -1, stream, reply_port,
b);
}
void nxt_port_enable(nxt_task_t *task, nxt_port_t *port,

src/nxt_port_queue.h  (new file, 102 lines added)

@@ -0,0 +1,102 @@
/*
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_PORT_QUEUE_H_INCLUDED_
#define _NXT_PORT_QUEUE_H_INCLUDED_
#include <nxt_nncq.h>
/* Using Numeric Naive Circular Queue as a backend. */
#define NXT_PORT_QUEUE_SIZE NXT_NNCQ_SIZE
#define NXT_PORT_QUEUE_MSG_SIZE 31
typedef struct {
uint8_t size;
uint8_t data[NXT_PORT_QUEUE_MSG_SIZE];
} nxt_port_queue_item_t;
typedef struct {
nxt_nncq_atomic_t nitems;
nxt_nncq_t free_items;
nxt_nncq_t queue;
nxt_port_queue_item_t items[NXT_PORT_QUEUE_SIZE];
} nxt_port_queue_t;
nxt_inline void
nxt_port_queue_init(nxt_port_queue_t volatile *q)
{
nxt_nncq_atomic_t i;
nxt_nncq_init(&q->free_items);
nxt_nncq_init(&q->queue);
for (i = 0; i < NXT_PORT_QUEUE_SIZE; i++) {
nxt_nncq_enqueue(&q->free_items, i);
}
q->nitems = 0;
}
nxt_inline nxt_int_t
nxt_port_queue_send(nxt_port_queue_t volatile *q, const void *p, uint8_t size,
int *notify)
{
nxt_nncq_atomic_t i;
nxt_port_queue_item_t *qi;
i = nxt_nncq_dequeue(&q->free_items);
if (i == nxt_nncq_empty(&q->free_items)) {
*notify = 0;
return NXT_AGAIN;
}
qi = (nxt_port_queue_item_t *) &q->items[i];
qi->size = size;
nxt_memcpy(qi->data, p, size);
nxt_nncq_enqueue(&q->queue, i);
i = nxt_atomic_fetch_add(&q->nitems, 1);
*notify = (i == 0);
return NXT_OK;
}
nxt_inline ssize_t
nxt_port_queue_recv(nxt_port_queue_t volatile *q, void *p)
{
ssize_t res;
nxt_nncq_atomic_t i;
nxt_port_queue_item_t *qi;
i = nxt_nncq_dequeue(&q->queue);
if (i == nxt_nncq_empty(&q->queue)) {
return -1;
}
qi = (nxt_port_queue_item_t *) &q->items[i];
res = qi->size;
nxt_memcpy(p, qi->data, qi->size);
nxt_nncq_enqueue(&q->free_items, i);
nxt_atomic_fetch_add(&q->nitems, -1);
return res;
}
#endif /* _NXT_PORT_QUEUE_H_INCLUDED_ */
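
The nitems counter is where the commit's stated goal of saving syscalls comes from: nxt_port_queue_send() asks the caller to notify the peer only when the count goes from zero to one, and the queue read handler added further down bumps the count while it drains, so producers stay silent until the consumer has gone back to sleep. A standalone sketch of just that accounting, with illustrative names and no real ring behind it:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  nitems;           /* mirrors nxt_port_queue_t.nitems */

/* Producer side: returns true when a wake-up through the socket
 * (i.e. an actual write(2) syscall) is required. */
static bool
enqueue_one(void)
{
    /* ... the message bytes would be copied into the shm ring here ... */

    return atomic_fetch_add(&nitems, 1) == 0;
}

/* Consumer side: drain one item and keep the counter in sync. */
static void
dequeue_one(void)
{
    /* ... the message bytes would be copied out of the shm ring here ... */

    atomic_fetch_add(&nitems, -1);
}

int
main(void)
{
    int  i, wakeups = 0;

    for (i = 0; i < 1000; i++) {
        if (enqueue_one()) {
            wakeups++;               /* the real code sends READ_QUEUE here */
        }
    }

    for (i = 0; i < 1000; i++) {
        dequeue_one();
    }

    printf("1000 messages, %d wake-up syscall(s)\n", wakeups);   /* 1 */

    return 0;
}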


@@ -5,6 +5,7 @@
*/
#include <nxt_main.h>
#include <nxt_port_queue.h>
static nxt_int_t nxt_port_msg_chk_insert(nxt_task_t *task, nxt_port_t *port,
@@ -17,6 +18,8 @@ static nxt_buf_t *nxt_port_buf_completion(nxt_task_t *task,
static nxt_port_send_msg_t *nxt_port_msg_insert_tail(nxt_port_t *port,
nxt_port_send_msg_t *msg);
static void nxt_port_read_handler(nxt_task_t *task, void *obj, void *data);
static void nxt_port_queue_read_handler(nxt_task_t *task, void *obj,
void *data);
static void nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port,
nxt_port_recv_msg_t *msg);
static nxt_buf_t *nxt_port_buf_alloc(nxt_port_t *port);
@@ -143,12 +146,15 @@ nxt_port_release_send_msg(nxt_port_send_msg_t *msg)
nxt_int_t
nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type,
nxt_fd_t fd, uint32_t stream, nxt_port_id_t reply_port, nxt_buf_t *b,
void *tracking)
nxt_port_socket_write2(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type,
nxt_fd_t fd, nxt_fd_t fd2, uint32_t stream, nxt_port_id_t reply_port,
nxt_buf_t *b)
{
int notify;
uint8_t *p;
nxt_int_t res;
nxt_port_send_msg_t msg;
uint8_t qmsg[NXT_PORT_QUEUE_MSG_SIZE];
msg.link.next = NULL;
msg.link.prev = NULL;
@@ -156,14 +162,10 @@ nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type,
msg.buf = b;
msg.share = 0;
msg.fd = fd;
msg.fd2 = -1;
msg.fd2 = fd2;
msg.close_fd = (type & NXT_PORT_MSG_CLOSE_FD) != 0;
msg.allocated = 0;
if (tracking != NULL) {
nxt_port_mmap_tracking_write(msg.tracking_msg, tracking);
}
msg.port_msg.stream = stream;
msg.port_msg.pid = nxt_pid;
msg.port_msg.reply_port = reply_port;
@@ -172,7 +174,42 @@ nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type,
msg.port_msg.mmap = 0;
msg.port_msg.nf = 0;
msg.port_msg.mf = 0;
msg.port_msg.tracking = tracking != NULL;
if (port->queue != NULL && type != _NXT_PORT_MSG_READ_QUEUE) {
if (fd == -1
&& (b == NULL
|| nxt_buf_mem_used_size(&b->mem)
<= (int) (NXT_PORT_QUEUE_MSG_SIZE - sizeof(nxt_port_msg_t))))
{
p = nxt_cpymem(qmsg, &msg.port_msg, sizeof(nxt_port_msg_t));
if (b != NULL) {
p = nxt_cpymem(p, b->mem.pos, nxt_buf_mem_used_size(&b->mem));
}
res = nxt_port_queue_send(port->queue, qmsg, p - qmsg, &notify);
nxt_debug(task, "port{%d,%d} %d: enqueue %d notify %d, %d",
(int) port->pid, (int) port->id, port->socket.fd,
(int) (p - qmsg), notify, res);
if (notify == 0) {
return res;
}
msg.port_msg.type = _NXT_PORT_MSG_READ_QUEUE;
msg.buf = NULL;
} else {
qmsg[0] = _NXT_PORT_MSG_READ_SOCKET;
res = nxt_port_queue_send(port->queue, qmsg, 1, &notify);
nxt_debug(task, "port{%d,%d} %d: enqueue 1 notify %d, %d",
(int) port->pid, (int) port->id, port->socket.fd,
notify, res);
}
}
res = nxt_port_msg_chk_insert(task, port, &msg);
if (nxt_fast_path(res == NXT_DECLINED)) {
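
The branch added above is the whole fast/slow-path split. A message that carries no file descriptor and whose serialized form fits into NXT_PORT_QUEUE_MSG_SIZE bytes is copied straight into the shared-memory queue; if notify comes back zero the function returns without touching the socket at all, and if notify is set only an empty _NXT_PORT_MSG_READ_QUEUE wake-up is written. Anything larger, or anything with an fd, gets a one-byte _NXT_PORT_MSG_READ_SOCKET marker queued and then continues down the normal socket path, so the reader can interleave queue and socket messages in the order they were sent. A toy restatement of the path choice, with stand-in sizes rather than Unit's definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define QUEUE_MSG_SIZE  31   /* NXT_PORT_QUEUE_MSG_SIZE */

typedef enum {
    PATH_QUEUE_INLINE,   /* header + body copied into the shm queue        */
    PATH_QUEUE_MARKER,   /* 1-byte READ_SOCKET marker queued, full message
                            still written to the socket                    */
} send_path_t;

static send_path_t
choose_send_path(bool has_fd, size_t header_size, size_t body_size)
{
    if (!has_fd && header_size + body_size <= QUEUE_MSG_SIZE) {
        return PATH_QUEUE_INLINE;
    }

    return PATH_QUEUE_MARKER;
}

int
main(void)
{
    /* header_size stands in for sizeof(nxt_port_msg_t); 16 is a guess. */
    printf("%d\n", choose_send_path(false, 16, 8));    /* 0: fits inline   */
    printf("%d\n", choose_send_path(true, 16, 8));     /* 1: carries an fd */
    printf("%d\n", choose_send_path(false, 16, 4096)); /* 1: too big       */

    return 0;
}
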
@@ -308,10 +345,6 @@ nxt_port_write_handler(nxt_task_t *task, void *obj, void *data)
port->max_size / PORT_MMAP_MIN_SIZE);
}
if (msg->port_msg.tracking) {
iov[0].iov_len += sizeof(msg->tracking_msg);
}
sb.limit -= iov[0].iov_len;
nxt_sendbuf_mem_coalesce(task, &sb);
@@ -368,7 +401,6 @@ nxt_port_write_handler(nxt_task_t *task, void *obj, void *data)
msg->fd2 = -1;
msg->share += n;
msg->port_msg.nf = 1;
msg->port_msg.tracking = 0;
if (msg->share >= port->max_share) {
msg->share = 0;
@@ -576,7 +608,9 @@ nxt_port_read_enable(nxt_task_t *task, nxt_port_t *port)
port->engine = task->thread->engine;
port->socket.read_work_queue = &port->engine->fast_work_queue;
port->socket.read_handler = nxt_port_read_handler;
port->socket.read_handler = port->queue != NULL
? nxt_port_queue_read_handler
: nxt_port_read_handler;
port->socket.error_handler = nxt_port_error_handler;
nxt_fd_event_enable_read(port->engine, &port->socket);
@@ -660,6 +694,206 @@ nxt_port_read_handler(nxt_task_t *task, void *obj, void *data)
}
static void
nxt_port_queue_read_handler(nxt_task_t *task, void *obj, void *data)
{
ssize_t n;
nxt_buf_t *b;
nxt_port_t *port;
struct iovec iov[2];
nxt_port_queue_t *queue;
nxt_port_recv_msg_t msg, *smsg;
uint8_t qmsg[NXT_PORT_QUEUE_MSG_SIZE];
port = nxt_container_of(obj, nxt_port_t, socket);
msg.port = port;
nxt_assert(port->engine == task->thread->engine);
queue = port->queue;
nxt_atomic_fetch_add(&queue->nitems, 1);
for ( ;; ) {
if (port->from_socket == 0) {
n = nxt_port_queue_recv(queue, qmsg);
if (n < 0 && !port->socket.read_ready) {
nxt_atomic_fetch_add(&queue->nitems, -1);
n = nxt_port_queue_recv(queue, qmsg);
if (n < 0) {
return;
}
nxt_atomic_fetch_add(&queue->nitems, 1);
}
if (n == 1 && qmsg[0] == _NXT_PORT_MSG_READ_SOCKET) {
port->from_socket++;
nxt_debug(task, "port{%d,%d} %d: dequeue 1 read_socket %d",
(int) port->pid, (int) port->id, port->socket.fd,
port->from_socket);
n = -1;
continue;
}
nxt_debug(task, "port{%d,%d} %d: dequeue %d",
(int) port->pid, (int) port->id, port->socket.fd,
(int) n);
} else {
if ((smsg = port->socket_msg) != NULL && smsg->size != 0) {
msg.port_msg = smsg->port_msg;
b = smsg->buf;
n = smsg->size;
msg.fd = smsg->fd;
msg.fd2 = smsg->fd2;
smsg->size = 0;
port->from_socket--;
nxt_debug(task, "port{%d,%d} %d: use suspended message %d",
(int) port->pid, (int) port->id, port->socket.fd,
(int) n);
goto process;
}
n = -1;
}
if (n < 0 && !port->socket.read_ready) {
nxt_atomic_fetch_add(&queue->nitems, -1);
return;
}
b = nxt_port_buf_alloc(port);
if (nxt_slow_path(b == NULL)) {
/* TODO: disable event for some time */
}
if (n >= (ssize_t) sizeof(nxt_port_msg_t)) {
nxt_memcpy(&msg.port_msg, qmsg, sizeof(nxt_port_msg_t));
if (n > (ssize_t) sizeof(nxt_port_msg_t)) {
nxt_memcpy(b->mem.pos, qmsg + sizeof(nxt_port_msg_t),
n - sizeof(nxt_port_msg_t));
}
} else {
iov[0].iov_base = &msg.port_msg;
iov[0].iov_len = sizeof(nxt_port_msg_t);
iov[1].iov_base = b->mem.pos;
iov[1].iov_len = port->max_size;
n = nxt_socketpair_recv(&port->socket, &msg.fd, iov, 2);
if (n == (ssize_t) sizeof(nxt_port_msg_t)
&& msg.port_msg.type == _NXT_PORT_MSG_READ_QUEUE)
{
nxt_port_buf_free(port, b);
nxt_debug(task, "port{%d,%d} %d: recv %d read_queue",
(int) port->pid, (int) port->id, port->socket.fd,
(int) n);
continue;
}
nxt_debug(task, "port{%d,%d} %d: recvmsg %d",
(int) port->pid, (int) port->id, port->socket.fd,
(int) n);
if (n > 0) {
if (port->from_socket == 0) {
nxt_debug(task, "port{%d,%d} %d: suspend message %d",
(int) port->pid, (int) port->id, port->socket.fd,
(int) n);
smsg = port->socket_msg;
if (nxt_slow_path(smsg == NULL)) {
smsg = nxt_mp_alloc(port->mem_pool,
sizeof(nxt_port_recv_msg_t));
if (nxt_slow_path(smsg == NULL)) {
nxt_alert(task, "port{%d,%d} %d: suspend message "
"failed",
(int) port->pid, (int) port->id,
port->socket.fd);
return;
}
port->socket_msg = smsg;
} else {
if (nxt_slow_path(smsg->size != 0)) {
nxt_alert(task, "port{%d,%d} %d: too many suspend "
"messages",
(int) port->pid, (int) port->id,
port->socket.fd);
return;
}
}
smsg->port_msg = msg.port_msg;
smsg->buf = b;
smsg->size = n;
smsg->fd = msg.fd;
smsg->fd2 = msg.fd2;
continue;
}
port->from_socket--;
}
}
process:
if (n > 0) {
msg.buf = b;
msg.size = n;
nxt_port_read_msg_process(task, port, &msg);
/*
* To disable instant completion or buffer re-usage,
* handler should reset 'msg.buf'.
*/
if (msg.buf == b) {
nxt_port_buf_free(port, b);
}
continue;
}
if (n == NXT_AGAIN) {
nxt_port_buf_free(port, b);
nxt_fd_event_enable_read(task->thread->engine, &port->socket);
continue;
}
/* n == 0 || n == NXT_ERROR */
nxt_work_queue_add(&task->thread->engine->fast_work_queue,
nxt_port_error_handler, task, &port->socket, NULL);
return;
}
}
typedef struct {
uint32_t stream;
uint32_t pid;
@@ -831,12 +1065,7 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port,
b = orig_b = msg->buf;
b->mem.free += msg->size;
if (msg->port_msg.tracking) {
msg->cancelled = nxt_port_mmap_tracking_read(task, msg) == 0;
} else {
msg->cancelled = 0;
}
msg->cancelled = 0;
if (nxt_slow_path(msg->port_msg.nf != 0)) {


@@ -15,6 +15,8 @@
#include <nxt_unit_request.h>
#include <nxt_unit_response.h>
#include <nxt_router_request.h>
#include <nxt_app_queue.h>
#include <nxt_port_queue.h>
typedef struct {
nxt_str_t type;
@@ -92,6 +94,12 @@ static nxt_int_t nxt_router_conf_create(nxt_task_t *task,
static nxt_int_t nxt_router_conf_process_static(nxt_task_t *task,
nxt_router_conf_t *rtcf, nxt_conf_value_t *conf);
static nxt_app_t *nxt_router_app_find(nxt_queue_t *queue, nxt_str_t *name);
static nxt_int_t nxt_router_app_queue_init(nxt_task_t *task,
nxt_port_t *port);
static nxt_int_t nxt_router_port_queue_init(nxt_task_t *task,
nxt_port_t *port);
static nxt_int_t nxt_router_port_queue_map(nxt_task_t *task,
nxt_port_t *port, nxt_fd_t fd);
static void nxt_router_listen_socket_rpc_create(nxt_task_t *task,
nxt_router_temp_conf_t *tmcf, nxt_socket_conf_t *skcf);
static void nxt_router_listen_socket_ready(nxt_task_t *task,
@@ -473,21 +481,25 @@ nxt_router_start_app_process(nxt_task_t *task, nxt_app_t *app)
nxt_inline nxt_bool_t
nxt_router_msg_cancel(nxt_task_t *task, nxt_msg_info_t *msg_info,
uint32_t stream)
nxt_router_msg_cancel(nxt_task_t *task, nxt_request_rpc_data_t *req_rpc_data)
{
nxt_buf_t *b, *next;
nxt_bool_t cancelled;
nxt_buf_t *b, *next;
nxt_bool_t cancelled;
nxt_msg_info_t *msg_info;
msg_info = &req_rpc_data->msg_info;
if (msg_info->buf == NULL) {
return 0;
}
cancelled = nxt_port_mmap_tracking_cancel(task, &msg_info->tracking,
stream);
cancelled = nxt_app_queue_cancel(req_rpc_data->app->shared_port->queue,
msg_info->tracking_cookie,
req_rpc_data->stream);
if (cancelled) {
nxt_debug(task, "stream #%uD: cancelled by router", stream);
nxt_debug(task, "stream #%uD: cancelled by router",
req_rpc_data->stream);
}
for (b = msg_info->buf; b != NULL; b = next) {
@@ -529,7 +541,7 @@ nxt_request_rpc_data_unlink(nxt_task_t *task,
{
nxt_http_request_t *r;
nxt_router_msg_cancel(task, &req_rpc_data->msg_info, req_rpc_data->stream);
nxt_router_msg_cancel(task, req_rpc_data);
if (req_rpc_data->app_port != NULL) {
nxt_router_app_port_release(task, req_rpc_data->app_port,
@@ -573,6 +585,7 @@ nxt_request_rpc_data_unlink(nxt_task_t *task,
static void
nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg)
{
nxt_int_t res;
nxt_app_t *app;
nxt_port_t *port, *main_app_port;
nxt_runtime_t *rt;
@@ -592,6 +605,17 @@ nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg)
}
msg->port_msg.type = _NXT_PORT_MSG_RPC_ERROR;
} else {
if (msg->fd2 != -1) {
res = nxt_router_port_queue_map(task, port, msg->fd2);
if (nxt_slow_path(res != NXT_OK)) {
return;
}
nxt_fd_close(msg->fd2);
msg->fd2 = -1;
}
}
if (msg->port_msg.stream != 0) {
@@ -1523,6 +1547,12 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf,
return NXT_ERROR;
}
ret = nxt_router_app_queue_init(task, port);
if (nxt_slow_path(ret != NXT_OK)) {
nxt_port_use(task, port, -1);
return NXT_ERROR;
}
nxt_port_write_enable(task, port);
port->app = app;
@@ -1828,6 +1858,82 @@ nxt_router_app_find(nxt_queue_t *queue, nxt_str_t *name)
}
static nxt_int_t
nxt_router_app_queue_init(nxt_task_t *task, nxt_port_t *port)
{
void *mem;
nxt_int_t fd;
fd = nxt_shm_open(task, sizeof(nxt_app_queue_t));
if (nxt_slow_path(fd == -1)) {
return NXT_ERROR;
}
mem = nxt_mem_mmap(NULL, sizeof(nxt_app_queue_t),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (nxt_slow_path(mem == MAP_FAILED)) {
nxt_fd_close(fd);
return NXT_ERROR;
}
nxt_app_queue_init(mem);
port->queue_fd = fd;
port->queue = mem;
return NXT_OK;
}
static nxt_int_t
nxt_router_port_queue_init(nxt_task_t *task, nxt_port_t *port)
{
void *mem;
nxt_int_t fd;
fd = nxt_shm_open(task, sizeof(nxt_port_queue_t));
if (nxt_slow_path(fd == -1)) {
return NXT_ERROR;
}
mem = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (nxt_slow_path(mem == MAP_FAILED)) {
nxt_fd_close(fd);
return NXT_ERROR;
}
nxt_port_queue_init(mem);
port->queue_fd = fd;
port->queue = mem;
return NXT_OK;
}
static nxt_int_t
nxt_router_port_queue_map(nxt_task_t *task, nxt_port_t *port, nxt_fd_t fd)
{
void *mem;
nxt_assert(fd != -1);
mem = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (nxt_slow_path(mem == MAP_FAILED)) {
return NXT_ERROR;
}
port->queue = mem;
return NXT_OK;
}
void
nxt_router_listener_application(nxt_router_temp_conf_t *tmcf, nxt_str_t *name,
nxt_http_action_t *action)
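
The two *_queue_init() helpers above create and map the shared memory on the router side, while nxt_router_port_queue_map() is the receiving half, mapping a descriptor that arrived over a socket; the fd is kept in port->queue_fd so it can travel as the second descriptor (fd2) of a NEW_PORT message, and the receiving side maps the same descriptor again, as nxt_router_new_port_handler() and nxt_port_process_ready_handler() do earlier in this commit. nxt_shm_open() itself is defined outside these hunks; a rough POSIX-only sketch of what the create-and-map half amounts to, with a made-up function name and simplified error handling (Unit also has memfd_create()/SHM_ANON variants not shown here):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Roughly what nxt_shm_open() + nxt_mem_mmap() amount to on plain POSIX:
 * create a shared memory object, detach its name, size it, map it, and
 * return the fd so it can be passed to the peer process. */
static int
shm_queue_create(size_t size, void **mem)
{
    int   fd;
    char  name[64];

    snprintf(name, sizeof(name), "/queue.demo.%d", (int) getpid());

    fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
    if (fd == -1) {
        return -1;
    }

    shm_unlink(name);                /* keep access fd-only, like SHM_ANON */

    if (ftruncate(fd, (off_t) size) == -1) {
        close(fd);
        return -1;
    }

    *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (*mem == MAP_FAILED) {
        close(fd);
        return -1;
    }

    return fd;                       /* the router keeps this as queue_fd */
}

int
main(void)
{
    int    fd;
    void  *mem;

    fd = shm_queue_create(4096, &mem);   /* 4096 is a placeholder size */
    if (fd == -1) {
        return 1;
    }

    printf("queue fd %d mapped at %p\n", fd, mem);

    return 0;
}
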
@@ -2748,6 +2854,12 @@ nxt_router_thread_start(void *data)
return;
}
ret = nxt_router_port_queue_init(task, port);
if (nxt_slow_path(ret != NXT_OK)) {
nxt_port_use(task, port, -1);
return;
}
engine->port = port;
nxt_port_enable(task, port, &nxt_router_app_port_handlers);
@@ -3670,6 +3782,7 @@ static void
nxt_router_req_headers_ack_handler(nxt_task_t *task,
nxt_port_recv_msg_t *msg, nxt_request_rpc_data_t *req_rpc_data)
{
int res;
nxt_app_t *app;
nxt_bool_t start_process;
nxt_port_t *app_port, *main_app_port, *idle_port;
@@ -3752,6 +3865,24 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task,
req_rpc_data->app_port = app_port;
if (req_rpc_data->msg_info.body_fd != -1) {
nxt_debug(task, "stream #%uD: send body fd %d", req_rpc_data->stream,
req_rpc_data->msg_info.body_fd);
lseek(req_rpc_data->msg_info.body_fd, 0, SEEK_SET);
res = nxt_port_socket_write(task, app_port, NXT_PORT_MSG_REQ_BODY,
req_rpc_data->msg_info.body_fd,
req_rpc_data->stream,
task->thread->engine->port->id, NULL);
if (nxt_slow_path(res != NXT_OK)) {
r = req_rpc_data->request;
nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR);
}
}
if (app->timeout != 0) {
r = req_rpc_data->request;
@@ -3886,10 +4017,10 @@ nxt_router_app_shared_port_send(nxt_task_t *task, nxt_port_t *app_port)
msg->max_share = port->max_share;
msg->type = port->type;
return nxt_port_socket_twrite(task, app_port,
return nxt_port_socket_write2(task, app_port,
NXT_PORT_MSG_NEW_PORT,
port->pair[0],
0, 0, b, NULL);
port->pair[0], port->queue_fd,
0, 0, b);
}
@@ -4522,6 +4653,13 @@ nxt_router_app_prepare_request(nxt_task_t *task,
nxt_int_t res;
nxt_port_t *port, *reply_port;
int notify;
struct {
nxt_port_msg_t pm;
nxt_port_mmap_msg_t mm;
} msg;
app = req_rpc_data->app;
nxt_assert(app != NULL);
@@ -4529,6 +4667,7 @@ nxt_router_app_prepare_request(nxt_task_t *task,
port = req_rpc_data->app_port;
nxt_assert(port != NULL);
nxt_assert(port->queue != NULL);
reply_port = task->thread->engine->port;
@@ -4569,20 +4708,38 @@ nxt_router_app_prepare_request(nxt_task_t *task,
req_rpc_data->msg_info.body_fd = -1;
}
if (req_rpc_data->msg_info.body_fd != -1) {
nxt_debug(task, "stream #%uD: send body fd %d", req_rpc_data->stream,
req_rpc_data->msg_info.body_fd);
msg.pm.stream = req_rpc_data->stream;
msg.pm.pid = reply_port->pid;
msg.pm.reply_port = reply_port->id;
msg.pm.type = NXT_PORT_MSG_REQ_HEADERS;
msg.pm.last = 0;
msg.pm.mmap = 1;
msg.pm.nf = 0;
msg.pm.mf = 0;
msg.pm.tracking = 0;
lseek(req_rpc_data->msg_info.body_fd, 0, SEEK_SET);
}
nxt_port_mmap_handler_t *mmap_handler = buf->parent;
nxt_port_mmap_header_t *hdr = mmap_handler->hdr;
res = nxt_port_socket_twrite(task, port,
NXT_PORT_MSG_REQ_HEADERS,
req_rpc_data->msg_info.body_fd,
req_rpc_data->stream, reply_port->id, buf,
NULL);
msg.mm.mmap_id = hdr->id;
msg.mm.chunk_id = nxt_port_mmap_chunk_id(hdr, buf->mem.pos);
msg.mm.size = nxt_buf_used_size(buf);
if (nxt_slow_path(res != NXT_OK)) {
res = nxt_app_queue_send(port->queue, &msg, sizeof(msg),
req_rpc_data->stream, &notify,
&req_rpc_data->msg_info.tracking_cookie);
if (nxt_fast_path(res == NXT_OK)) {
if (notify != 0) {
(void) nxt_port_socket_write(task, port,
NXT_PORT_MSG_READ_QUEUE,
-1, req_rpc_data->stream,
reply_port->id, NULL);
} else {
nxt_debug(task, "queue is not empty");
}
} else {
nxt_alert(task, "stream #%uD, app '%V': failed to send app message",
req_rpc_data->stream, &app->name);


@@ -7,10 +7,10 @@
#define _NXT_ROUTER_REQUEST_H_INCLUDED_
typedef struct nxt_msg_info_s {
typedef struct {
nxt_buf_t *buf;
nxt_fd_t body_fd;
nxt_port_mmap_tracking_t tracking;
uint32_t tracking_cookie;
nxt_work_handler_t completion_handler;
} nxt_msg_info_t;

File diff suppressed because it is too large.


@@ -121,6 +121,8 @@ struct nxt_unit_callbacks_s {
*/
void (*request_handler)(nxt_unit_request_info_t *req);
void (*data_handler)(nxt_unit_request_info_t *req);
/* Process websocket frame. */
void (*websocket_handler)(nxt_unit_websocket_frame_t *ws);