Skip to content

Commit d9e3fbe

Browse files
committed
Optimize IO handling by draining FDs on every wake-up.
A single IO wake-up can correspond to multiple actual IO events/waiting IO. Currently, after handling a single event we go back to waiting on the FD, where we will be immediately woken again because of the IO that is already waiting. This increases context switches and can increase latency. By handling all the IO possible on every wake-up before waiting again we can reduce both of these.
1 parent 950a6b6 commit d9e3fbe

File tree

4 files changed

+55
-42
lines changed

4 files changed

+55
-42
lines changed

ipc.c

+20-17
Original file line numberDiff line numberDiff line change
@@ -278,26 +278,29 @@ void ipc_handle_job(int fd)
278278
ipc_job job;
279279
int n;
280280

281-
/* read one IPC job from the pipe; even if the read is blocking,
282-
* we are here triggered from the reactor, on a READ event, so
283-
* we shouldn;t ever block */
284-
n = read(fd, &job, sizeof(job) );
285-
if (n==-1) {
286-
if (errno==EAGAIN || errno==EINTR || errno==EWOULDBLOCK )
281+
//Process all jobs until handle is drained
282+
while (1) {
283+
/* read one IPC job from the pipe; even if the read is blocking,
284+
* we are here triggered from the reactor, on a READ event, so
285+
* we shouldn;t ever block */
286+
n = read(fd, &job, sizeof(job) );
287+
if (n==-1) {
288+
if (errno==EAGAIN || errno==EINTR || errno==EWOULDBLOCK )
289+
return;
290+
LM_ERR("read failed:[%d] %s\n", errno, strerror(errno));
287291
return;
288-
LM_ERR("read failed:[%d] %s\n", errno, strerror(errno));
289-
return;
290-
}
292+
}
291293

292-
LM_DBG("received job type %d[%s] from process %d\n",
293-
job.handler_type, ipc_handlers[job.handler_type].name, job.snd_proc);
294+
LM_DBG("received job type %d[%s] from process %d\n",
295+
job.handler_type, ipc_handlers[job.handler_type].name, job.snd_proc);
294296

295-
/* custom handling for RPC type */
296-
if (job.handler_type==ipc_rpc_type) {
297-
((ipc_rpc_f*)job.payload1)( job.snd_proc, job.payload2);
298-
} else {
299-
/* generic registered type */
300-
ipc_handlers[job.handler_type].func( job.snd_proc, job.payload1);
297+
/* custom handling for RPC type */
298+
if (job.handler_type==ipc_rpc_type) {
299+
((ipc_rpc_f*)job.payload1)( job.snd_proc, job.payload2);
300+
} else {
301+
/* generic registered type */
302+
ipc_handlers[job.handler_type].func( job.snd_proc, job.payload1);
303+
}
301304
}
302305

303306
return;

net/net_udp.c

+9-2
Original file line numberDiff line numberDiff line change
@@ -292,8 +292,11 @@ inline static int handle_io(struct fd_map* fm, int idx,int event_type)
292292

293293
switch(fm->type){
294294
case F_UDP_READ:
295-
n = protos[((struct socket_info*)fm->data)->proto].net.
296-
read( fm->data /*si*/, &read);
295+
do {
296+
n = protos[((struct socket_info*)fm->data)->proto].net.
297+
read( fm->data /*si*/, &read);
298+
//Continue reading packets until we get an error
299+
} while (n == 0);
297300
break;
298301
case F_TIMER_JOB:
299302
handle_timer_job();
@@ -327,6 +330,10 @@ inline static int handle_io(struct fd_map* fm, int idx,int event_type)
327330
post_run_handle_script_reload();
328331

329332
pt_become_idle();
333+
334+
if (n == 1) {
335+
n = 0;
336+
}
330337
return n;
331338
}
332339

net/proto_udp/proto_udp.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -135,9 +135,9 @@ static int udp_read_req(struct socket_info *si, int* bytes_read)
135135
/* coverity[overrun-buffer-arg: FALSE] - union has 28 bytes, CID #200029 */
136136
len=recvfrom(bind_address->socket, buf, BUF_SIZE,0,&ri.src_su.s,&fromlen);
137137
if (len==-1){
138-
if (errno==EAGAIN)
139-
return 0;
140-
if ((errno==EINTR)||(errno==EWOULDBLOCK)|| (errno==ECONNREFUSED))
138+
if (errno==EAGAIN || errno==EWOULDBLOCK || errno==EINTR)
139+
return 1;
140+
if (errno==ECONNREFUSED)
141141
return -1;
142142
LM_ERR("recvfrom:[%d] %s\n", errno, strerror(errno));
143143
return -2;

timer.c

+23-20
Original file line numberDiff line numberDiff line change
@@ -840,32 +840,35 @@ void handle_timer_job(void)
840840
struct os_timer *t;
841841
ssize_t l;
842842

843-
/* read one "os_timer" pointer from the pipe (non-blocking) */
844-
l = read( timer_fd_out, &t, sizeof(t) );
845-
if (l==-1) {
846-
if (errno==EAGAIN || errno==EINTR || errno==EWOULDBLOCK )
843+
/* Read events until epipe is empty */
844+
while(1) {
845+
/* read one "os_timer" pointer from the pipe (non-blocking) */
846+
l = read( timer_fd_out, &t, sizeof(t) );
847+
if (l==-1) {
848+
if (errno==EAGAIN || errno==EINTR || errno==EWOULDBLOCK )
849+
return;
850+
LM_ERR("read failed:[%d] %s\n", errno, strerror(errno));
847851
return;
848-
LM_ERR("read failed:[%d] %s\n", errno, strerror(errno));
849-
return;
850-
}
852+
}
851853

852-
/* run the handler */
853-
if (t->flags&TIMER_FLAG_IS_UTIMER) {
854+
/* run the handler */
855+
if (t->flags&TIMER_FLAG_IS_UTIMER) {
854856

855-
if (t->trigger_time<(*ijiffies-ITIMER_TICK) )
856-
LM_WARN("utimer job <%s> has a %lld us delay in execution\n",
857-
t->label, *ijiffies-t->trigger_time);
858-
t->u.utimer_f( t->time , t->t_param);
859-
t->trigger_time = 0;
857+
if (t->trigger_time<(*ijiffies-ITIMER_TICK) )
858+
LM_WARN("utimer job <%s> has a %lld us delay in execution\n",
859+
t->label, *ijiffies-t->trigger_time);
860+
t->u.utimer_f( t->time , t->t_param);
861+
t->trigger_time = 0;
860862

861-
} else {
863+
} else {
862864

863-
if (t->trigger_time<(*ijiffies-ITIMER_TICK) )
864-
LM_WARN("timer job <%s> has a %lld us delay in execution\n",
865-
t->label, *ijiffies-t->trigger_time);
866-
t->u.timer_f( (unsigned int)t->time , t->t_param);
867-
t->trigger_time = 0;
865+
if (t->trigger_time<(*ijiffies-ITIMER_TICK) )
866+
LM_WARN("timer job <%s> has a %lld us delay in execution\n",
867+
t->label, *ijiffies-t->trigger_time);
868+
t->u.timer_f( (unsigned int)t->time , t->t_param);
869+
t->trigger_time = 0;
868870

871+
}
869872
}
870873

871874
return;

0 commit comments

Comments
 (0)