httpd: fix for POSTDATA handling bugs:

erroneous close(0)
full_read -> safe_read (with explanation)
Denis Vlasenko 2007-02-11 19:51:06 +00:00
parent 6089c2c31e
commit a3055846a4

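For reference, the two read helpers named in the commit message differ roughly as sketched below (a paraphrase of the libbb-style helpers, not the verbatim BusyBox source): safe_read() issues a single read(2) and retries only on EINTR, while full_read() loops until the requested byte count, EOF, or an error. A CGI that prints a few header lines and then blocks waiting for POSTDATA delivers neither PIPESIZE bytes nor EOF, so a full_read() on its stdout can wait forever.

#include <errno.h>
#include <unistd.h>

/* One read(2), restarted only when a signal interrupts it.
 * May return fewer bytes than requested. */
ssize_t safe_read(int fd, void *buf, size_t count)
{
	ssize_t n;
	do {
		n = read(fd, buf, count);
	} while (n < 0 && errno == EINTR);
	return n;
}

/* Repeats safe_read() until 'len' bytes arrive, EOF, or an error.
 * Blocks for as long as the writer keeps the pipe open without
 * producing the remaining bytes. */
ssize_t full_read(int fd, void *buf, size_t len)
{
	ssize_t total = 0;
	while (len > 0) {
		ssize_t cc = safe_read(fd, buf, len);
		if (cc < 0)
			return cc;              /* error */
		if (cc == 0)
			break;                  /* EOF */
		buf = (char *)buf + cc;
		total += cc;
		len -= cc;
	}
	return total;
}

With the CGI holding its stdout open while it waits for the request body, only the single-shot safe_read() hands the short header chunk back to httpd, which can then keep writing POSTDATA into the other pipe.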

@@ -950,7 +950,6 @@ static int getLine(void)
* (int bodyLen) . . . . . . . . Length of the post body.
* (const char *cookie) . . . . . For set HTTP_COOKIE.
* (const char *content_type) . . For set CONTENT_TYPE.
*
* $Return: (char *) . . . . A pointer to the decoded string (same as input).
*
@@ -984,31 +983,32 @@ static int sendCgi(const char *url,
if (!pid) {
/* child process */
char *script;
char *purl = xstrdup(url);
char *purl;
char realpath_buff[MAXPATHLEN];
if (purl == NULL)
_exit(242);
if (config->accepted_socket > 1)
close(config->accepted_socket);
if (config->server_socket > 1)
close(config->server_socket);
inFd = toCgi[0];
outFd = fromCgi[1];
dup2(inFd, 0); // replace stdin with the pipe
dup2(outFd, 1); // replace stdout with the pipe
dup2(toCgi[0], 0); // replace stdin with the pipe
dup2(fromCgi[1], 1); // replace stdout with the pipe
/* Huh? User seeing stderr can be a security problem...
* and if cgi really wants that, it can always dup2(1,2)...
if (!DEBUG)
dup2(outFd, 2); // replace stderr with the pipe
dup2(fromCgi[1], 2); // replace stderr with the pipe
*/
/* I think we cannot inadvertently close 0, 1 here... */
close(toCgi[0]);
close(toCgi[1]);
close(fromCgi[0]);
close(fromCgi[1]);
close(config->accepted_socket);
close(config->server_socket);
/*
* Find PATH_INFO.
*/
xfunc_error_retval = 242;
purl = xstrdup(url);
script = purl;
while ((script = strchr(script + 1, '/')) != NULL) {
/* have script.cgi/PATH_INFO or dirs/script.cgi[/PATH_INFO] */
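The reworked child setup is the usual pipe-plus-dup2 pattern. Below is a minimal stand-alone sketch of the same ordering (setup_cgi_child_fds is a hypothetical helper, not a function in httpd.c): duplicate the pipe ends onto fds 0 and 1 first, then close every original descriptor, relying on the same assumption the in-line comment states, namely that none of those fds is 0 or 1 at this point.

#include <unistd.h>

/* Hypothetical helper mirroring the child-side ordering above:
 *   toCgi[0]   - read end of the httpd->CGI pipe  (becomes stdin)
 *   fromCgi[1] - write end of the CGI->httpd pipe (becomes stdout)
 * The dup2() calls come first; only then are the original pipe fds
 * and the server's sockets closed. */
static void setup_cgi_child_fds(const int toCgi[2], const int fromCgi[2],
                                int accepted_socket, int server_socket)
{
	dup2(toCgi[0], 0);    /* stdin  <- request body written by httpd */
	dup2(fromCgi[1], 1);  /* stdout -> response read back by httpd   */

	close(toCgi[0]);      /* fds 0 and 1 keep the duplicates */
	close(toCgi[1]);
	close(fromCgi[0]);
	close(fromCgi[1]);

	close(accepted_socket);  /* the CGI has no use for the sockets */
	close(server_socket);
}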
@@ -1210,33 +1210,33 @@ static int sendCgi(const char *url,
#if PIPESIZE >= MAX_MEMORY_BUFF
# error "PIPESIZE >= MAX_MEMORY_BUFF"
#endif
/* NB: was safe_read. If it *has to be* safe_read, */
/* please explain why in this comment... */
count = full_read(inFd, rbuf, PIPESIZE);
/* Must use safe_read, not full_read, because
* cgi may output a few first lines and then wait
* for POSTDATA without closing stdout.
* With full_read we may wait here forever. */
count = safe_read(inFd, rbuf, PIPESIZE);
if (count == 0)
break; /* closed */
if (count < 0)
continue; /* huh, error, why continue?? */
if (firstLine) {
/* full_read (above) avoids
* "chopped up into small chunks" syndrome here */
static const char HTTP_200[] = "HTTP/1.0 200 OK\r\n\r\n";
rbuf[count] = '\0';
/* check to see if the user script added headers */
#define HTTP_200 "HTTP/1.0 200 OK\r\n\r\n"
/* (NB: buggy wrt cgi splitting "HTTP OK") */
if (memcmp(rbuf, HTTP_200, 4) != 0) {
/* there is no "HTTP", do it ourself */
full_write(s, HTTP_200, sizeof(HTTP_200)-1);
}
#undef HTTP_200
/* Example of valid CGI without "Content-type:"
* echo -en "HTTP/1.0 302 Found\r\n"
* echo -en "Location: http://www.busybox.net\r\n"
* echo -en "\r\n"
if (!strstr(rbuf, "ontent-")) {
full_write(s, "Content-type: text/plain\r\n\r\n", 28);
}
*/
//if (!strstr(rbuf, "ontent-")) {
// full_write(s, "Content-type: text/plain\r\n\r\n", 28);
//}
firstLine = 0;
}
if (full_write(s, rbuf, count) != count)
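The first-chunk handling can be read in isolation as the sketch below (forward_first_cgi_chunk is hypothetical; httpd itself writes with full_write(), a write-until-done loop, and a small guard for chunks shorter than four bytes is added here): if the CGI's output does not begin with "HTTP", a default status line is sent ahead of it. As the NB remark in the diff notes, switching to safe_read() means the status line may arrive split across reads, in which case this check can misfire.

#include <string.h>
#include <unistd.h>

/* Hypothetical stand-alone version of the first-chunk handling.
 * 's' is the client socket; rbuf holds 'count' bytes just read from
 * the CGI (the PIPESIZE < MAX_MEMORY_BUFF check above guarantees a
 * spare byte for the NUL).  Plain write() stands in for full_write(). */
static void forward_first_cgi_chunk(int s, char *rbuf, int count)
{
	static const char HTTP_200[] = "HTTP/1.0 200 OK\r\n\r\n";

	rbuf[count] = '\0';
	/* Did the script supply its own status line?  A CGI that writes
	 * "HTTP..." in pieces smaller than 4 bytes defeats this check. */
	if (count < 4 || memcmp(rbuf, "HTTP", 4) != 0) {
		/* No "HTTP" prefix: send a default status line first. */
		write(s, HTTP_200, sizeof(HTTP_200) - 1);
	}
	write(s, rbuf, count);
}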