[git commit] ip link: fix mismatched enums in vlan_parse_opt(), closes 11631

Denys Vlasenko vda.linux at googlemail.com
Tue Jan 22 08:34:07 UTC 2019


commit: https://git.busybox.net/busybox/commit/?id=0dda736ccf650789b296e1ec0122012f1280deb5
branch: https://git.busybox.net/busybox/commit/?id=refs/heads/master

Signed-off-by: Denys Vlasenko <vda.linux at googlemail.com>
---
 networking/wget.c | 76 ++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 55 insertions(+), 21 deletions(-)

diff --git a/networking/wget.c b/networking/wget.c
index fa4d21afd..141c2d848 100644
--- a/networking/wget.c
+++ b/networking/wget.c
@@ -122,16 +122,15 @@
 
 //usage:#define wget_trivial_usage
 //usage:	IF_FEATURE_WGET_LONG_OPTIONS(
-//usage:       "[-c|--continue] [--spider] [-q|--quiet] [-O|--output-document FILE]\n"
-//usage:       "	[-o|--output-file FILE] [--header 'header: value'] [-Y|--proxy on/off]\n"
+//usage:       "[-cqS] [--spider] [-O FILE] [-o FILE] [--header 'header: value']\n"
 /* Since we ignore these opts, we don't show them in --help */
-/* //usage:    "	[--no-check-certificate] [--no-cache] [--passive-ftp] [-t TRIES]" */
+/* //usage:    "	[--no-check-certificate] [--no-cache] [--passive-ftp]" */
 /* //usage:    "	[-nv] [-nc] [-nH] [-np]" */
-//usage:       "	[-P DIR] [-S|--server-response] [-U|--user-agent AGENT]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
+//usage:       "	[-Y on/off] [-P DIR] [-U AGENT]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC] [-t TRIES]") " URL..."
 //usage:	)
 //usage:	IF_NOT_FEATURE_WGET_LONG_OPTIONS(
-//usage:       "[-cq] [-O FILE] [-o FILE] [-Y on/off] [-P DIR] [-S] [-U AGENT]"
-//usage:			IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
+//usage:       "[-cqS] [-O FILE] [-o FILE] [-Y on/off] [-P DIR] [-U AGENT]"
+//usage:			IF_FEATURE_WGET_TIMEOUT(" [-T SEC] [-t TRIES]") " URL..."
 //usage:	)
 //usage:#define wget_full_usage "\n\n"
 //usage:       "Retrieve files via HTTP or FTP\n"
@@ -145,6 +144,7 @@
 //usage:     "\n	-S    		Show server response"
 //usage:	IF_FEATURE_WGET_TIMEOUT(
 //usage:     "\n	-T SEC		Network read timeout is SEC seconds"
+//usage:     "\n	-t TRIES	Retry on errors (0:infinite)"
 //usage:	)
 //usage:     "\n	-O FILE		Save to FILE ('-' for stdout)"
 //usage:     "\n	-o FILE		Log messages to FILE"
@@ -239,11 +239,13 @@ struct globals {
 	int log_fd;
 	int o_flags;
 #if ENABLE_FEATURE_WGET_TIMEOUT
+	unsigned retries;
 	unsigned timeout_seconds;
 	smallint die_if_timed_out;
 #endif
 	smallint chunked;         /* chunked transfer encoding */
 	smallint got_clen;        /* got content-length: from server  */
+	smalluint exitcode;
 	/* Local downloads do benefit from big buffer.
 	 * With 512 byte buffer, it was measured to be
 	 * an order of magnitude slower than with big one.
@@ -444,8 +446,10 @@ static char* sanitize_string(char *s)
 	return s;
 }
 
-/* Returns '\n' if it was seen, else '\0'. Trims at first '\r' or '\n' */
-static char fgets_trim_sanitize(FILE *fp, const char *fmt)
+/* Returns '\n' if it was seen, -1 if timed out, else '\0'.
+ * Trims at first control char (except tab).
+ */
+static int fgets_trim_sanitize(FILE *fp, const char *fmt)
 {
 	char c;
 	char *buf_ptr;
@@ -471,7 +475,7 @@ static char fgets_trim_sanitize(FILE *fp, const char *fmt)
 	if (fmt && (option_mask32 & WGET_OPT_SERVER_RESPONSE))
 		fprintf(stderr, fmt, G.wget_buf);
 
-	return c;
+	return bb_got_signal ? -1 : c;
 }
 
 static int ftpcmd(const char *s1, const char *s2, FILE *fp)
@@ -491,7 +495,10 @@ static int ftpcmd(const char *s1, const char *s2, FILE *fp)
 	/* Read until "Nxx something" is received */
 	G.wget_buf[3] = 0;
 	do {
-		fgets_trim_sanitize(fp, "%s\n");
+		if (fgets_trim_sanitize(fp, "%s\n") == -1) {
+			strcpy(G.wget_buf, "timeout");
+			return -1;
+		}
 	} while (!isdigit(G.wget_buf[0]) || G.wget_buf[3] != ' ');
 
 	G.wget_buf[3] = '\0';
@@ -779,6 +786,8 @@ static FILE* prepare_ftp_session(FILE **dfpp, struct host_info *target, len_and_
 	int port;
 
 	sfp = open_socket(lsa);
+if (!sfp)
+ ...
 #if ENABLE_FEATURE_WGET_HTTPS
 	if (target->protocol == P_FTPS)
 		spawn_ssl_client(target->host, fileno(sfp), TLSLOOP_EXIT_ON_LOCAL_EOF);
@@ -834,6 +843,8 @@ static FILE* prepare_ftp_session(FILE **dfpp, struct host_info *target, len_and_
 	set_nport(&lsa->u.sa, htons(port));
 
 	*dfpp = open_socket(lsa);
+if (!*dfpp)
+ ...
 
 #if ENABLE_FEATURE_WGET_HTTPS
 	if (target->protocol == P_FTPS) {
@@ -862,7 +873,8 @@ static FILE* prepare_ftp_session(FILE **dfpp, struct host_info *target, len_and_
 	return sfp;
 }
 
-static void NOINLINE retrieve_file_data(FILE *dfp)
+/* Returns 0 on failure */
+static int NOINLINE retrieve_file_data(FILE *dfp)
 {
 #if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
 # if ENABLE_FEATURE_WGET_TIMEOUT
@@ -948,7 +960,8 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 			if (errno != EAGAIN) {
 				if (ferror(dfp)) {
 					progress_meter(PROGRESS_END);
-					bb_perror_msg_and_die(bb_msg_read_error);
+					bb_perror_msg(bb_msg_read_error);
+					return 0; /* "error" */
 				}
 				break; /* EOF, not error */
 			}
@@ -961,7 +974,8 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 # if ENABLE_FEATURE_WGET_TIMEOUT
 				if (second_cnt != 0 && --second_cnt == 0) {
 					progress_meter(PROGRESS_END);
-					bb_error_msg_and_die("download timed out");
+					bb_error_msg("download timed out");
+					return 0; /* "error" */
 				}
 # endif
 				/* We used to loop back to poll here,
@@ -985,18 +999,23 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 			break;
 
 		/* Each chunk ends with "\r\n" - eat it */
-		fgets_trim_sanitize(dfp, NULL);
+		if (fgets_trim_sanitize(dfp, NULL) == -1)
+			break; /* timed out */
  get_clen:
 		/* chunk size format is "HEXNUM[;name[=val]]\r\n" */
-		fgets_trim_sanitize(dfp, NULL);
+		if (fgets_trim_sanitize(dfp, NULL) == -1)
+			break; /* timed out */
 		errno = 0;
 		G.content_len = STRTOOFF(G.wget_buf, NULL, 16);
 		/*
 		 * Had a bug with inputs like "ffffffff0001f400"
 		 * smashing the heap later. Ensure >= 0.
 		 */
-		if (G.content_len < 0 || errno)
-			bb_error_msg_and_die("bad chunk length '%s'", G.wget_buf);
+		if (G.content_len < 0 || errno) {
+			progress_meter(PROGRESS_END);
+			bb_error_msg("bad chunk length '%s'", G.wget_buf);
+			return 0; /* "error" */
+		}
 		if (G.content_len == 0)
 			break; /* all done! */
 		G.got_clen = 1;
@@ -1013,9 +1032,12 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 	G.chunked = 0;  /* makes it show 100% even for chunked download */
 	G.got_clen = 1; /* makes it show 100% even for download of (formerly) unknown size */
 	progress_meter(PROGRESS_END);
-	if (G.content_len != 0) {
-		bb_perror_msg_and_die("connection closed prematurely");
-		/* GNU wget says "DATE TIME (NN MB/s) - Connection closed at byte NNN. Retrying." */
+
+	if (!bb_got_signal) { /* if it's not a timeout */
+		if (G.content_len != 0) {
+			bb_perror_msg_and_die("connection closed prematurely");
+			/* GNU wget says "DATE TIME (NN MB/s) - Connection closed at byte NNN. Retrying." */
+		}
 	}
 
 	/* If -c failed, we restart from the beginning,
@@ -1035,6 +1057,8 @@ static void NOINLINE retrieve_file_data(FILE *dfp)
 		else
 			fprintf(stderr, "'%s' saved\n", G.fname_out);
 	}
+
+	return (bb_got_signal == 0); /* "success" if not timed out */
 }
 
 static void download_one_url(const char *url)
@@ -1139,6 +1163,8 @@ static void download_one_url(const char *url)
 # if ENABLE_FEATURE_WGET_HTTPS
 			if (fd < 0) { /* no openssl? try internal */
 				sfp = open_socket(lsa);
+if (!sfp)
+ ...
 				spawn_ssl_client(server.host, fileno(sfp), /*flags*/ 0);
 				goto socket_opened;
 			}
@@ -1151,15 +1177,21 @@ static void download_one_url(const char *url)
 			goto socket_opened;
 		}
 		sfp = open_socket(lsa);
+if (!sfp)
+ ...
  socket_opened:
 #elif ENABLE_FEATURE_WGET_HTTPS
 		/* Only internal TLS support is configured */
 		sfp = open_socket(lsa);
+if (!sfp)
+ ...
 		if (server.protocol == P_HTTPS)
 			spawn_ssl_client(server.host, fileno(sfp), /*flags*/ 0);
 #else
 		/* ssl (https) support is not configured */
 		sfp = open_socket(lsa);
+if (!sfp)
+ ...
 #endif
 		/* Send HTTP request */
 		if (use_proxy) {
@@ -1235,7 +1267,8 @@ static void download_one_url(const char *url)
 		 * Retrieve HTTP response line and check for "200" status code.
 		 */
  read_response:
-		fgets_trim_sanitize(sfp, "  %s\n");
+		if (fgets_trim_sanitize(sfp, "  %s\n") == -1)
+			goto ret; /* timed out */
 
 		str = G.wget_buf;
 		str = skip_non_whitespace(str);
@@ -1406,6 +1439,7 @@ However, in real world it was observed that some web servers
 			bb_error_msg_and_die("ftp error: %s", G.wget_buf);
 		/* ftpcmd("QUIT", NULL, sfp); - why bother? */
 	}
+ ret:
 	fclose(sfp);
 
 	free(server.allocated);
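
[Illustrative note, not part of the commit.] The common thread in the hunks above is that fgets_trim_sanitize() now returns -1 on a timeout and retrieve_file_data() returns 0 on failure instead of calling bb_*_msg_and_die(), so a caller can retry a failed download rather than abort the whole applet. A minimal, self-contained sketch of that calling pattern follows; download_once() is a hypothetical stand-in for "open the URL and run retrieve_file_data()", not a function from wget.c:

#include <stdio.h>

/* Hypothetical stand-in for one download attempt: returns 0 on a
 * network error or timeout instead of exiting the whole applet. */
static int download_once(const char *url)
{
	fprintf(stderr, "fetching %s\n", url);
	return 0; /* pretend the network failed, to exercise the retry loop */
}

/* Retry up to 'tries' times; tries == 0 means "retry forever",
 * matching "-t TRIES  Retry on errors (0:infinite)" in the new help text. */
static int download_with_retries(const char *url, unsigned tries)
{
	unsigned attempt = 0;

	while (!download_once(url)) {
		attempt++;
		if (tries != 0 && attempt >= tries)
			return 0; /* give up; report failure to the caller */
	}
	return 1;
}

int main(void)
{
	return download_with_retries("http://example.com/file", 3) ? 0 : 1;
}

In this sketch the failure is only reported upward; deciding whether to move on to the next URL or to record a nonzero exit status (cf. the new G.exitcode field) is left to the caller.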

