"malformed patch" error while applying a diff to the wget source with patch



To apply this patch to wget, I copied the code, starting at "Index: src/options.h" and ending with "+@item", into a new file created in the source code folder. Then:

$ patch -p0 < name_of_patch
(Patch is indented 1 space.)
patching file src/options.h
patch: **** malformed patch at line 6: char **excludes; /* List of excluded FTP directories. */

How should I apply it?

This is the content of the file I created:

 Index: src/options.h
 ===================================================================
 --- src/options.h (revision 2276)
 +++ src/options.h (working copy)
 @@ -62,6 +62,8 @@
 char **excludes; /* List of excluded FTP directories. */
 char **includes; /* List of FTP directories to
 follow. */
 + int maxsize; /* Maximum file size (kB) */
 + int minsize; /* Minimum file size (kB) */
 bool ignore_case; /* Whether to ignore case when
 matching dirs and files */

 Index: src/init.c
 ===================================================================
 --- src/init.c (revision 2276)
 +++ src/init.c (working copy)
 @@ -182,6 +182,8 @@
 { "loadcookies", &opt.cookies_input, cmd_file },
 { "logfile", &opt.lfilename, cmd_file },
 { "login", &opt.ftp_user, cmd_string },/* deprecated*/
 + { "maxsize", &opt.maxsize, cmd_number },
 + { "minsize", &opt.minsize, cmd_number },
 { "mirror", NULL, cmd_spec_mirror },
 { "netrc", &opt.netrc, cmd_boolean },
 { "noclobber", &opt.noclobber, cmd_boolean },
 Index: src/http.c
 ===================================================================
 --- src/http.c (revision 2276)
 +++ src/http.c (working copy)
 @@ -2252,7 +2252,7 @@
 retried, and retried, and retried, and... */
 uerr_t
 http_loop (struct url *u, char **newloc, char **local_file, const char 
 *referer,
 - int *dt, struct url *proxy)
 + int *dt, struct url *proxy, bool can_ommit)
 {
 int count;
 bool got_head = false; /* used for time-stamping and filename 
 detection */
 @@ -2285,6 +2285,27 @@
 if (opt.ftp_glob && has_wildcards_p (u->path))
 logputs (LOG_VERBOSE, _("Warning: wildcards not supported in HTTP.\n"));

 + /* Try fetching the document header and checking the document length */
 + if (can_ommit && !opt.spider && !opt.ignore_length &&
 + (opt.minsize > 0 || opt.maxsize > 0))
 + {
 + /* Setup hstat struct. */
 + xzero (hstat);
 + hstat.referer = referer;
 +
 + *dt = HEAD_ONLY;
 + err = gethttp (u, &hstat, dt, proxy);
 + 
 + if (err == RETRFINISHED && hstat.contlen > 0 &&
 + (opt.minsize > 0 && hstat.contlen < opt.minsize * 1024 ||
 + opt.maxsize > 0 && hstat.contlen > opt.maxsize * 1024)) 
 + {
 + logputs (LOG_VERBOSE, _("File too small or too big -- not 
 retrieving.\n"));
 + ret = FILEBADFILE;
 + goto exit;
 + }
 + }
 +
 /* Setup hstat struct. */
 xzero (hstat);
 hstat.referer = referer;
 @@ -2300,7 +2321,7 @@

 /* Reset the document type. */
 *dt = 0;
 - 
 +
 /* THE loop */
 do
 {
 Index: src/http.h
 ===================================================================
 --- src/http.h (revision 2276)
 +++ src/http.h (working copy)
 @@ -32,7 +32,7 @@
 struct url;

 uerr_t http_loop (struct url *, char **, char **, const char *, int *,
 - struct url *);
 + struct url *, bool);
 void save_cookies (void);
 void http_cleanup (void);
 time_t http_atotm (const char *);
 Index: src/res.c
 ===================================================================
 --- src/res.c (revision 2276)
 +++ src/res.c (working copy)
 @@ -545,7 +545,7 @@
 *file = NULL;
 opt.timestamping = false;
 opt.spider = false;
 - err = retrieve_url (robots_url, file, NULL, NULL, NULL, false);
 + err = retrieve_url (robots_url, file, NULL, NULL, NULL, false, false);
 opt.timestamping = saved_ts_val;
 opt.spider = saved_sp_val; 
 xfree (robots_url);
 Index: src/retr.c
 ===================================================================
 --- src/retr.c (revision 2276)
 +++ src/retr.c (working copy)
 @@ -601,7 +601,7 @@

 uerr_t
 retrieve_url (const char *origurl, char **file, char **newloc,
 - const char *refurl, int *dt, bool recursive)
 + const char *refurl, int *dt, bool recursive, bool can_ommit)
 {
 uerr_t result;
 char *url;
 @@ -676,7 +676,7 @@
 #endif
 || (proxy_url && proxy_url->scheme == SCHEME_HTTP))
 {
 - result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
 + result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url, 
 can_ommit);
 }
 else if (u->scheme == SCHEME_FTP)
 {
 @@ -856,7 +856,7 @@
 opt.follow_ftp = old_follow_ftp;
 }
 else
 - status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, 
 &dt, opt.recursive);
 + status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, 
 &dt, opt.recursive, false);

 if (filename && opt.delete_after && file_exists_p (filename))
 {
 Index: src/retr.h
 ===================================================================
 --- src/retr.h (revision 2276)
 +++ src/retr.h (working copy)
 @@ -49,7 +49,7 @@
 char *fd_read_hunk (int, hunk_terminator_t, long, long);
 char *fd_read_line (int);

 -uerr_t retrieve_url (const char *, char **, char **, const char *, int *, 
 bool);
 +uerr_t retrieve_url (const char *, char **, char **, const char *, int *, 
 bool, bool);
 uerr_t retrieve_from_file (const char *, bool, int *);

 const char *retr_rate (wgint, double);
 Index: src/recur.c
 ===================================================================
 --- src/recur.c (revision 2276)
 +++ src/recur.c (working copy)
 @@ -247,7 +247,7 @@
 int dt = 0;
 char *redirected = NULL;

 - status = retrieve_url (url, &file, &redirected, referer, &dt, false);
 + status = retrieve_url (url, &file, &redirected, referer, &dt, false, 
 !html_allowed);

 if (html_allowed && file && status == RETROK
 && (dt & RETROKF) && (dt & TEXTHTML))
 Index: src/main.c
 ===================================================================
 --- src/main.c (revision 2276)
 +++ src/main.c (working copy)
 @@ -189,6 +189,8 @@
 { "level", 'l', OPT_VALUE, "reclevel", -1 },
 { "limit-rate", 0, OPT_VALUE, "limitrate", -1 },
 { "load-cookies", 0, OPT_VALUE, "loadcookies", -1 },
 + { "max-size", 'M', OPT_VALUE, "maxsize", -1 },
 + { "min-size", 's', OPT_VALUE, "minsize", -1 },
 { "mirror", 'm', OPT_BOOLEAN, "mirror", -1 },
 { "no", 'n', OPT__NO, NULL, required_argument },
 { "no-clobber", 0, OPT_BOOLEAN, "noclobber", -1 },
 @@ -446,6 +448,10 @@
 N_("\
 --limit-rate=RATE limit download rate to RATE.\n"),
 N_("\
 + -M, --max-size=SIZE limit maximum file size to SIZE (kB).\n"),
 + N_("\
 + -s, --min-size=SIZE limit minimum file size to SIZE (kB).\n"),
 + N_("\
 --no-dns-cache disable caching DNS lookups.\n"),
 N_("\
 --restrict-file-names=OS restrict chars in file names to ones OS 
 allows.\n"),
 @@ -675,7 +681,6 @@
 stdout);
 exit (0);
 }
 - 
 #ifndef TESTING
 int
 main (int argc, char *const *argv)
 @@ -979,7 +984,7 @@
 opt.follow_ftp = old_follow_ftp;
 }
 else
 - status = retrieve_url (*t, &filename, &redirected_URL, NULL, &dt, 
 opt.recursive);
 + status = retrieve_url (*t, &filename, &redirected_URL, NULL, &dt, 
 opt.recursive, false);

 if (opt.delete_after && file_exists_p(filename))
 {
 Index: doc/wget.texi
 ===================================================================
 --- doc/wget.texi (revision 2276)
 +++ doc/wget.texi (working copy)
 @@ -1592,7 +1592,7 @@
 @item -l @var{depth}
 @itemx --level=@var{depth}
 Specify recursion maximum depth level @var{depth} (@pxref{Recursive
 -Download}). The default maximum depth is 5.
 +Download}). The default maximum depth is 5. Zero means infinite recursion.

 @cindex proxy filling
 @cindex delete after retrieval
 @@ -1803,6 +1803,15 @@
 Specify the domains that are @emph{not} to be followed.
 (@pxref{Spanning Hosts}).

 +@cindex file size range
 +@item -s @var{size}
 +@itemx --min-size=@var{size}
 +Limit the minimum size of non-HTML files to @var{size} kB. Smaller files will 
 not be retrieved.
 +
 +@item -M @var{size}
 +@itemx --max-size=@var{size}
 +Limit the maximum size of non-HTML files to @var{size} kB. Larger files will 
 not be retrieved.
 +
 @cindex follow FTP links
 @item --follow-ftp
 Follow @sc{ftp} links from @sc{html} documents. Without this option,
 @@ -3064,6 +3073,14 @@
 too.

 @item
 +Retrieve in directory 'pics' all jpeg images from a given site, excluding
 +files smaller than 50k (to avoid thumbnails) or larger than 400k.
 +
 +@example
 +wget -Ppics -nd -r -l0 -Ajpg,jpeg -s50 -M400 http://www.server.com
 +@end example
 +
 +@item
 Suppose you were in the middle of downloading, when Wget was
 interrupted. Now you do not want to clobber the files already present.
 It would be:
 Index: src/utils.c
 ===================================================================
 --- src/utils.c (revision 2276)
 +++ src/utils.c (working copy)
 @@ -432,33 +432,52 @@
 #endif
 }

 -/* stat file names named PREFIX.1, PREFIX.2, etc., until one that
 - doesn't exist is found. Return a freshly allocated copy of the
 - unused file name. */
 +/*
 + * Stat file names named PREFIX-1.SUFFIX, PREFIX-2.SUFFIX, etc., until
 + * one that doesn't exist is found. Return a freshly allocated copy of
 + * the unused file name.
 + */

 static char *
 -unique_name_1 (const char *prefix)
 +unique_name_1 (const char *s)
 {
 int count = 1;
 - int plen = strlen (prefix);
 - char *template = (char *)alloca (plen + 1 + 24);
 - char *template_tail = template + plen;
 + int p, l = strlen (s);
 + char *prefix = (char *) alloca (l + 1);
 + char *suffix = (char *) alloca (l + 1);
 + char *filename = (char *) alloca (l + 26);
 + 
 + /* Look for last '.' in filename */
 + 
 + for(p = l; p >= 0 && s[p] != '.'; p--);

 - memcpy (template, prefix, plen);
 - *template_tail++ = '.';
 + /* If none found, then prefix is the whole filename */
 + 
 + if (p < 0)
 + p = l;

 + /* Extract prefix and (possibly empty) suffix from filename */
 + 
 + memcpy (prefix, s, p);
 + prefix[p] = '\0';
 +
 + memcpy (suffix, s+p, l-p);
 + suffix[l-p] = '\0';
 +
 + /* Try indexed filenames until an unused one is found */
 + 
 do
 - number_to_string (template_tail, count++);
 - while (file_exists_p (template));
 + sprintf (filename, "%s-%d%s", prefix, count++, suffix);
 + while (file_exists_p (filename));

 - return xstrdup (template);
 + return xstrdup (filename);
 }

 /* Return a unique file name, based on FILE.

 - More precisely, if FILE doesn't exist, it is returned unmodified.
 - If not, FILE.1 is tried, then FILE.2, etc. The first FILE.<number>
 - file name that doesn't exist is returned.
 + More precisely, if FILE.SUF doesn't exist, it is returned unmodified.
 + If not, FILE-1.SUF is tried, then FILE-2.SUF etc. The first
 + FILE-<number>.SUF file name that doesn't exist is returned.

 The resulting file is not created, only verified that it didn't
 exist at the point in time when the function was called.
 Index: doc/wget.texi
 ===================================================================
 --- doc/wget.texi (revision 2276)
 +++ doc/wget.texi (working copy)
 @@ -561,16 +561,16 @@
 cases, the local file will be @dfn{clobbered}, or overwritten, upon
 repeated download. In other cases it will be preserved.

 -When running Wget without @samp{-N}, @samp{-nc}, or @samp{-r},
 -downloading the same file in the same directory will result in the
 -original copy of @var{file} being preserved and the second copy being
 -named @samp{@var{file}.1}. If that file is downloaded yet again, the
 -third copy will be named @samp{@var{file}.2}, and so on. When
 -@samp{-nc} is specified, this behavior is suppressed, and Wget will
 -refuse to download newer copies of @samp{@var{file}}. Therefore,
 -``@code{no-clobber}'' is actually a misnomer in this mode---it's not
 -clobbering that's prevented (as the numeric suffixes were already
 -preventing clobbering), but rather the multiple version saving that's
 +When running Wget without @samp{-N}, @samp{-nc}, or @samp{-r}, downloading the
 +same file in the same directory will result in the original copy of @var{file}
 +being preserved and the second copy being named
 +@samp{@var{prefix}-1.@var{suffix}}, assuming @var{file} = @var{prefix.suffix}.
 +If that file is downloaded yet again, the third copy will be named
 +@samp{@var{prefix}-2.@var{suffix}}, and so on. When @samp{-nc} is specified,
 +this behavior is suppressed, and Wget will refuse to download newer copies of
 +@samp{@var{file}}. Therefore, ``@code{no-clobber}'' is actually a misnomer in
 +this mode---it's not clobbering that's prevented (as the numeric suffixes were
 +already preventing clobbering), but rather the multiple version saving that's
 prevented.

 When running Wget with @samp{-r}, but without @samp{-N} or @samp{-nc},
 @@ -1592,7 +1592,7 @@
 @item -l @var{depth}
 @itemx --level=@var{depth}
 Specify recursion maximum depth level @var{depth} (@pxref{Recursive
 -Download}). The default maximum depth is 5.
 +Download}). The default maximum depth is 5. Zero means infinite recursion.

 @cindex proxy filling
 @cindex delete after retrieval
 @@ -1803,6 +1803,15 @@
 Specify the domains that are @emph{not} to be followed.
 (@pxref{Spanning Hosts}).

 +@cindex file size range
 +@item -s @var{size}
 +@itemx --min-size=@var{size}
 +Limit the minimum size of non-HTML files to @var{size} kB. Smaller files will 
 not be retrieved.
 +
 +@item -M @var{size}
 +@itemx --max-size=@var{size}
 +Limit the maximum size of non-HTML files to @var{size} kB. Larger files will 
 not be retrieved.
 +
 @cindex follow FTP links
 @item --follow-ftp
 Follow @sc{ftp} links from @sc{html} documents. Without this option,
 @@ -3064,6 +3073,14 @@
 too.

 @item
 +Retrieve in directory 'pics' all jpeg images from a given site, excluding
 +files smaller than 50k (to avoid thumbnails) or larger than 400k.
 +
 +@example
 +wget -Ppics -nd -r -l0 -Ajpg,jpeg -s50 -M400 http://www.server.com
 +@end example
 +
 +@item

Please post exactly what you actually copied.
Jeff Ferland

OK, I edited the original post.
gadelat

Answers:



This is a common problem when a patch is copied and pasted into a text file without its whitespace indentation. You need to add a space in front of every line except those beginning with "+", "-", or "@@". To avoid this problem, it is better to generate the diff file yourself (using diff or your version-control system's diff tool), or to download the whole diff file instead of copying and pasting it from the browser.
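If you would rather script that rule than edit the file by hand, here is a minimal sketch. It assumes the only damage is the stripped leading space on context lines (lines the browser wrapped still have to be rejoined manually), and fixed_patch is just an arbitrary output name:

# Prepend a space to every line that does not already start with '+', '-' or '@'
sed -e '/^[+@-]/!s/^/ /' name_of_patch > fixed_patch
# Check the result first; drop --dry-run once it applies cleanly
patch -p0 --dry-run < fixed_patch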

Also, before applying the patch, check whether it has already been merged upstream and shipped in the latest stable release; that would solve the problem cleanly. You are patching .c files, so you will be compiling anyway; if the change is already upstream, simply build from a fresh stable tarball instead of patching.



I second Nikhil's general comments. However, instead of cutting and pasting, it is generally better to download the HTML page and then convert it to text. For example, you can:

1) wget -c http://osdir.com/ml/web.wget.patches/2007-07/msg00011.html

2) Open msg00011.html in OpenOffice, for example, and save it as text. The OO converter works well, and there were no obvious problems with the converted patch.
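If you prefer to stay on the command line, a text-mode browser dump can do the same HTML-to-text conversion. This is only a sketch and assumes lynx is installed; the -width value keeps long patch lines from being re-wrapped, and the output file name is arbitrary. The page's surrounding text (mail headers, navigation) will end up in the file too, but patch skips such leading garbage:

wget -c http://osdir.com/ml/web.wget.patches/2007-07/msg00011.html
# Dump the page as plain text, without the trailing link list
lynx -dump -nolist -width=1024 msg00011.html > size-limit.patch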
