-rw-r--r--  ChangeLog  10
-rw-r--r--  job.c      34
2 files changed, 40 insertions, 4 deletions
diff --git a/ChangeLog b/ChangeLog
index fb91f7b..c477249 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2012-12-07 Eli Zaretskii <eliz@gnu.org>
+
+ * job.c (create_batch_file): Declare the counter of batch files
+ static, to avoid having 2 jobs using the same file name and
+ stepping on each other's toes. When all 64K names are used up,
+ make one more loop looking for slots that became vacant. This
+ avoids leaving behind temporary batch files in the temporary
+ directory, which happens frequently on a fast machine when using
+ parallel builds.
+
2012-10-29 Paul Smith <psmith@gnu.org>
New feature: "load" directive for dynamically-loaded objects.
diff --git a/job.c b/job.c
index f7b7d51..b11d1b4 100644
--- a/job.c
+++ b/job.c
@@ -265,7 +265,16 @@ create_batch_file (char const *base, int unixy, int *fd)
char temp_path[MAXPATHLEN]; /* need to know its length */
unsigned path_size = GetTempPath(sizeof temp_path, temp_path);
int path_is_dot = 0;
- unsigned uniq = 1;
+ /* The following variable is static so we won't try to reuse a name
+ that was generated a little while ago, because that file might
+ not be on disk yet, since we use FILE_ATTRIBUTE_TEMPORARY below,
+ which tells the OS it doesn't need to flush the cache to disk.
+ If the file is not yet on disk, we might think the name is
+ available, while it really isn't. This happens in parallel
+ builds, where Make doesn't wait for one job to finish before it
+ launches the next one. */
+ static unsigned uniq = 1;
+ static int second_loop = 0;
const unsigned sizemax = strlen (base) + strlen (ext) + 10;
if (path_size == 0)
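
The hunk above relies on probing a candidate name by trying to create the file exclusively and inspecting GetLastError(), with the FILE_ATTRIBUTE_TEMPORARY flag the new comment mentions. A minimal Win32 sketch of that probe follows; it is not Make's exact call, it assumes a CREATE_NEW disposition, and try_create_batch is an illustrative name (CreateFileA is used so the sketch compiles regardless of UNICODE settings).

/* Minimal sketch, not Make's code: probe a candidate batch-file name
   by exclusive creation.  CREATE_NEW fails with ERROR_FILE_EXISTS /
   ERROR_ALREADY_EXISTS when the name is taken, and the
   FILE_ATTRIBUTE_TEMPORARY flag (see the comment above) lets the OS
   delay flushing the file to disk.  */
#include <windows.h>

static HANDLE
try_create_batch (const char *name, int *name_taken)
{
  HANDLE h = CreateFileA (name,
                          GENERIC_READ | GENERIC_WRITE,
                          0,                     /* no sharing */
                          NULL,                  /* default security */
                          CREATE_NEW,            /* fail if it already exists */
                          FILE_ATTRIBUTE_NORMAL | FILE_ATTRIBUTE_TEMPORARY,
                          NULL);

  *name_taken = 0;
  if (h == INVALID_HANDLE_VALUE)
    {
      const DWORD er = GetLastError ();
      if (er == ERROR_FILE_EXISTS || er == ERROR_ALREADY_EXISTS)
        *name_taken = 1;          /* caller should bump its counter */
    }
  return h;
}
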
@@ -276,7 +285,7 @@ create_batch_file (char const *base, int unixy, int *fd)
while (path_size > 0 &&
path_size + sizemax < sizeof temp_path &&
- uniq < 0x10000)
+ !(uniq >= 0x10000 && second_loop))
{
unsigned size = sprintf (temp_path + path_size,
"%s%s-%x.%s",
@@ -296,7 +305,18 @@ create_batch_file (char const *base, int unixy, int *fd)
const DWORD er = GetLastError();
if (er == ERROR_FILE_EXISTS || er == ERROR_ALREADY_EXISTS)
- ++uniq;
+ {
+ ++uniq;
+ if (uniq == 0x10000 && !second_loop)
+ {
+ /* If we already had 64K batch files in this
+ process, make a second loop through the numbers,
+ looking for free slots, i.e. files that were
+ deleted in the meantime. */
+ second_loop = 1;
+ uniq = 0;
+ }
+ }
/* the temporary path is not guaranteed to exist */
else if (path_is_dot == 0)
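
Taken together, the hunks above turn the name search into: keep the counter static across calls, and once all 64K numbers have been handed out, wrap around for one more pass looking for numbers whose files have since been deleted. Below is a portable, self-contained sketch of that loop; it substitutes POSIX open(O_CREAT|O_EXCL) for the CreateFile probe, and create_uniq_file and the ".bat" suffix are illustrative, not part of Make.

/* Minimal sketch (assumed names, POSIX calls in place of Win32) of
   the counter logic in the patch above.  'uniq' is static so a later
   call never restarts from 1 and collides with a batch file a
   parallel job may still be using; after 64K names have been tried,
   one extra pass scans for numbers whose files were removed in the
   meantime.  */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static int
create_uniq_file (const char *base, char *name, size_t name_size)
{
  static unsigned uniq = 1;     /* persists across calls */
  static int second_loop = 0;   /* set after the first 64K names */

  while (!(uniq >= 0x10000 && second_loop))
    {
      int fd;

      snprintf (name, name_size, "%s-%x.bat", base, uniq);
      fd = open (name, O_CREAT | O_EXCL | O_RDWR, 0600);
      if (fd >= 0)
        return fd;              /* fresh name: hand back the open fd */

      if (errno != EEXIST)
        return -1;              /* unexpected error: give up */

      ++uniq;
      if (uniq == 0x10000 && !second_loop)
        {
          /* All 64K numbers used by this process: make one more
             loop over them, looking for vacated slots.  */
          second_loop = 1;
          uniq = 0;
        }
    }
  return -1;                    /* every candidate name is in use */
}
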
@@ -791,11 +811,17 @@ reap_children (int block, int err)
c, pid2str (c->pid), c->remote ? _(" (remote)") : ""));
if (c->sh_batch_file) {
+ int rm_status;
+
DB (DB_JOBS, (_("Cleaning up temp batch file %s\n"),
c->sh_batch_file));
/* just try and remove, don't care if this fails */
- remove (c->sh_batch_file);
+ errno = 0;
+ rm_status = remove (c->sh_batch_file);
+ if (rm_status)
+ DB (DB_JOBS, (_("Cleaning up temp batch file %s failed (%d)\n"),
+ c->sh_batch_file, errno));
/* all done with memory */
free (c->sh_batch_file);
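
The last hunk replaces the fire-and-forget remove() with one that reports why a batch file could not be deleted. A standalone sketch of the same pattern follows; a plain fprintf stands in for Make's DB() debug output, and cleanup_batch_file is an assumed name, not a function in Make.

/* Minimal sketch of the reap_children change above: reset errno,
   check remove()'s return value, and report the reason when the
   batch file cannot be deleted instead of silently ignoring it.  */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
cleanup_batch_file (char *batch_file)
{
  errno = 0;
  if (remove (batch_file) != 0)
    fprintf (stderr, "Cleaning up temp batch file %s failed (%s)\n",
             batch_file, strerror (errno));

  /* All done with the memory, as in the hunk above.  */
  free (batch_file);
}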