* Sync with the trunk.

Eelco Dolstra 2011-12-30 13:08:14 +00:00
commit 254b3399ba
9 changed files with 70 additions and 30 deletions

release.nix

@@ -1,5 +1,5 @@
 { nixpkgs ? <nixpkgs>, nixos ? <nixos>
-, nix ? { outPath = ../nix-export; rev = 1234; }
+, nix ? { outPath = ./.; rev = 1234; }
 , officialRelease ? false
 }:

@@ -98,7 +98,7 @@ let
       ];
       configureFlags = ''
-        --disable-init-state --disable-shared
+        --disable-init-state
         --with-bzip2=${bzip2} --with-sqlite=${sqlite}
         --with-dbi=${perlPackages.DBI}/lib/perl5/site_perl
         --with-dbd-sqlite=${perlPackages.DBDSQLite}/lib/perl5/site_perl

scripts/build-remote.pl.in

@@ -1,6 +1,6 @@
 #! @perl@ -w @perlFlags@
-use Fcntl ':flock';
+use Fcntl qw(:DEFAULT :flock);
 use English '-no_match_vars';
 use IO::Handle;
 use Nix::Config;

@@ -56,7 +56,7 @@ sub openSlotLock {
     my ($machine, $slot) = @_;
     my $slotLockFn = "$currentLoad/" . (join '+', @{$machine->{systemTypes}}) . "-" . $machine->{hostName} . "-$slot";
     my $slotLock = new IO::Handle;
-    open $slotLock, ">>$slotLockFn" or die;
+    sysopen $slotLock, "$slotLockFn", O_RDWR|O_CREAT, 0600 or die;
     return $slotLock;
 }

@@ -64,7 +64,7 @@ sub openSlotLock {
 # Read the list of machines.
 my @machines;
 if (defined $conf && -e $conf) {
-    open CONF, "< $conf" or die;
+    open CONF, "<$conf" or die;
     while (<CONF>) {
         chomp;
         s/\#.*$//g;

@@ -104,7 +104,7 @@ REQ: while (1) {
     # Acquire the exclusive lock on $currentLoad/main-lock.
     mkdir $currentLoad, 0777 or die unless -d $currentLoad;
     my $mainLock = "$currentLoad/main-lock";
-    open MAINLOCK, ">>$mainLock" or die;
+    sysopen MAINLOCK, "$mainLock", O_RDWR|O_CREAT, 0600 or die;
     flock(MAINLOCK, LOCK_EX) or die;

@@ -225,8 +225,17 @@ sub removeRoots {
 }

-# Copy the derivation and its dependencies to the build machine.
+# Copy the derivation and its dependencies to the build machine. This
+# is guarded by an exclusive lock per machine to prevent multiple
+# build-remote instances from copying to a machine simultaneously.
+# That's undesirable because we may end up with N instances uploading
+# the same missing path simultaneously, causing the effective network
+# bandwidth and target disk speed to be divided by N.
+my $uploadLock = "$currentLoad/$hostName.upload-lock";
+sysopen MAINLOCK, "$uploadLock", O_RDWR|O_CREAT, 0600 or die;
+flock(MAINLOCK, LOCK_EX) or die;
 Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne "");
+close MAINLOCK;

 # Perform the build.
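
The Fcntl import gains :DEFAULT so that the O_RDWR and O_CREAT constants used by sysopen are in scope, and the lock files are now created with an explicit 0600 mode (owner-only) instead of an umask-dependent append-mode open. The upload lock itself is plain flock(2) advisory locking keyed on the host name: whichever build-remote instance holds the exclusive lock is the only one copying to that machine, and the others block inside flock() until the holder closes the handle (Perl drops a flock when its filehandle is closed, which is what the trailing close MAINLOCK does). A minimal C++ sketch of the same pattern, with an illustrative lock path and function name that are not part of Nix:

    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>
    #include <stdexcept>
    #include <string>

    /* Block until we hold the exclusive per-host upload lock.  The
       lock is released when the returned fd is closed. */
    int acquireUploadLock(const std::string & host)
    {
        std::string path = "/tmp/" + host + ".upload-lock"; /* illustrative */
        int fd = open(path.c_str(), O_RDWR | O_CREAT, 0600);
        if (fd == -1) throw std::runtime_error("cannot open " + path);
        if (flock(fd, LOCK_EX) == -1) { /* blocks while another copy runs */
            close(fd);
            throw std::runtime_error("cannot lock " + path);
        }
        return fd;
    }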

src/libstore/build.cc

@@ -1300,6 +1300,13 @@ void DerivationGoal::buildDone()
            being valid. */
         computeClosure();

+        /* It is now safe to delete the lock files, since all future
+           lockers will see that the output paths are valid; they will
+           not create new lock files with the same names as the old
+           (unlinked) lock files. */
+        outputLocks.setDeletion(true);
+        outputLocks.unlock();
+
     } catch (BuildError & e) {
         printMsg(lvlError, e.msg());
         outputLocks.unlock();

@@ -1987,13 +1994,6 @@ void DerivationGoal::computeClosure()
         infos.push_back(info);
     }
     worker.store.registerValidPaths(infos);
-
-    /* It is now safe to delete the lock files, since all future
-       lockers will see that the output paths are valid; they will not
-       create new lock files with the same names as the old (unlinked)
-       lock files. */
-    outputLocks.setDeletion(true);
-    outputLocks.unlock();
 }
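
The ordering is the point of the comment being moved: a lock file may only be unlinked after the output paths it guards have been registered as valid, otherwise a process that opened the lock file just before the unlink would be left holding a lock on an orphaned inode while a newcomer creates a fresh lock file under the same name, and the two would not exclude each other. This change relocates the deletion from computeClosure() into buildDone(), immediately after computeClosure() returns. A condensed sketch of the delete-then-unlock step, simplified from what Nix's PathLocks does and not its actual implementation:

    #include <unistd.h>
    #include <string>
    #include <vector>

    struct PathLock { int fd; std::string lockFile; };

    /* Release all held locks.  Unlinking the lock files is only safe
       once the guarded store paths are registered as valid. */
    void unlockAll(std::vector<PathLock> & locks, bool deletePaths)
    {
        for (auto & l : locks) {
            if (deletePaths) unlink(l.lockFile.c_str());
            close(l.fd); /* closing the fd drops the lock itself */
            l.fd = -1;
        }
    }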

src/libstore/gc.cc

@@ -617,27 +617,51 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
         } else {
-            printMsg(lvlError, format("reading the Nix store..."));
-            Paths entries = readDirectory(nixStore);
-
-            /* Randomise the order in which we delete entries to make the
-               collector less biased towards deleting paths that come
-               alphabetically first (e.g. /nix/store/000...). This
-               matters when using --max-freed etc. */
-            vector<Path> entries_(entries.begin(), entries.end());
-            random_shuffle(entries_.begin(), entries_.end());
-
             if (shouldDelete(state.options.action))
                 printMsg(lvlError, format("deleting garbage..."));
             else
                 printMsg(lvlError, format("determining live/dead paths..."));

             try {

+                AutoCloseDir dir = opendir(nixStore.c_str());
+                if (!dir) throw SysError(format("opening directory `%1%'") % nixStore);
+
+                /* Read the store and immediately delete all paths that
+                   aren't valid. When using --max-freed etc., deleting
+                   invalid paths is preferred over deleting unreachable
+                   paths, since unreachable paths could become reachable
+                   again. We don't use readDirectory() here so that GCing
+                   can start faster. */
+                Paths entries;
+                struct dirent * dirent;
+                while (errno = 0, dirent = readdir(dir)) {
+                    checkInterrupt();
+                    string name = dirent->d_name;
+                    if (name == "." || name == "..") continue;
+                    Path path = nixStore + "/" + name;
+                    if (isValidPath(path))
+                        entries.push_back(path);
+                    else
+                        tryToDelete(state, path);
+                }
+
+                dir.close();
+
+                /* Now delete the unreachable valid paths. Randomise the
+                   order in which we delete entries to make the collector
+                   less biased towards deleting paths that come
+                   alphabetically first (e.g. /nix/store/000...). This
+                   matters when using --max-freed etc. */
+                vector<Path> entries_(entries.begin(), entries.end());
+                random_shuffle(entries_.begin(), entries_.end());
+
                 foreach (vector<Path>::iterator, i, entries_)
-                    tryToDelete(state, canonPath(nixStore + "/" + *i));
+                    tryToDelete(state, *i);

             } catch (GCLimitReached & e) {
             }
         }
     }
 }
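
Two details of the rewritten loop carry the weight. First, the comma expression `errno = 0, dirent = readdir(dir)` resets errno before every call, so a NULL return with errno still 0 means end-of-directory rather than a read error. Second, invalid paths are deleted while the directory is still being read, so collection starts producing results immediately instead of waiting for the full store listing. A self-contained sketch of the same traversal, using only POSIX and the standard library rather than Nix's helpers:

    #include <dirent.h>
    #include <algorithm>
    #include <cerrno>
    #include <random>
    #include <stdexcept>
    #include <string>
    #include <vector>

    std::vector<std::string> shuffledEntries(const std::string & dirPath)
    {
        DIR * dir = opendir(dirPath.c_str());
        if (!dir) throw std::runtime_error("cannot open " + dirPath);
        std::vector<std::string> names;
        struct dirent * ent;
        while (errno = 0, ent = readdir(dir)) { /* NULL with errno==0: done */
            std::string name = ent->d_name;
            if (name == "." || name == "..") continue;
            /* A filter-or-delete step would go here, as in the GC loop. */
            names.push_back(dirPath + "/" + name);
        }
        int err = errno;
        closedir(dir);
        if (err) throw std::runtime_error("error reading " + dirPath);
        /* Randomise the visiting order to avoid alphabetical bias. */
        std::shuffle(names.begin(), names.end(),
                     std::mt19937(std::random_device()()));
        return names;
    }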

src/libstore/pathlocks.cc

@@ -16,7 +16,7 @@ int openLockFile(const Path & path, bool create)
 {
     AutoCloseFD fd;

-    fd = open(path.c_str(), O_RDWR | (create ? O_CREAT : 0), 0666);
+    fd = open(path.c_str(), O_RDWR | (create ? O_CREAT : 0), 0600);
     if (fd == -1 && (create || errno != ENOENT))
         throw SysError(format("opening lock file `%1%'") % path);
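
The mode argument to open() is an upper bound, not a guarantee: the kernel creates the file with mode & ~umask, so the old 0666 under a typical 022 umask produced a world-readable 0644 lock file, while the new 0600 yields an owner-only file under any common umask. A small demonstration (file names made up; the mode only applies when the file is first created):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main()
    {
        umask(022); /* a common default */
        /* Created as 0666 & ~022 = 0644: group/world readable. */
        int a = open("/tmp/demo-0666", O_RDWR | O_CREAT, 0666);
        /* Created as 0600 & ~022 = 0600: owner-only. */
        int b = open("/tmp/demo-0600", O_RDWR | O_CREAT, 0600);
        close(a);
        close(b);
        return 0;
    }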

src/libutil/util.cc

@@ -701,7 +701,7 @@ AutoCloseDir::AutoCloseDir(DIR * dir)
 AutoCloseDir::~AutoCloseDir()
 {
-    if (dir) closedir(dir);
+    close();
 }

@@ -717,6 +717,14 @@ AutoCloseDir::operator DIR *()
 }

+void AutoCloseDir::close()
+{
+    if (dir) {
+        closedir(dir);
+        dir = 0;
+    }
+}

 //////////////////////////////////////////////////////////////////////
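
The reason for an explicit close() on an RAII wrapper shows up in the gc.cc hunk above: the collector wants to release the directory handle as soon as the listing is read, before the potentially long deletion phase, while keeping the destructor as a safety net. Nulling the pointer inside close() is what makes the destructor's later call a harmless no-op. The same shape as a self-contained sketch, not Nix's actual class:

    #include <dirent.h>

    class DirHandle
    {
        DIR * dir;
    public:
        explicit DirHandle(DIR * d) : dir(d) { }
        ~DirHandle() { close(); } /* destructor funnels through close() */

        /* Safe to call early and more than once. */
        void close()
        {
            if (dir) {
                closedir(dir);
                dir = 0;
            }
        }

        operator DIR *() { return dir; }

    private:
        DirHandle(const DirHandle &);              /* non-copyable */
        DirHandle & operator =(const DirHandle &);
    };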

src/libutil/util.hh

@@ -223,6 +223,7 @@ public:
     ~AutoCloseDir();
     void operator =(DIR * dir);
     operator DIR *();
+    void close();
 };

(deleted file)

@@ -1 +0,0 @@
-(import <a.nix>)

(deleted file)

@@ -1 +0,0 @@
-"abcc"