summaryrefslogtreecommitdiff
path: root/apt-inst
diff options
context:
space:
mode:
authorArch Librarian <arch@canonical.com>2004-09-20 16:56:32 +0000
committerArch Librarian <arch@canonical.com>2004-09-20 16:56:32 +0000
commitb2e465d6d32d2dc884f58b94acb7e35f671a87fe (patch)
tree5928383b9bde7b0ba9812e6526ad746466e558f7 /apt-inst
parent00b47c98ca4a4349686a082eba6d77decbb03a4d (diff)
downloadapt-b2e465d6d32d2dc884f58b94acb7e35f671a87fe.tar.gz
Join with aliencode
Author: jgg Date: 2001-02-20 07:03:16 GMT Join with aliencode
Diffstat (limited to 'apt-inst')
-rw-r--r--apt-inst/contrib/arfile.cc154
-rw-r--r--apt-inst/contrib/arfile.h68
-rw-r--r--apt-inst/contrib/extracttar.cc342
-rw-r--r--apt-inst/contrib/extracttar.h54
-rw-r--r--apt-inst/database.cc30
-rw-r--r--apt-inst/database.h56
-rw-r--r--apt-inst/deb/debfile.cc262
-rw-r--r--apt-inst/deb/debfile.h92
-rw-r--r--apt-inst/deb/dpkgdb.cc490
-rw-r--r--apt-inst/deb/dpkgdb.h53
-rw-r--r--apt-inst/dirstream.cc103
-rw-r--r--apt-inst/dirstream.h61
-rw-r--r--apt-inst/dpkg-diffs.txt5
-rw-r--r--apt-inst/extract.cc509
-rw-r--r--apt-inst/extract.h52
-rw-r--r--apt-inst/filelist.cc588
-rw-r--r--apt-inst/filelist.h314
-rw-r--r--apt-inst/makefile30
18 files changed, 3263 insertions, 0 deletions
diff --git a/apt-inst/contrib/arfile.cc b/apt-inst/contrib/arfile.cc
new file mode 100644
index 00000000..c2964b7a
--- /dev/null
+++ b/apt-inst/contrib/arfile.cc
@@ -0,0 +1,154 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: arfile.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ AR File - Handle an 'AR' archive
+
+ AR Archives have plain text headers at the start of each file
+ section. The headers are aligned on a 2 byte boundary.
+
+ Information about the structure of AR files can be found in ar(5)
+ on a BSD system, or in the binutils source.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/arfile.h"
+#endif
+#include <apt-pkg/arfile.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/error.h>
+
+#include <stdlib.h>
+ /*}}}*/
+
+struct ARArchive::MemberHeader  // 60-byte on-disk AR member header; all fields are unterminated ASCII
+{
+   char Name[16];   // member file name, space padded
+   char MTime[12];  // decimal modification time
+   char UID[6];     // decimal user id
+   char GID[6];     // decimal group id
+   char Mode[8];    // octal permission bits (parsed base 8 below)
+   char Size[10];   // decimal size of the member data in bytes
+   char Magic[2];   // header terminator bytes
+};
+
+// ARArchive::ARArchive - Constructor					/*{{{*/
+// ---------------------------------------------------------------------
+/* Parses every member header immediately; failures are recorded on the
+   global _error stack by LoadHeaders. */
+ARArchive::ARArchive(FileFd &File) : List(0), File(File)
+{
+   LoadHeaders();
+}
+									/*}}}*/
+// ARArchive::~ARArchive - Destructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+ARArchive::~ARArchive()
+{
+ while (List != 0)
+ {
+ Member *Tmp = List;
+ List = List->Next;
+ delete Tmp;
+ }
+}
+ /*}}}*/
+// ARArchive::LoadHeaders - Load the headers from each file /*{{{*/
+// ---------------------------------------------------------------------
+/* AR files are structured with a 8 byte magic string followed by a 60
+ byte plain text header then the file data, another header, data, etc */
+bool ARArchive::LoadHeaders()
+{
+ signed long Left = File.Size();
+
+ // Check the magic byte
+ char Magic[8];
+ if (File.Read(Magic,sizeof(Magic)) == false)
+ return false;
+ if (memcmp(Magic,"!<arch>\012",sizeof(Magic)) != 0)
+ return _error->Error("Invalid archive signature");
+ Left -= sizeof(Magic);
+
+ // Read the member list
+ while (Left > 0)
+ {
+ MemberHeader Head;
+ if (File.Read(&Head,sizeof(Head)) == false)
+ return _error->Error("Error reading archive member header");
+ Left -= sizeof(Head);
+
+ // Convert all of the integer members
+ Member *Memb = new Member();
+ if (StrToNum(Head.MTime,Memb->MTime,sizeof(Head.MTime)) == false ||
+ StrToNum(Head.UID,Memb->UID,sizeof(Head.UID)) == false ||
+ StrToNum(Head.GID,Memb->GID,sizeof(Head.GID)) == false ||
+ StrToNum(Head.Mode,Memb->Mode,sizeof(Head.Mode),8) == false ||
+ StrToNum(Head.Size,Memb->Size,sizeof(Head.Size)) == false)
+ {
+ delete Memb;
+ return _error->Error("Invalid archive member header");
+ }
+
+ // Check for an extra long name string
+ if (memcmp(Head.Name,"#1/",3) == 0)
+ {
+ char S[300];
+ unsigned long Len;
+ if (StrToNum(Head.Name+3,Len,sizeof(Head.Size)-3) == false ||
+ Len >= strlen(S))
+ {
+ delete Memb;
+ return _error->Error("Invalid archive member header");
+ }
+ if (File.Read(S,Len) == false)
+ return false;
+ S[Len] = 0;
+ Memb->Name = S;
+ Memb->Size -= Len;
+ Left -= Len;
+ }
+ else
+ {
+ unsigned int I = sizeof(Head.Name) - 1;
+ for (; Head.Name[I] == ' '; I--);
+ Memb->Name = string(Head.Name,0,I+1);
+ }
+
+ // Account for the AR header alignment
+ unsigned Skip = Memb->Size % 2;
+
+ // Add it to the list
+ Memb->Next = List;
+ List = Memb;
+ Memb->Start = File.Tell();
+ if (File.Skip(Memb->Size + Skip) == false)
+ return false;
+ if (Left < (signed)(Memb->Size + Skip))
+ return _error->Error("Archive is too short");
+ Left -= Memb->Size + Skip;
+ }
+ if (Left != 0)
+ return _error->Error("Failed to read the archive headers");
+
+ return true;
+}
+ /*}}}*/
+// ARArchive::FindMember - Find a name in the member list /*{{{*/
+// ---------------------------------------------------------------------
+/* Find a member with the given name */
+const ARArchive::Member *ARArchive::FindMember(const char *Name) const
+{
+ const Member *Res = List;
+ while (Res != 0)
+ {
+ if (Res->Name == Name)
+ return Res;
+ Res = Res->Next;
+ }
+
+ return 0;
+}
+ /*}}}*/
diff --git a/apt-inst/contrib/arfile.h b/apt-inst/contrib/arfile.h
new file mode 100644
index 00000000..6c54d3e6
--- /dev/null
+++ b/apt-inst/contrib/arfile.h
@@ -0,0 +1,68 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: arfile.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ AR File - Handle an 'AR' archive
+
+ This is a reader for the usual 4.4 BSD AR format. It allows raw
+ stream access to a single member at a time. Basically all this class
+ provides is header parsing and verification. It is up to the client
+ to correctly make use of the stream start/stop points.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_ARFILE_H
+#define PKGLIB_ARFILE_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/arfile.h"
+#endif
+
+#include <string>
+#include <apt-pkg/fileutl.h>
+
+class ARArchive
+{
+   struct MemberHeader;   // on-disk 60 byte header layout (arfile.cc)
+   public:
+   struct Member;         // parsed, in-memory form of one header
+   
+   protected:
+
+   // Linked list of members, most recently read first
+   Member *List;
+   
+   bool LoadHeaders();    // parse all headers; invoked by the constructor
+   
+   // The stream file
+   FileFd &File;
+
+   // Locate a member by name; returns 0 when it is not present
+   const Member *FindMember(const char *Name) const;
+   
+   ARArchive(FileFd &File);
+   ~ARArchive();
+};
+
+// A member of the archive, decoded from one on-disk header
+struct ARArchive::Member
+{
+   // Fields from the header
+   string Name;           // file name, long-name extension already resolved
+   unsigned long MTime;   // modification time (decimal in the header)
+   unsigned long UID;
+   unsigned long GID;
+   unsigned long Mode;    // permission bits (octal in the header)
+   unsigned long Size;    // size of the data section in bytes
+   
+   // Location of the data.
+   unsigned long Start;   // absolute offset of the data within the archive
+   Member *Next;          // next entry in ARArchive::List
+   
+   Member() : Start(0), Next(0) {};
+};
+
+#endif
diff --git a/apt-inst/contrib/extracttar.cc b/apt-inst/contrib/extracttar.cc
new file mode 100644
index 00000000..57e083b5
--- /dev/null
+++ b/apt-inst/contrib/extracttar.cc
@@ -0,0 +1,342 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: extracttar.cc,v 1.2 2001/02/20 07:03:17 jgg Exp $
+/* ######################################################################
+
+ Extract a Tar - Tar Extractor
+
+ Some performance measurements showed that zlib performed quite poorly
+ in comparison to a forked gzip process. This tar extractor makes use
+ of the fact that dup'd file descriptors have the same seek pointer
+ and that gzip will not read past the end of a compressed stream,
+ even if there is more data. We use the dup property to track extraction
+ progress and the gzip feature to just feed gzip a fd in the middle
+ of an AR file.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/extracttar.h"
+#endif
+#include <apt-pkg/extracttar.h>
+
+#include <apt-pkg/error.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/configuration.h>
+#include <system.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <fcntl.h>
+ /*}}}*/
+
+// The on disk header for a tar file. The numeric fields are octal ASCII.
+struct ExtractTar::TarHeader
+{
+   char Name[100];      // member path; may fill the field with no terminator
+   char Mode[8];        // octal permission bits
+   char UserID[8];      // octal uid
+   char GroupID[8];     // octal gid
+   char Size[12];       // octal size of the data section in bytes
+   char MTime[12];      // octal modification time
+   char Checksum[8];    // octal sum of the header bytes, checksum blanked
+   char LinkFlag;       // member type, see ExtractTar::ItemType
+   char LinkName[100];  // link target; may also lack a terminator
+   char MagicNumber[8];
+   char UserName[32];
+   char GroupName[32];
+   char Major[8];       // octal device major
+   char Minor[8];       // octal device minor
+};
+
+// ExtractTar::ExtractTar - Constructor					/*{{{*/
+// ---------------------------------------------------------------------
+/* Fd is the file containing the compressed tar stream; Max records the
+   compressed size of that stream. No child is started until Go(). */
+ExtractTar::ExtractTar(FileFd &Fd,unsigned long Max) : File(Fd), 
+                       MaxInSize(Max)
+
+{
+   GZPid = -1;
+   InFd = -1;
+   Eof = false;
+}
+									/*}}}*/
+// ExtractTar::ExtractTar - Destructor					/*{{{*/
+// ---------------------------------------------------------------------
+/* Make sure the gzip child is reaped; Done(false) reports any failures
+   through the normal error mechanism. */
+ExtractTar::~ExtractTar()
+{
+   Done(false);
+}
+									/*}}}*/
+// ExtractTar::Done - Reap the gzip sub process			/*{{{*/
+// ---------------------------------------------------------------------
+/* If the force flag is given then error messages are suppressed - this
+   means we hit the end of the tar file but there was still gzip data. */
+bool ExtractTar::Done(bool Force)
+{
+   InFd.Close();   // closing the pipe lets gzip terminate
+   if (GZPid <= 0)
+      return true;
+
+   /* If there is a pending error then we are cleaning up gzip and are
+      not interested in its failures */
+   if (_error->PendingError() == true)
+      Force = true;
+   
+   // Make sure we clean it up! The child is interrupted before reaping.
+   kill(GZPid,SIGINT);
+   if (ExecWait(GZPid,_config->Find("dir::bin::gzip","/bin/gzip").c_str(),
+		Force) == false)
+   {
+      GZPid = -1;
+      return Force;   // suppressed failure still counts as success
+   }
+   
+   GZPid = -1;
+   return true;
+}
+									/*}}}*/
+// ExtractTar::StartGzip - Startup gzip				/*{{{*/
+// ---------------------------------------------------------------------
+/* This creates a gzip sub process that has its input as the file itself.
+   If this tar file is embedded into something like an ar file then 
+   gzip will efficiently ignore the extra bits. Decompressed data arrives
+   through a pipe whose read end becomes InFd. */
+bool ExtractTar::StartGzip()
+{
+   int Pipes[2];
+   if (pipe(Pipes) != 0)
+      return _error->Errno("pipe","Failed to create pipes");
+   
+   // Fork off the process
+   GZPid = ExecFork();
+
+   // Spawn the subprocess
+   if (GZPid == 0)
+   {
+      // Setup the FDs
+      dup2(Pipes[1],STDOUT_FILENO);
+      dup2(File.Fd(),STDIN_FILENO);   // dup'd fd shares the seek pointer
+      int Fd = open("/dev/null",O_RDWR);
+      if (Fd == -1)
+	 _exit(101);
+      dup2(Fd,STDERR_FILENO);   // silence gzip's own diagnostics
+      close(Fd);
+      SetCloseExec(STDOUT_FILENO,false);
+      SetCloseExec(STDIN_FILENO,false);      
+      SetCloseExec(STDERR_FILENO,false);
+      
+      const char *Args[3];
+      Args[0] = _config->Find("dir::bin::gzip","/bin/gzip").c_str();
+      Args[1] = "-d";
+      Args[2] = 0;
+      execv(Args[0],(char **)Args);
+      cerr << "Failed to exec gzip " << Args[0] << endl;
+      _exit(100);
+   }
+
+   // Fix up our FDs: keep the read end, drop the child's write end
+   InFd.Fd(Pipes[0]);
+   close(Pipes[1]);
+   return true;
+}
+									/*}}}*/
+// ExtractTar::Go - Perform extraction /*{{{*/
+// ---------------------------------------------------------------------
+/* This reads each 512 byte block from the archive and extracts the header
+ information into the Item structure. Then it resolves the UID/GID and
+ invokes the correct processing function. */
+bool ExtractTar::Go(pkgDirStream &Stream)
+{
+ if (StartGzip() == false)
+ return false;
+
+ // Loop over all blocks
+ string LastLongLink;
+ string LastLongName;
+ while (1)
+ {
+ bool BadRecord = false;
+ unsigned char Block[512];
+ if (InFd.Read(Block,sizeof(Block),true) == false)
+ return false;
+
+ if (InFd.Eof() == true)
+ break;
+
+ // Get the checksum
+ TarHeader *Tar = (TarHeader *)Block;
+ unsigned long CheckSum;
+ if (StrToNum(Tar->Checksum,CheckSum,sizeof(Tar->Checksum),8) == false)
+ return _error->Error("Corrupted archive");
+
+ /* Compute the checksum field. The actual checksum is blanked out
+ with spaces so it is not included in the computation */
+ unsigned long NewSum = 0;
+ memset(Tar->Checksum,' ',sizeof(Tar->Checksum));
+ for (int I = 0; I != sizeof(Block); I++)
+ NewSum += Block[I];
+
+ /* Check for a block of nulls - in this case we kill gzip, GNU tar
+ does this.. */
+ if (NewSum == ' '*sizeof(Tar->Checksum))
+ return Done(true);
+
+ if (NewSum != CheckSum)
+ return _error->Error("Tar Checksum failed, archive corrupted");
+
+ // Decode all of the fields
+ pkgDirStream::Item Itm;
+ unsigned long UID;
+ unsigned long GID;
+ if (StrToNum(Tar->Mode,Itm.Mode,sizeof(Tar->Mode),8) == false ||
+ StrToNum(Tar->UserID,UID,sizeof(Tar->UserID),8) == false ||
+ StrToNum(Tar->GroupID,GID,sizeof(Tar->GroupID),8) == false ||
+ StrToNum(Tar->Size,Itm.Size,sizeof(Tar->Size),8) == false ||
+ StrToNum(Tar->MTime,Itm.MTime,sizeof(Tar->MTime),8) == false ||
+ StrToNum(Tar->Major,Itm.Major,sizeof(Tar->Major),8) == false ||
+ StrToNum(Tar->Minor,Itm.Minor,sizeof(Tar->Minor),8) == false)
+ return _error->Error("Corrupted archive");
+
+ // Grab the filename
+ if (LastLongName.empty() == false)
+ Itm.Name = (char *)LastLongName.c_str();
+ else
+ {
+ Tar->Name[sizeof(Tar->Name)] = 0;
+ Itm.Name = Tar->Name;
+ }
+ if (Itm.Name[0] == '.' && Itm.Name[1] == '/' && Itm.Name[2] != 0)
+ Itm.Name += 2;
+
+ // Grab the link target
+ Tar->Name[sizeof(Tar->LinkName)] = 0;
+ Itm.LinkTarget = Tar->LinkName;
+
+ if (LastLongLink.empty() == false)
+ Itm.LinkTarget = (char *)LastLongLink.c_str();
+
+ // Convert the type over
+ switch (Tar->LinkFlag)
+ {
+ case NormalFile0:
+ case NormalFile:
+ Itm.Type = pkgDirStream::Item::File;
+ break;
+
+ case HardLink:
+ Itm.Type = pkgDirStream::Item::HardLink;
+ break;
+
+ case SymbolicLink:
+ Itm.Type = pkgDirStream::Item::SymbolicLink;
+ break;
+
+ case CharacterDevice:
+ Itm.Type = pkgDirStream::Item::CharDevice;
+ break;
+
+ case BlockDevice:
+ Itm.Type = pkgDirStream::Item::BlockDevice;
+ break;
+
+ case Directory:
+ Itm.Type = pkgDirStream::Item::Directory;
+ break;
+
+ case FIFO:
+ Itm.Type = pkgDirStream::Item::FIFO;
+ break;
+
+ case GNU_LongLink:
+ {
+ unsigned long Length = Itm.Size;
+ unsigned char Block[512];
+ while (Length > 0)
+ {
+ if (InFd.Read(Block,sizeof(Block),true) == false)
+ return false;
+ if (Length <= sizeof(Block))
+ {
+ LastLongLink.append(Block,Block+sizeof(Block));
+ break;
+ }
+ LastLongLink.append(Block,Block+sizeof(Block));
+ Length -= sizeof(Block);
+ }
+ continue;
+ }
+
+ case GNU_LongName:
+ {
+ unsigned long Length = Itm.Size;
+ unsigned char Block[512];
+ while (Length > 0)
+ {
+ if (InFd.Read(Block,sizeof(Block),true) == false)
+ return false;
+ if (Length < sizeof(Block))
+ {
+ LastLongName.append(Block,Block+sizeof(Block));
+ break;
+ }
+ LastLongName.append(Block,Block+sizeof(Block));
+ Length -= sizeof(Block);
+ }
+ continue;
+ }
+
+ default:
+ BadRecord = true;
+ _error->Warning("Unkown TAR header type %u, member %s",(unsigned)Tar->LinkFlag,Tar->Name);
+ break;
+ }
+
+ int Fd = -1;
+ if (BadRecord == false)
+ if (Stream.DoItem(Itm,Fd) == false)
+ return false;
+
+ // Copy the file over the FD
+ unsigned long Size = Itm.Size;
+ while (Size != 0)
+ {
+ unsigned char Junk[32*1024];
+ unsigned long Read = MIN(Size,sizeof(Junk));
+ if (InFd.Read(Junk,((Read+511)/512)*512) == false)
+ return false;
+
+ if (BadRecord == false)
+ {
+ if (Fd > 0)
+ {
+ if (write(Fd,Junk,Read) != (signed)Read)
+ return Stream.Fail(Itm,Fd);
+ }
+ else
+ {
+ /* An Fd of -2 means to send to a special processing
+ function */
+ if (Fd == -2)
+ if (Stream.Process(Itm,Junk,Read,Itm.Size - Size) == false)
+ return Stream.Fail(Itm,Fd);
+ }
+ }
+
+ Size -= Read;
+ }
+
+ // And finish up
+ if (Itm.Size != 0 && BadRecord == false)
+ if (Stream.FinishedFile(Itm,Fd) == false)
+ return false;
+
+ LastLongName.erase();
+ LastLongLink.erase();
+ }
+
+ return Done(false);
+}
+ /*}}}*/
diff --git a/apt-inst/contrib/extracttar.h b/apt-inst/contrib/extracttar.h
new file mode 100644
index 00000000..aaca987f
--- /dev/null
+++ b/apt-inst/contrib/extracttar.h
@@ -0,0 +1,54 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: extracttar.h,v 1.2 2001/02/20 07:03:17 jgg Exp $
+/* ######################################################################
+
+ Extract a Tar - Tar Extractor
+
+ The tar extractor takes an ordinary gzip compressed tar stream from
+ the given file and explodes it, passing the individual items to the
+ given Directory Stream for processing.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_EXTRACTTAR_H
+#define PKGLIB_EXTRACTTAR_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/extracttar.h"
+#endif
+
+#include <apt-pkg/fileutl.h>
+#include <apt-pkg/dirstream.h>
+
+class ExtractTar
+{
+   protected:
+   
+   struct TarHeader;   // on-disk 512 byte header layout (extracttar.cc)
+   
+   // The various types items can be
+   enum ItemType {NormalFile0 = '\0',NormalFile = '0',HardLink = '1',
+                  SymbolicLink = '2',CharacterDevice = '3',
+                  BlockDevice = '4',Directory = '5',FIFO = '6',
+                  GNU_LongLink = 'K',GNU_LongName = 'L'};
+
+   FileFd &File;             // archive containing the compressed tar
+   unsigned long MaxInSize;  // compressed size of the tar stream
+   int GZPid;                // pid of the gzip child, -1 when none
+   FileFd InFd;              // read end of the pipe from gzip
+   bool Eof;
+   
+   // Fork and reap gzip
+   bool StartGzip();
+   bool Done(bool Force);   // Force suppresses errors from the child
+   
+   public:
+
+   bool Go(pkgDirStream &Stream);   // extract, feeding items to Stream
+   
+   ExtractTar(FileFd &Fd,unsigned long Max);
+   virtual ~ExtractTar();
+};
+
+#endif
diff --git a/apt-inst/database.cc b/apt-inst/database.cc
new file mode 100644
index 00000000..beee692b
--- /dev/null
+++ b/apt-inst/database.cc
@@ -0,0 +1,30 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: database.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ Data Base Abstraction
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/database.h"
+#endif
+
+#include <apt-pkg/database.h>
+ /*}}}*/
+
+// DataBase::GetMetaTmp - Get the temp dir /*{{{*/
+// ---------------------------------------------------------------------
+/* This re-initializes the meta temporary directory if it hasn't yet
+ been inited for this cycle. The flag is the emptyness of MetaDir */
+bool pkgDataBase::GetMetaTmp(string &Dir)
+{
+ if (MetaDir.empty() == true)
+ if (InitMetaTmp(MetaDir) == false)
+ return false;
+ Dir = MetaDir;
+ return true;
+}
+ /*}}}*/
diff --git a/apt-inst/database.h b/apt-inst/database.h
new file mode 100644
index 00000000..0972d40c
--- /dev/null
+++ b/apt-inst/database.h
@@ -0,0 +1,56 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: database.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ Data Base Abstraction
+
+ This class provides a simple interface to an abstract notion of a
+ database directory for storing state information about the system.
+
+ The 'Meta' information for a package is the control information and
+ setup scripts stored inside the archive. GetMetaTmp returns the name of
+ a directory that is used to store named files containing the control
+ information.
+
+ The File Listing is the database of installed files. It is loaded
+ into the memory/persistent cache structure by the ReadFileList method.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_DATABASE_H
+#define PKGLIB_DATABASE_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/database.h"
+#endif
+
+#include <apt-pkg/filelist.h>
+#include <apt-pkg/pkgcachegen.h>
+
+class pkgDataBase
+{
+   protected:
+   
+   pkgCacheGenerator *Cache;   // package cache generator, owned (deleted here)
+   pkgFLCache *FList;          // installed file list cache, owned (deleted here)
+   string MetaDir;             // meta tmp dir; empty until first GetMetaTmp
+   virtual bool InitMetaTmp(string &Dir) = 0;   // create the tmp dir for this cycle
+   
+   public:
+
+   // Some manipulators for the cache and generator
+   inline pkgCache &GetCache() {return Cache->GetCache();};
+   inline pkgFLCache &GetFLCache() {return *FList;};
+   inline pkgCacheGenerator &GetGenerator() {return *Cache;};
+   
+   bool GetMetaTmp(string &Dir);   // lazily init and return the tmp dir
+   virtual bool ReadyFileList(OpProgress &Progress) = 0;
+   virtual bool ReadyPkgCache(OpProgress &Progress) = 0;
+   virtual bool LoadChanges() = 0;
+   
+   pkgDataBase() : Cache(0), FList(0) {};
+   virtual ~pkgDataBase() {delete Cache; delete FList;};
+};
+
+#endif
diff --git a/apt-inst/deb/debfile.cc b/apt-inst/deb/debfile.cc
new file mode 100644
index 00000000..c93ba88a
--- /dev/null
+++ b/apt-inst/deb/debfile.cc
@@ -0,0 +1,262 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: debfile.cc,v 1.2 2001/02/20 07:03:17 jgg Exp $
+/* ######################################################################
+
+ Debian Archive File (.deb)
+
+ .DEB archives are AR files containing two tars and an empty marker
+ member called 'debian-binary'. The two tars contain the meta data and
+ the actual archive contents. Thus this class is a very simple wrapper
+ around ar/tar to simply extract the right tar files.
+
+ It also uses the deb package list parser to parse the control file
+ into the cache.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/debfile.h"
+#endif
+
+#include <apt-pkg/debfile.h>
+#include <apt-pkg/extracttar.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/deblistparser.h>
+
+#include <sys/stat.h>
+#include <unistd.h>
+ /*}}}*/
+
+// DebFile::debDebFile - Constructor					/*{{{*/
+// ---------------------------------------------------------------------
+/* Open the AR file and check that the three members every .deb must
+   carry are present; failures are left on the global _error stack. */
+debDebFile::debDebFile(FileFd &File) : File(File), AR(File)
+{
+   if (_error->PendingError() == true)   // the AR parse may have failed
+      return;
+
+   // Check the members for validity
+   if (CheckMember("debian-binary") == false || 
+       CheckMember("control.tar.gz") == false ||
+       CheckMember("data.tar.gz") == false)
+      return;
+}
+									/*}}}*/
+// DebFile::CheckMember - Check if a named member is in the archive /*{{{*/
+// ---------------------------------------------------------------------
+/* This is used to check for a correct deb and to give nicer error messages
+ for people playing around. */
+bool debDebFile::CheckMember(const char *Name)
+{
+ if (AR.FindMember(Name) == 0)
+ return _error->Error("This is not a valid DEB archive, missing '%s' member",Name);
+ return true;
+}
+ /*}}}*/
+// DebFile::GotoMember - Jump to a Member /*{{{*/
+// ---------------------------------------------------------------------
+/* Jump in the file to the start of a named member and return the information
+ about that member. The caller can then read from the file up to the
+ returned size. Note, since this relies on the file position this is
+ a destructive operation, it also changes the last returned Member
+ structure - so don't nest them! */
+const ARArchive::Member *debDebFile::GotoMember(const char *Name)
+{
+ // Get the archive member and positition the file
+ const ARArchive::Member *Member = AR.FindMember(Name);
+ if (Member == 0)
+ {
+ _error->Error("Internal Error, could not locate member %s",Name);
+ return 0;
+ }
+ if (File.Seek(Member->Start) == false)
+ return 0;
+
+ return Member;
+}
+ /*}}}*/
+// DebFile::ExtractControl - Extract Control information /*{{{*/
+// ---------------------------------------------------------------------
+/* Extract the control information into the Database's temporary
+ directory. */
+bool debDebFile::ExtractControl(pkgDataBase &DB)
+{
+ // Get the archive member and positition the file
+ const ARArchive::Member *Member = GotoMember("control.tar.gz");
+ if (Member == 0)
+ return false;
+
+ // Prepare Tar
+ ControlExtract Extract;
+ ExtractTar Tar(File,Member->Size);
+ if (_error->PendingError() == true)
+ return false;
+
+ // Get into the temporary directory
+ string Cwd = SafeGetCWD();
+ string Tmp;
+ if (DB.GetMetaTmp(Tmp) == false)
+ return false;
+ if (chdir(Tmp.c_str()) != 0)
+ return _error->Errno("chdir","Couldn't change to %s",Tmp.c_str());
+
+ // Do extraction
+ if (Tar.Go(Extract) == false)
+ return false;
+
+ // Switch out of the tmp directory.
+ if (chdir(Cwd.c_str()) != 0)
+ chdir("/");
+
+ return true;
+}
+ /*}}}*/
+// DebFile::ExtractArchive - Extract the archive data itself /*{{{*/
+// ---------------------------------------------------------------------
+/* Simple wrapper around tar.. */
+bool debDebFile::ExtractArchive(pkgDirStream &Stream)
+{
+ // Get the archive member and positition the file
+ const ARArchive::Member *Member = AR.FindMember("data.tar.gz");
+ if (Member == 0)
+ return _error->Error("Internal Error, could not locate member");
+ if (File.Seek(Member->Start) == false)
+ return false;
+
+ // Prepare Tar
+ ExtractTar Tar(File,Member->Size);
+ if (_error->PendingError() == true)
+ return false;
+ return Tar.Go(Stream);
+}
+ /*}}}*/
+// DebFile::MergeControl - Merge the control information		/*{{{*/
+// ---------------------------------------------------------------------
+/* This reads the extracted control file into the cache and returns the
+   version that was parsed. All this really does is select the correct
+   parser and correct file to parse. An end() iterator signals failure. */
+pkgCache::VerIterator debDebFile::MergeControl(pkgDataBase &DB)
+{
+   // Open the control file (extracted earlier by ExtractControl)
+   string Tmp;
+   if (DB.GetMetaTmp(Tmp) == false)
+      return pkgCache::VerIterator(DB.GetCache());
+   FileFd Fd(Tmp + "control",FileFd::ReadOnly);
+   if (_error->PendingError() == true)
+      return pkgCache::VerIterator(DB.GetCache());
+   
+   // Parse it
+   debListParser Parse(&Fd);
+   pkgCache::VerIterator Ver(DB.GetCache());
+   if (DB.GetGenerator().MergeList(Parse,&Ver) == false)
+      return pkgCache::VerIterator(DB.GetCache());
+   
+   if (Ver.end() == true)
+      _error->Error("Failed to locate a valid control file");
+   return Ver;
+}
+									/*}}}*/
+
+// DebFile::ControlExtract::DoItem - Control Tar Extraction /*{{{*/
+// ---------------------------------------------------------------------
+/* This directory stream handler for the control tar handles extracting
+ it into the temporary meta directory. It only extracts files, it does
+ not create directories, links or anything else. */
+bool debDebFile::ControlExtract::DoItem(Item &Itm,int &Fd)
+{
+ if (Itm.Type != Item::File)
+ return true;
+
+ /* Cleanse the file name, prevent people from trying to unpack into
+ absolute paths, .., etc */
+ for (char *I = Itm.Name; *I != 0; I++)
+ if (*I == '/')
+ *I = '_';
+
+ /* Force the ownership to be root and ensure correct permissions,
+ go-w, the rest are left untouched */
+ Itm.UID = 0;
+ Itm.GID = 0;
+ Itm.Mode &= ~(S_IWGRP | S_IWOTH);
+
+ return pkgDirStream::DoItem(Itm,Fd);
+}
+ /*}}}*/
+
+// MemControlExtract::DoItem - Check if it is the control file	/*{{{*/
+// ---------------------------------------------------------------------
+/* This sets up to extract the control block member file into a memory 
+   block of just the right size. All other files go into the bit bucket. */
+bool debDebFile::MemControlExtract::DoItem(Item &Itm,int &Fd)
+{
+   // At the control file, allocate buffer memory.
+   if (Member == Itm.Name)
+   {
+      delete [] Control;   // discard any buffer from a previous member
+      Control = new char[Itm.Size+2];   // +2 for the "\n\n" appended by Read
+      IsControl = true;
+      Fd = -2; // Signal to pass to Process
+      Length = Itm.Size;
+   }
+   else
+      IsControl = false;
+   
+   return true;
+}
+									/*}}}*/
+// MemControlExtract::Process - Process extracting the control file	/*{{{*/
+// ---------------------------------------------------------------------
+/* Just memcopy the block from the tar extractor and put it in the right
+   place in the pre-allocated memory block. Pos is the offset of this
+   chunk within the member, so out-of-order delivery would also work. */
+bool debDebFile::MemControlExtract::Process(Item &Itm,const unsigned char *Data,
+			     unsigned long Size,unsigned long Pos)
+{
+   memcpy(Control + Pos, Data,Size);
+   return true;
+}
+									/*}}}*/
+// MemControlExtract::Read - Read the control information from the deb /*{{{*/
+// ---------------------------------------------------------------------
+/* This uses the internal tar extractor to fetch the control file, and then
+   it parses it into a tag section parser. Succeeds silently when the
+   member is absent from the tarball (Control stays 0). */
+bool debDebFile::MemControlExtract::Read(debDebFile &Deb)
+{
+   // Get the archive member and position the file
+   const ARArchive::Member *Member = Deb.GotoMember("control.tar.gz");
+   if (Member == 0)
+      return false;
+     
+   // Extract it.
+   ExtractTar Tar(Deb.GetFile(),Member->Size);
+   if (Tar.Go(*this) == false)
+      return false;
+
+   if (Control == 0)   // the named member was never seen; nothing to parse
+      return true;
+   
+   Control[Length] = '\n';   // terminate with a blank line for the scanner
+   Control[Length+1] = '\n';
+   if (Section.Scan(Control,Length+2) == false)
+      return _error->Error("Unparsible control file");
+   return true;
+}
+									/*}}}*/
+// MemControlExtract::TakeControl - Parse a memory block /*{{{*/
+// ---------------------------------------------------------------------
+/* The given memory block is loaded into the parser and parsed as a control
+ record. */
+bool debDebFile::MemControlExtract::TakeControl(const void *Data,unsigned long Size)
+{
+ delete [] Control;
+ Control = new char[Size+2];
+ Length = Size;
+ memcpy(Control,Data,Size);
+
+ Control[Length] = '\n';
+ Control[Length+1] = '\n';
+ return Section.Scan(Control,Length+2);
+}
+ /*}}}*/
+
diff --git a/apt-inst/deb/debfile.h b/apt-inst/deb/debfile.h
new file mode 100644
index 00000000..d89b8526
--- /dev/null
+++ b/apt-inst/deb/debfile.h
@@ -0,0 +1,92 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: debfile.h,v 1.2 2001/02/20 07:03:17 jgg Exp $
+/* ######################################################################
+
+ Debian Archive File (.deb)
+
+ This Class handles all the operations performed directly on .deb
+ files. It makes use of the AR and TAR classes to give the necessary
+ external interface.
+
+ There are only two things that can be done with a raw package,
+ extract its control information and extract the contents itself.
+
+ This should probably subclass an as-yet unwritten super class to
+ produce a generic archive mechanism.
+
+ The memory control file extractor is useful to extract a single file
+ into memory from the control.tar.gz
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_DEBFILE_H
+#define PKGLIB_DEBFILE_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/debfile.h"
+#endif
+
+#include <apt-pkg/arfile.h>
+#include <apt-pkg/database.h>
+#include <apt-pkg/dirstream.h>
+#include <apt-pkg/tagfile.h>
+
+class debDebFile
+{
+   protected:
+   
+   FileFd &File;   // the open .deb on disk
+   ARArchive AR;   // AR wrapper over File; parsed in the constructor
+   
+   bool CheckMember(const char *Name);   // error unless the member exists
+   
+   public:
+
+   class ControlExtract;
+   class MemControlExtract;
+   
+   bool ExtractControl(pkgDataBase &DB);        // unpack control.tar.gz to the meta tmp dir
+   bool ExtractArchive(pkgDirStream &Stream);   // unpack data.tar.gz through Stream
+   pkgCache::VerIterator MergeControl(pkgDataBase &DB);   // parse control into the cache
+   const ARArchive::Member *GotoMember(const char *Name);   // seek to a member (destructive)
+   inline FileFd &GetFile() {return File;};
+   
+   debDebFile(FileFd &File);
+};
+
+// Extracts the control tarball's plain files into the current directory
+class debDebFile::ControlExtract : public pkgDirStream
+{
+   public:
+   
+   virtual bool DoItem(Item &Itm,int &Fd);   // flattens paths, forces root ownership
+};
+
+// Captures one named control member into memory and tag-parses it
+class debDebFile::MemControlExtract : public pkgDirStream
+{
+   bool IsControl;   // true while the current tar item is the wanted member
+   
+   public:
+   
+   char *Control;           // heap buffer holding the member plus "\n\n"
+   pkgTagSection Section;   // parse result after Read/TakeControl succeed
+   unsigned long Length;    // size of the member data, excluding the "\n\n"
+   string Member;           // member file name to capture, "control" by default
+   
+   // Members from DirStream
+   virtual bool DoItem(Item &Itm,int &Fd);
+   virtual bool Process(Item &Itm,const unsigned char *Data,
+			unsigned long Size,unsigned long Pos);
+
+   
+   // Helpers
+   bool Read(debDebFile &Deb);
+   bool TakeControl(const void *Data,unsigned long Size);
+
+   MemControlExtract() : IsControl(false), Control(0), Length(0), Member("control") {};
+   MemControlExtract(string Member) : IsControl(false), Control(0), Length(0), Member(Member) {};   
+   ~MemControlExtract() {delete [] Control;};   
+};
+ /*}}}*/
+
+#endif
diff --git a/apt-inst/deb/dpkgdb.cc b/apt-inst/deb/dpkgdb.cc
new file mode 100644
index 00000000..85fec1cc
--- /dev/null
+++ b/apt-inst/deb/dpkgdb.cc
@@ -0,0 +1,490 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: dpkgdb.cc,v 1.2 2001/02/20 07:03:17 jgg Exp $
+/* ######################################################################
+
+   DPKGv1 Database Implementation
+
+ This class provides parsers and other implementations for the DPKGv1
+ database. It reads the diversion file, the list files and the status
+ file to build both the list of currently installed files and the
+ currently installed package list.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/dpkgdb.h"
+#endif
+
+#include <apt-pkg/dpkgdb.h>
+#include <apt-pkg/configuration.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/progress.h>
+#include <apt-pkg/tagfile.h>
+#include <apt-pkg/strutl.h>
+
+#include <stdio.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+ /*}}}*/
+
+// EraseDir - Erase A Directory /*{{{*/
+// ---------------------------------------------------------------------
+/* This is necessary to create a new empty sub directory. The caller should
+ invoke mkdir after this with the proper permissions and check for
+ error. Maybe stick this in fileutils */
+static bool EraseDir(const char *Dir)
+{
+ // First we try a simple RM
+ if (rmdir(Dir) == 0 ||
+ errno == ENOENT)
+ return true;
+
+ // A file? Easy enough..
+ if (errno == ENOTDIR)
+ {
+ if (unlink(Dir) != 0)
+ return _error->Errno("unlink","Failed to remove %s",Dir);
+ return true;
+ }
+
+ // Should not happen
+ if (errno != ENOTEMPTY)
+ return _error->Errno("rmdir","Failed to remove %s",Dir);
+
+ // Purge it using rm
+ int Pid = ExecFork();
+
+ // Spawn the subprocess
+ if (Pid == 0)
+ {
+ execlp(_config->Find("Dir::Bin::rm","/bin/rm").c_str(),
+ "rm","-rf","--",Dir,0);
+ _exit(100);
+ }
+ return ExecWait(Pid,_config->Find("dir::bin::rm","/bin/rm").c_str());
+}
+ /*}}}*/
+// DpkgDB::debDpkgDB - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+debDpkgDB::debDpkgDB() : CacheMap(0), FileMap(0)
+{
+ AdminDir = flNotFile(_config->Find("Dir::State::status"));
+ DiverInode = 0;
+ DiverTime = 0;
+}
+ /*}}}*/
+// DpkgDB::~debDpkgDB - Destructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+debDpkgDB::~debDpkgDB()
+{
+ delete Cache;
+ Cache = 0;
+ delete CacheMap;
+ CacheMap = 0;
+
+ delete FList;
+ FList = 0;
+ delete FileMap;
+ FileMap = 0;
+}
+ /*}}}*/
+// DpkgDB::InitMetaTmp - Get the temp dir for meta information /*{{{*/
+// ---------------------------------------------------------------------
+/* This creates+empties the meta temporary directory /var/lib/dpkg/tmp.ci
+ Only one package at a time can be using the returned meta directory. */
+bool debDpkgDB::InitMetaTmp(string &Dir)
+{
+ string Tmp = AdminDir + "tmp.ci/";
+ if (EraseDir(Tmp.c_str()) == false)
+ return _error->Error("Unable to create %s",Tmp.c_str());
+ if (mkdir(Tmp.c_str(),0755) != 0)
+ return _error->Errno("mkdir","Unable to create %s",Tmp.c_str());
+
+ // Verify it is on the same filesystem as the main info directory
+ dev_t Dev;
+ struct stat St;
+ if (stat((AdminDir + "info").c_str(),&St) != 0)
+ return _error->Errno("stat","Failed to stat %sinfo",AdminDir.c_str());
+ Dev = St.st_dev;
+ if (stat(Tmp.c_str(),&St) != 0)
+ return _error->Errno("stat","Failed to stat %s",Tmp.c_str());
+ if (Dev != St.st_dev)
+ return _error->Error("The info and temp directories need to be on the same filesystem");
+
+ // Done
+ Dir = Tmp;
+ return true;
+}
+ /*}}}*/
+// DpkgDB::ReadyPkgCache - Prepare the cache with the current status /*{{{*/
+// ---------------------------------------------------------------------
+/* This reads in the status file into an empty cache. This really needs
+ to be somehow unified with the high level APT notion of the Database
+ directory, but there is no clear way on how to do that yet. */
+bool debDpkgDB::ReadyPkgCache(OpProgress &Progress)
+{
+ if (Cache != 0)
+ {
+ Progress.OverallProgress(1,1,1,"Reading Package Lists");
+ return true;
+ }
+
+ if (CacheMap != 0)
+ {
+ delete CacheMap;
+ CacheMap = 0;
+ }
+
+ if (pkgMakeOnlyStatusCache(Progress,&CacheMap) == false)
+ return false;
+ Cache->DropProgress();
+
+ return true;
+}
+ /*}}}*/
+// DpkgDB::ReadFList - Read the File Listings in /*{{{*/
+// ---------------------------------------------------------------------
+/* This reads the file listing in from the state directory. This is a
+ performance critical routine, as it needs to parse about 50k lines of
+ text spread over a hundred or more files. For an initial cold start
+ most of the time is spent in reading file inodes and so on, not
+ actually parsing. */
+bool debDpkgDB::ReadFList(OpProgress &Progress)
+{
+ // Count the number of packages we need to read information for
+ unsigned long Total = 0;
+ pkgCache &Cache = this->Cache->GetCache();
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; I++)
+ {
+ // Only not installed packages have no files.
+ if (I->CurrentState == pkgCache::State::NotInstalled)
+ continue;
+ Total++;
+ }
+
+ /* Switch into the admin dir, this prevents useless lookups for the
+ path components */
+ string Cwd = SafeGetCWD();
+ if (chdir((AdminDir + "info/").c_str()) != 0)
+ return _error->Errno("chdir","Failed to change to the admin dir %sinfo",AdminDir.c_str());
+
+ // Allocate a buffer. Anything larger than this buffer will be mmaped
+ unsigned long BufSize = 32*1024;
+ char *Buffer = new char[BufSize];
+
+ // Begin Loading them
+ unsigned long Count = 0;
+ char Name[300];
+ for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; I++)
+ {
+ /* Only not installed packages have no files. ConfFile packages have
+ file lists but we don't want to read them in */
+ if (I->CurrentState == pkgCache::State::NotInstalled ||
+ I->CurrentState == pkgCache::State::ConfigFiles)
+ continue;
+
+ // Fetch a package handle to associate with the file
+ pkgFLCache::PkgIterator FlPkg = FList->GetPkg(I.Name(),0,true);
+ if (FlPkg.end() == true)
+ {
+ _error->Error("Internal Error getting a Package Name");
+ break;
+ }
+
+ Progress.OverallProgress(Count,Total,1,"Reading File Listing");
+
+ // Open the list file
+ snprintf(Name,sizeof(Name),"%s.list",I.Name());
+ int Fd = open(Name,O_RDONLY);
+
+ /* Okay this is very strange and bad.. Best thing is to bail and
+ instruct the user to look into it. */
+ struct stat Stat;
+ if (Fd == -1 || fstat(Fd,&Stat) != 0)
+ {
+ _error->Errno("open","Failed to open the list file '%sinfo/%s'. If you "
+ "cannot restore this file then make it empty "
+ "and immediately re-install the same version of the package!",
+ AdminDir.c_str(),Name);
+ break;
+ }
+
+ // Set File to be a memory buffer containing the whole file
+ char *File;
+ if ((unsigned)Stat.st_size < BufSize)
+ {
+ if (read(Fd,Buffer,Stat.st_size) != Stat.st_size)
+ {
+ _error->Errno("read","Failed reading the list file %sinfo/%s",
+ AdminDir.c_str(),Name);
+ close(Fd);
+ break;
+ }
+ File = Buffer;
+ }
+ else
+ {
+ // Use mmap
+ File = (char *)mmap(0,Stat.st_size,PROT_READ,MAP_PRIVATE,Fd,0);
+ if (File == (char *)(-1))
+ {
+ _error->Errno("mmap","Failed reading the list file %sinfo/%s",
+ AdminDir.c_str(),Name);
+ close(Fd);
+ break;
+ }
+ }
+
+ // Parse it
+ const char *Start = File;
+ const char *End = File;
+ const char *Finish = File + Stat.st_size;
+ for (; End < Finish; End++)
+ {
+ // Not an end of line
+ if (*End != '\n' && End + 1 < Finish)
+ continue;
+
+ // Skip blank lines
+ if (End - Start > 1)
+ {
+ pkgFLCache::NodeIterator Node = FList->GetNode(Start,End,
+ FlPkg.Offset(),true,false);
+ if (Node.end() == true)
+ {
+ _error->Error("Internal Error getting a Node");
+ break;
+ }
+ }
+
+ // Skip past the end of line
+ for (; *End == '\n' && End < Finish; End++);
+ Start = End;
+ }
+
+ close(Fd);
+ if ((unsigned)Stat.st_size >= BufSize)
+ munmap((caddr_t)File,Stat.st_size);
+
+ // Failed
+ if (End < Finish)
+ break;
+
+ Count++;
+ }
+
+ delete [] Buffer;
+ if (chdir(Cwd.c_str()) != 0)
+ chdir("/");
+
+ return !_error->PendingError();
+}
+ /*}}}*/
+// DpkgDB::ReadDiversions - Load the diversions file /*{{{*/
+// ---------------------------------------------------------------------
+/* Read the diversion file in from disk. This is usually invoked by
+ LoadChanges before performing an operation that uses the FLCache. */
+bool debDpkgDB::ReadDiversions()
+{
+ struct stat Stat;
+ if (stat((AdminDir + "diversions").c_str(),&Stat) != 0)
+ return true;
+
+ if (_error->PendingError() == true)
+ return false;
+
+ FILE *Fd = fopen((AdminDir + "diversions").c_str(),"r");
+ if (Fd == 0)
+ return _error->Errno("fopen","Failed to open the diversions file %sdiversions",AdminDir.c_str());
+
+ FList->BeginDiverLoad();
+ while (1)
+ {
+ char From[300];
+ char To[300];
+ char Package[100];
+
+ // Read the three lines in
+ if (fgets(From,sizeof(From),Fd) == 0)
+ break;
+ if (fgets(To,sizeof(To),Fd) == 0 ||
+ fgets(Package,sizeof(Package),Fd) == 0)
+ {
+ _error->Error("The diversion file is corrupted");
+ break;
+ }
+
+ // Strip the \ns
+ unsigned long Len = strlen(From);
+ if (Len < 2 || From[Len-1] != '\n')
+ _error->Error("Invalid line in the diversion file: %s",From);
+ else
+ From[Len-1] = 0;
+ Len = strlen(To);
+ if (Len < 2 || To[Len-1] != '\n')
+ _error->Error("Invalid line in the diversion file: %s",To);
+ else
+ To[Len-1] = 0;
+ Len = strlen(Package);
+ if (Len < 2 || Package[Len-1] != '\n')
+ _error->Error("Invalid line in the diversion file: %s",Package);
+ else
+ Package[Len-1] = 0;
+
+ // Make sure the lines were parsed OK
+ if (_error->PendingError() == true)
+ break;
+
+ // Fetch a package
+ if (strcmp(Package,":") == 0)
+ Package[0] = 0;
+ pkgFLCache::PkgIterator FlPkg = FList->GetPkg(Package,0,true);
+ if (FlPkg.end() == true)
+ {
+ _error->Error("Internal Error getting a Package Name");
+ break;
+ }
+
+ // Install the diversion
+ if (FList->AddDiversion(FlPkg,From,To) == false)
+ {
+ _error->Error("Internal Error adding a diversion");
+ break;
+ }
+ }
+ if (_error->PendingError() == false)
+ FList->FinishDiverLoad();
+
+ DiverInode = Stat.st_ino;
+ DiverTime = Stat.st_mtime;
+
+ fclose(Fd);
+ return !_error->PendingError();
+}
+ /*}}}*/
+// DpkgDB::ReadFileList - Read the file listing /*{{{*/
+// ---------------------------------------------------------------------
+/* Read in the file listing. The file listing is created from three
+ sources, *.list, Conffile sections and the Diversion table. */
+bool debDpkgDB::ReadyFileList(OpProgress &Progress)
+{
+ if (Cache == 0)
+ return _error->Error("The pkg cache must be initialize first");
+ if (FList != 0)
+ {
+ Progress.OverallProgress(1,1,1,"Reading File List");
+ return true;
+ }
+
+ // Create the cache and read in the file listing
+ FileMap = new DynamicMMap(MMap::Public);
+ FList = new pkgFLCache(*FileMap);
+ if (_error->PendingError() == true ||
+ ReadFList(Progress) == false ||
+ ReadConfFiles() == false ||
+ ReadDiversions() == false)
+ {
+ delete FList;
+ delete FileMap;
+ FileMap = 0;
+ FList = 0;
+ return false;
+ }
+
+ cout << "Node: " << FList->HeaderP->NodeCount << ',' << FList->HeaderP->UniqNodes << endl;
+ cout << "Dir: " << FList->HeaderP->DirCount << endl;
+ cout << "Package: " << FList->HeaderP->PackageCount << endl;
+ cout << "HashSize: " << FList->HeaderP->HashSize << endl;
+ cout << "Size: " << FileMap->Size() << endl;
+ cout << endl;
+
+ return true;
+}
+ /*}}}*/
+// DpkgDB::ReadConfFiles - Read the conf file sections from the s-file /*{{{*/
+// ---------------------------------------------------------------------
+/* Reading the conf files is done by reparsing the status file. This is
+ actually rather fast so it is no big deal. */
+bool debDpkgDB::ReadConfFiles()
+{
+ FileFd File(_config->FindFile("Dir::State::status"),FileFd::ReadOnly);
+ pkgTagFile Tags(&File);
+ if (_error->PendingError() == true)
+ return false;
+
+ pkgTagSection Section;
+ while (1)
+ {
+ // Skip to the next section
+ unsigned long Offset = Tags.Offset();
+ if (Tags.Step(Section) == false)
+ break;
+
+ // Parse the line
+ const char *Start;
+ const char *Stop;
+ if (Section.Find("Conffiles",Start,Stop) == false)
+ continue;
+
+ const char *PkgStart;
+ const char *PkgEnd;
+ if (Section.Find("Package",PkgStart,PkgEnd) == false)
+ return _error->Error("Failed to find a Package: Header, offset %lu",Offset);
+
+ // Snag a package record for it
+ pkgFLCache::PkgIterator FlPkg = FList->GetPkg(PkgStart,PkgEnd,true);
+ if (FlPkg.end() == true)
+ return _error->Error("Internal Error getting a Package Name");
+
+ // Parse the conf file lines
+ while (1)
+ {
+ for (; isspace(*Start) != 0 && Start < Stop; Start++);
+ if (Start == Stop)
+ break;
+
+ // Split it into words
+ const char *End = Start;
+ for (; isspace(*End) == 0 && End < Stop; End++);
+ const char *StartMd5 = End;
+ for (; isspace(*StartMd5) != 0 && StartMd5 < Stop; StartMd5++);
+ const char *EndMd5 = StartMd5;
+ for (; isspace(*EndMd5) == 0 && EndMd5 < Stop; EndMd5++);
+ if (StartMd5 == EndMd5 || Start == End)
+ return _error->Error("Bad ConfFile section in the status file. Offset %lu",Offset);
+
+ // Insert a new entry
+ unsigned char MD5[16];
+ if (Hex2Num(StartMd5,EndMd5,MD5,16) == false)
+ return _error->Error("Error parsing MD5. Offset %lu",Offset);
+
+ if (FList->AddConfFile(Start,End,FlPkg,MD5) == false)
+ return false;
+ Start = EndMd5;
+ }
+ }
+
+ return true;
+}
+ /*}}}*/
+// DpkgDB::LoadChanges - Read in any changed state files /*{{{*/
+// ---------------------------------------------------------------------
+/* The only file in the dpkg system that can change while packages are
+ unpacking is the diversions file. */
+bool debDpkgDB::LoadChanges()
+{
+ struct stat Stat;
+ if (stat((AdminDir + "diversions").c_str(),&Stat) != 0)
+ return true;
+ if (DiverInode == Stat.st_ino && DiverTime == Stat.st_mtime)
+ return true;
+ return ReadDiversions();
+}
+ /*}}}*/
diff --git a/apt-inst/deb/dpkgdb.h b/apt-inst/deb/dpkgdb.h
new file mode 100644
index 00000000..ddbb6d6f
--- /dev/null
+++ b/apt-inst/deb/dpkgdb.h
@@ -0,0 +1,53 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: dpkgdb.h,v 1.2 2001/02/20 07:03:17 jgg Exp $
+/* ######################################################################
+
+   DPKGv1 Database Implementation
+
+ The DPKGv1 database is typically stored in /var/lib/dpkg/. For
+ DPKGv1 the 'meta' information is the contents of the .deb control.tar.gz
+ member prepended by the package name. The meta information is unpacked
+ in its temporary directory and then migrated into the main list dir
+ at a checkpoint.
+
+   Journaling is provided by synchronized file writes to the updates sub
+ directory.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_DPKGDB_H
+#define PKGLIB_DPKGDB_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/dpkgdb.h"
+#endif
+
+#include <apt-pkg/database.h>
+
+class debDpkgDB : public pkgDataBase
+{
+ protected:
+
+ string AdminDir;
+ DynamicMMap *CacheMap;
+ DynamicMMap *FileMap;
+ unsigned long DiverInode;
+ signed long DiverTime;
+
+ virtual bool InitMetaTmp(string &Dir);
+ bool ReadFList(OpProgress &Progress);
+ bool ReadDiversions();
+ bool ReadConfFiles();
+
+ public:
+
+ virtual bool ReadyFileList(OpProgress &Progress);
+ virtual bool ReadyPkgCache(OpProgress &Progress);
+ virtual bool LoadChanges();
+
+ debDpkgDB();
+ virtual ~debDpkgDB();
+};
+
+#endif
diff --git a/apt-inst/dirstream.cc b/apt-inst/dirstream.cc
new file mode 100644
index 00000000..41dbf440
--- /dev/null
+++ b/apt-inst/dirstream.cc
@@ -0,0 +1,103 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: dirstream.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ Directory Stream
+
+ This class provides a simple basic extractor that can be used for
+ a number of purposes.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/dirstream.h"
+#endif
+
+#include <apt-pkg/dirstream.h>
+#include <apt-pkg/error.h>
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <utime.h>
+#include <unistd.h>
+ /*}}}*/
+
+// DirStream::DoItem - Process an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This is a very simple extractor, it does not deal with things like
+ overwriting directories with files and so on. */
+bool pkgDirStream::DoItem(Item &Itm,int &Fd)
+{
+ switch (Itm.Type)
+ {
+ case Item::File:
+ {
+ /* Open the output file, NDELAY is used to prevent this from
+ blowing up on device special files.. */
+ int iFd = open(Itm.Name,O_NDELAY|O_WRONLY|O_CREAT|O_TRUNC|O_APPEND,
+ Itm.Mode);
+ if (iFd < 0)
+ return _error->Errno("open","Failed write file %s",
+ Itm.Name);
+
+ // fchmod deals with umask and fchown sets the ownership
+ if (fchmod(iFd,Itm.Mode) != 0)
+ return _error->Errno("fchmod","Failed write file %s",
+ Itm.Name);
+ if (fchown(iFd,Itm.UID,Itm.GID) != 0 && errno != EPERM)
+ return _error->Errno("fchown","Failed write file %s",
+ Itm.Name);
+ Fd = iFd;
+ return true;
+ }
+
+ case Item::HardLink:
+ case Item::SymbolicLink:
+ case Item::CharDevice:
+ case Item::BlockDevice:
+ case Item::Directory:
+ case Item::FIFO:
+ break;
+ }
+
+ return true;
+}
+ /*}}}*/
+// DirStream::FinishedFile - Finished processing a file /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool pkgDirStream::FinishedFile(Item &Itm,int Fd)
+{
+ if (Fd < 0)
+ return true;
+
+ if (close(Fd) != 0)
+ return _error->Errno("close","Failed to close file %s",Itm.Name);
+
+ /* Set the modification times. The only way it can fail is if someone
+ has futzed with our file, which is intolerable :> */
+ struct utimbuf Time;
+ Time.actime = Itm.MTime;
+ Time.modtime = Itm.MTime;
+ if (utime(Itm.Name,&Time) != 0)
+ _error->Errno("utime","Failed to close file %s",Itm.Name);
+
+ return true;
+}
+ /*}}}*/
+// DirStream::Fail - Failed processing a file /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool pkgDirStream::Fail(Item &Itm,int Fd)
+{
+ if (Fd < 0)
+ return true;
+
+ close(Fd);
+ return false;
+}
+ /*}}}*/
diff --git a/apt-inst/dirstream.h b/apt-inst/dirstream.h
new file mode 100644
index 00000000..dfb480bd
--- /dev/null
+++ b/apt-inst/dirstream.h
@@ -0,0 +1,61 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: dirstream.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ Directory Stream
+
+ When unpacking the contents of the archive are passed into a directory
+ stream class for analysis and processing. The class controls all aspects
+ of actually writing the directory stream from disk. The low level
+ archive handlers are only responsible for decoding the archive format
+ and sending events (via method calls) to the specified directory
+ stream.
+
+ When unpacking a real file the archive handler is passed back a file
+ handle to write the data to, this is to support strange
+ archives+unpacking methods. If that fd is -1 then the file data is
+ simply ignored.
+
+ The provided defaults do the 'Right Thing' for a normal unpacking
+ process (ie 'tar')
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_DIRSTREAM_H
+#define PKGLIB_DIRSTREAM_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/dirstream.h"
+#endif
+
+class pkgDirStream
+{
+ public:
+
+ // All possible information about a component
+ struct Item
+ {
+ enum Type_t {File, HardLink, SymbolicLink, CharDevice, BlockDevice,
+ Directory, FIFO} Type;
+ char *Name;
+ char *LinkTarget;
+ unsigned long Mode;
+ unsigned long UID;
+ unsigned long GID;
+ unsigned long Size;
+ unsigned long MTime;
+ unsigned long Major;
+ unsigned long Minor;
+ };
+
+ virtual bool DoItem(Item &Itm,int &Fd);
+ virtual bool Fail(Item &Itm,int Fd);
+ virtual bool FinishedFile(Item &Itm,int Fd);
+ virtual bool Process(Item &Itm,const unsigned char *Data,
+ unsigned long Size,unsigned long Pos) {return true;};
+
+ virtual ~pkgDirStream() {};
+};
+
+#endif
diff --git a/apt-inst/dpkg-diffs.txt b/apt-inst/dpkg-diffs.txt
new file mode 100644
index 00000000..d161055f
--- /dev/null
+++ b/apt-inst/dpkg-diffs.txt
@@ -0,0 +1,5 @@
+- Replacing directories with files
+ dpkg permits this with the weak condition that the directory is owned only
+ by the package. APT requires that the directory have no files that are not
+ owned by the package. Replaces are specifically not checked to prevent
+ file list corruption.
diff --git a/apt-inst/extract.cc b/apt-inst/extract.cc
new file mode 100644
index 00000000..a3c06c08
--- /dev/null
+++ b/apt-inst/extract.cc
@@ -0,0 +1,509 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: extract.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ Archive Extraction Directory Stream
+
+ Extraction for each file is a bit of an involved process. Each object
+ undergoes an atomic backup, overwrite, erase sequence. First the
+ object is unpacked to '.dpkg.new' then the original is hardlinked to
+ '.dpkg.tmp' and finally the new object is renamed to overwrite the old
+ one. From an external perspective the file never ceased to exist.
+   After the archive has been successfully unpacked the .dpkg.tmp files
+ are erased. A failure causes all the .dpkg.tmp files to be restored.
+
+ Decisions about unpacking go like this:
+ - Store the original filename in the file listing
+    - Resolve any diversions that would affect this file, all checks
+ below apply to the diverted name, not the real one.
+ - Resolve any symlinked configuration files.
+ - If the existing file does not exist then .dpkg-tmp is checked for.
+ [Note, this is reduced to only check if a file was expected to be
+ there]
+ - If the existing link/file is not a directory then it is replaced
+      regardless
+ - If the existing link/directory is being replaced by a directory then
+ absolutely nothing happens.
+ - If the existing link/directory is being replaced by a link then
+ absolutely nothing happens.
+ - If the existing link/directory is being replaced by a non-directory
+ then this will abort if the package is not the sole owner of the
+ directory. [Note, this is changed to not happen if the directory
+ non-empty - that is, it only includes files that are part of this
+ package - prevents removing user files accidentally.]
+ - If the non-directory exists in the listing database and it
+ does not belong to the current package then an overwrite condition
+ is invoked.
+
+ As we unpack we record the file list differences in the FL cache. If
+   we need to unroll then the FL cache knows which files have been unpacked
+ and can undo. When we need to erase then it knows which files have not
+ been unpacked.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/extract.h"
+#endif
+#include <apt-pkg/extract.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/debversion.h>
+
+#include <sys/stat.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <dirent.h>
+ /*}}}*/
+
+static const char *TempExt = "dpkg-tmp";
+//static const char *NewExt = "dpkg-new";
+
+// Extract::pkgExtract - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+pkgExtract::pkgExtract(pkgFLCache &FLCache,pkgCache::VerIterator Ver) :
+ FLCache(FLCache), Ver(Ver)
+{
+ FLPkg = FLCache.GetPkg(Ver.ParentPkg().Name(),true);
+ if (FLPkg.end() == true)
+ return;
+ Debug = true;
+}
+ /*}}}*/
+// Extract::DoItem - Handle a single item from the stream /*{{{*/
+// ---------------------------------------------------------------------
+/* This performs the setup for the extraction.. */
+bool pkgExtract::DoItem(Item &Itm,int &Fd)
+{
+ char Temp[sizeof(FileName)];
+
+ /* Strip any leading/trailing /s from the filename, then copy it to the
+ temp buffer and re-apply the leading / We use a class variable
+ to store the new filename for use by the three extraction funcs */
+ char *End = FileName+1;
+ const char *I = Itm.Name;
+ for (; *I != 0 && *I == '/'; I++);
+ *FileName = '/';
+ for (; *I != 0 && End < FileName + sizeof(FileName); I++, End++)
+ *End = *I;
+ if (End + 20 >= FileName + sizeof(FileName))
+ return _error->Error("The path %s is too long",Itm.Name);
+ for (; End > FileName && End[-1] == '/'; End--);
+ *End = 0;
+ Itm.Name = FileName;
+
+ /* Lookup the file. Nde is the file [group] we are going to write to and
+ RealNde is the actual node we are manipulating. Due to diversions
+ they may be entirely different. */
+ pkgFLCache::NodeIterator Nde = FLCache.GetNode(Itm.Name,End,0,false,false);
+ pkgFLCache::NodeIterator RealNde = Nde;
+
+ // See if the file is already in the file listing
+ unsigned long FileGroup = RealNde->File;
+ for (; RealNde.end() == false && FileGroup == RealNde->File; RealNde++)
+ if (RealNde.RealPackage() == FLPkg)
+ break;
+
+ // Nope, create an entry
+ if (RealNde.end() == true)
+ {
+ RealNde = FLCache.GetNode(Itm.Name,End,FLPkg.Offset(),true,false);
+ if (RealNde.end() == true)
+ return false;
+ RealNde->Flags |= pkgFLCache::Node::NewFile;
+ }
+
+ /* Check if this entry already was unpacked. The only time this should
+ ever happen is if someone has hacked tar to support capabilities, in
+ which case this needs to be modified anyhow.. */
+ if ((RealNde->Flags & pkgFLCache::Node::Unpacked) ==
+ pkgFLCache::Node::Unpacked)
+ return _error->Error("Unpacking %s more than once",Itm.Name);
+
+ if (Nde.end() == true)
+ Nde = RealNde;
+
+ /* Consider a diverted file - We are not permitted to divert directories,
+ but everything else is fair game (including conf files!) */
+ if ((Nde->Flags & pkgFLCache::Node::Diversion) != 0)
+ {
+ if (Itm.Type == Item::Directory)
+ return _error->Error("The directory %s is diverted",Itm.Name);
+
+ /* A package overwriting a diversion target is just the same as
+ overwriting a normally owned file and is checked for below in
+ the overwrites mechanism */
+
+ /* If this package is trying to overwrite the target of a diversion,
+ that is never, ever permitted */
+ pkgFLCache::DiverIterator Div = Nde.Diversion();
+ if (Div.DivertTo() == Nde)
+ return _error->Error("The package is trying to write to the "
+ "diversion target %s/%s",Nde.DirN(),Nde.File());
+
+ // See if it is us and we are following it in the right direction
+ if (Div->OwnerPkg != FLPkg.Offset() && Div.DivertFrom() == Nde)
+ {
+ Nde = Div.DivertTo();
+ End = FileName + snprintf(FileName,sizeof(FileName)-20,"%s/%s",
+ Nde.DirN(),Nde.File());
+ if (End <= FileName)
+ return _error->Error("The diversion path is too long");
+ }
+ }
+
+ // Deal with symlinks and conf files
+ if ((RealNde->Flags & pkgFLCache::Node::NewConfFile) ==
+ pkgFLCache::Node::NewConfFile)
+ {
+ string Res = flNoLink(Itm.Name);
+ if (Res.length() > sizeof(FileName))
+ return _error->Error("The path %s is too long",Res.c_str());
+ if (Debug == true)
+ clog << "Followed conf file from " << FileName << " to " << Res << endl;
+ Itm.Name = strcpy(FileName,Res.c_str());
+ }
+
+ /* Get information about the existing file, and attempt to restore
+ a backup if it does not exist */
+ struct stat LExisting;
+ bool EValid = false;
+ if (lstat(Itm.Name,&LExisting) != 0)
+ {
+ // This is bad news.
+ if (errno != ENOENT)
+ return _error->Errno("stat","Failed to stat %s",Itm.Name);
+
+ // See if we can recover the backup file
+ if (Nde.end() == false)
+ {
+ snprintf(Temp,sizeof(Temp),"%s.%s",Itm.Name,TempExt);
+ if (rename(Temp,Itm.Name) != 0 && errno != ENOENT)
+ return _error->Errno("rename","Failed to rename %s to %s",
+ Temp,Itm.Name);
+ if (stat(Itm.Name,&LExisting) != 0)
+ {
+ if (errno != ENOENT)
+ return _error->Errno("stat","Failed to stat %s",Itm.Name);
+ }
+ else
+ EValid = true;
+ }
+ }
+ else
+ EValid = true;
+
+ /* If the file is a link we need to stat its destination, get the
+ existing file modes */
+ struct stat Existing = LExisting;
+ if (EValid == true && S_ISLNK(Existing.st_mode))
+ {
+ if (stat(Itm.Name,&Existing) != 0)
+ {
+ if (errno != ENOENT)
+ return _error->Errno("stat","Failed to stat %s",Itm.Name);
+ Existing = LExisting;
+ }
+ }
+
+ // We pretend a non-existing file looks like it is a normal file
+ if (EValid == false)
+ Existing.st_mode = S_IFREG;
+
+ /* Okay, at this point 'Existing' is the stat information for the
+ real non-link file */
+
+ /* The only way this can be a no-op is if a directory is being
+ replaced by a directory or by a link */
+ if (S_ISDIR(Existing.st_mode) != 0 &&
+ (Itm.Type == Item::Directory || Itm.Type == Item::SymbolicLink))
+ return true;
+
+ /* Non-Directory being replaced by non-directory. We check for over
+ writes here. */
+ if (Nde.end() == false)
+ {
+ if (HandleOverwrites(Nde) == false)
+ return false;
+ }
+
+ /* Directory being replaced by a non-directory - this needs to see if
+ the package is the owner and then see if the directory would be
+ empty after the package is removed [ie no user files will be
+ erased] */
+ if (S_ISDIR(Existing.st_mode) != 0)
+ {
+ if (CheckDirReplace(Itm.Name) == false)
+ return _error->Error("The directory %s is being replaced by a non-directory",Itm.Name);
+ }
+
+ if (Debug == true)
+ clog << "Extract " << string(Itm.Name,End) << endl;
+/* if (Count != 0)
+ return _error->Error("Done");*/
+
+ return true;
+}
+ /*}}}*/
+// Extract::Finished - Sequence finished, erase the temp files /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool pkgExtract::Finished()
+{
+ return true;
+}
+ /*}}}*/
+// Extract::Aborted - Sequence aborted, undo all our unpacking /*{{{*/
+// ---------------------------------------------------------------------
+/* This undoes everything that was done by all calls to the DoItem method
+ and restores the File Listing cache to its original form. It bases its
+ actions on the flags value for each node in the cache. */
+bool pkgExtract::Aborted()
+{
+ if (Debug == true)
+ clog << "Aborted, backing out" << endl;
+
+ pkgFLCache::NodeIterator Files = FLPkg.Files();
+ map_ptrloc *Last = &FLPkg->Files;
+
+ /* Loop over all files, restore those that have been unpacked from their
+      dpkg-tmp entries */
+ while (Files.end() == false)
+ {
+ // Locate the hash bucket for the node and locate its group head
+ pkgFLCache::NodeIterator Nde(FLCache,FLCache.HashNode(Files));
+ for (; Nde.end() == false && Files->File != Nde->File; Nde++);
+ if (Nde.end() == true)
+ return _error->Error("Failed to locate node in its hash bucket");
+
+ if (snprintf(FileName,sizeof(FileName)-20,"%s/%s",
+ Nde.DirN(),Nde.File()) <= 0)
+ return _error->Error("The path is too long");
+
+ // Deal with diversions
+ if ((Nde->Flags & pkgFLCache::Node::Diversion) != 0)
+ {
+ pkgFLCache::DiverIterator Div = Nde.Diversion();
+
+ // See if it is us and we are following it in the right direction
+ if (Div->OwnerPkg != FLPkg.Offset() && Div.DivertFrom() == Nde)
+ {
+ Nde = Div.DivertTo();
+ if (snprintf(FileName,sizeof(FileName)-20,"%s/%s",
+ Nde.DirN(),Nde.File()) <= 0)
+ return _error->Error("The diversion path is too long");
+ }
+ }
+
+ // Deal with overwrites+replaces
+ for (; Nde.end() == false && Files->File == Nde->File; Nde++)
+ {
+ if ((Nde->Flags & pkgFLCache::Node::Replaced) ==
+ pkgFLCache::Node::Replaced)
+ {
+ if (Debug == true)
+ clog << "De-replaced " << FileName << " from " << Nde.RealPackage()->Name << endl;
+ Nde->Flags &= ~pkgFLCache::Node::Replaced;
+ }
+ }
+
+ // Undo the change in the filesystem
+ if (Debug == true)
+ clog << "Backing out " << FileName;
+
+ // Remove a new node
+ if ((Files->Flags & pkgFLCache::Node::NewFile) ==
+ pkgFLCache::Node::NewFile)
+ {
+ if (Debug == true)
+ clog << " [new node]" << endl;
+ pkgFLCache::Node *Tmp = Files;
+ Files++;
+ *Last = Tmp->NextPkg;
+ Tmp->NextPkg = 0;
+
+ FLCache.DropNode(Tmp - FLCache.NodeP);
+ }
+ else
+ {
+ if (Debug == true)
+ clog << endl;
+
+ Last = &Files->NextPkg;
+ Files++;
+ }
+ }
+
+ return true;
+}
+ /*}}}*/
+// Extract::Fail - Extraction of a file Failed /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool pkgExtract::Fail(Item &Itm,int Fd)
+{
+ return pkgDirStream::Fail(Itm,Fd);
+}
+ /*}}}*/
+// Extract::FinishedFile - Finished a file /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+bool pkgExtract::FinishedFile(Item &Itm,int Fd)
+{
+ return pkgDirStream::FinishedFile(Itm,Fd);
+}
+ /*}}}*/
+// Extract::HandleOverwrites - See if a replaces covers this overwrite /*{{{*/
+// ---------------------------------------------------------------------
+/* Check if the file is in a package that is being replaced by this
+ package or if the file is being overwritten. Note that if the file
+ is really a directory but it has been erased from the filesystem
+ this will fail with an overwrite message. This is a limitation of the
+ dpkg file information format.
+
+ XX If a new package installs and another package replaces files in this
+ package what should we do? */
+bool pkgExtract::HandleOverwrites(pkgFLCache::NodeIterator Nde,
+ bool DiverCheck)
+{
+ pkgFLCache::NodeIterator TmpNde = Nde;
+ unsigned long DiverOwner = 0;
+ unsigned long FileGroup = Nde->File;
+ const char *FirstOwner = 0;
+ for (; Nde.end() == false && FileGroup == Nde->File; Nde++)
+ {
+ if ((Nde->Flags & pkgFLCache::Node::Diversion) != 0)
+ {
+ /* Store the diversion owner if this is the forward direction
+ of the diversion */
+ if (DiverCheck == true)
+ DiverOwner = Nde.Diversion()->OwnerPkg;
+ continue;
+ }
+
+ pkgFLCache::PkgIterator FPkg(FLCache,Nde.RealPackage());
+ if (FPkg.end() == true || FPkg == FLPkg)
+ continue;
+
+ /* This test trips when we are checking a diversion to see
+ if something has already been diverted by this diversion */
+ if (FPkg.Offset() == DiverOwner)
+ continue;
+ FirstOwner = FPkg.Name();
+
+ // Now see if this package matches one in a replace depends
+ pkgCache::DepIterator Dep = Ver.DependsList();
+ bool Ok = false;
+ for (; Dep.end() == false; Dep++)
+ {
+ if (Dep->Type != pkgCache::Dep::Replaces)
+ continue;
+
+ // Does the replaces apply to this package?
+ if (strcmp(Dep.TargetPkg().Name(),FPkg.Name()) != 0)
+ continue;
+
+ /* Check the version for match. I do not think CurrentVer can be
+ 0 if we are here.. */
+ pkgCache::PkgIterator Pkg = Dep.TargetPkg();
+ if (Pkg->CurrentVer == 0)
+ {
+ _error->Warning("Overwrite package match with no version for %s",Pkg.Name());
+ continue;
+ }
+
+ // Replaces is met
+ if (debVS.CheckDep(Pkg.CurrentVer().VerStr(),Dep->CompareOp,Dep.TargetVer()) == true)
+ {
+ if (Debug == true)
+ clog << "Replaced file " << Nde.DirN() << '/' << Nde.File() << " from " << Pkg.Name() << endl;
+ Nde->Flags |= pkgFLCache::Node::Replaced;
+ Ok = true;
+ break;
+ }
+ }
+
+ // Negative Hit
+ if (Ok == false)
+ return _error->Error("File %s/%s overwrites the one in the package %s",
+ Nde.DirN(),Nde.File(),FPkg.Name());
+ }
+
+ /* If this is a diversion we might have to recurse to process
+ the other side of it */
+ if ((TmpNde->Flags & pkgFLCache::Node::Diversion) != 0)
+ {
+ pkgFLCache::DiverIterator Div = TmpNde.Diversion();
+ if (Div.DivertTo() == TmpNde)
+ return HandleOverwrites(Div.DivertFrom(),true);
+ }
+
+ return true;
+}
+ /*}}}*/
+// Extract::CheckDirReplace - See if this directory can be erased /*{{{*/
+// ---------------------------------------------------------------------
+/* If this directory is owned by a single package and that package is
+ replacing it with something non-directoryish then dpkg allows this.
+ We increase the requirement to be that the directory is non-empty after
+ the package is removed */
+bool pkgExtract::CheckDirReplace(string Dir,unsigned int Depth)
+{
+ // Looping?
+ if (Depth > 40)
+ return false;
+
+ if (Dir[Dir.size() - 1] != '/')
+ Dir += '/';
+
+ DIR *D = opendir(Dir.c_str());
+ if (D == 0)
+ return _error->Errno("opendir","Unable to read %s",Dir.c_str());
+
+ string File;
+ for (struct dirent *Dent = readdir(D); Dent != 0; Dent = readdir(D))
+ {
+ // Skip some files
+ if (strcmp(Dent->d_name,".") == 0 ||
+ strcmp(Dent->d_name,"..") == 0)
+ continue;
+
+ // Look up the node
+ File = Dir + Dent->d_name;
+ pkgFLCache::NodeIterator Nde = FLCache.GetNode(File.begin(),
+ File.end(),0,false,false);
+
+ // The file is not owned by this package
+ if (Nde.end() != false || Nde.RealPackage() != FLPkg)
+ {
+ closedir(D);
+ return false;
+ }
+
+ // See if it is a directory
+ struct stat St;
+ if (lstat(File.c_str(),&St) != 0)
+ {
+ closedir(D);
+ return _error->Errno("lstat","Unable to stat %s",File.c_str());
+ }
+
+ // Recurse down directories
+ if (S_ISDIR(St.st_mode) != 0)
+ {
+ if (CheckDirReplace(File,Depth + 1) == false)
+ {
+ closedir(D);
+ return false;
+ }
+ }
+ }
+
+ // No conflicts
+ closedir(D);
+ return true;
+}
+ /*}}}*/
diff --git a/apt-inst/extract.h b/apt-inst/extract.h
new file mode 100644
index 00000000..a9152a26
--- /dev/null
+++ b/apt-inst/extract.h
@@ -0,0 +1,52 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: extract.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ Archive Extraction Directory Stream
+
+ This Directory Stream implements extraction of an archive into the
+ filesystem. It makes the choices on what files should be unpacked and
+ replaces as well as guiding the actual unpacking.
+
+ When the unpacking sequence is completed one of the two functions,
+ Finished or Aborted must be called.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_EXTRACT_H
+#define PKGLIB_EXTRACT_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/extract.h"
+#endif
+
+#include <apt-pkg/dirstream.h>
+#include <apt-pkg/filelist.h>
+#include <apt-pkg/pkgcache.h>
+
+class pkgExtract : public pkgDirStream
+{
+ pkgFLCache &FLCache;
+ pkgCache::VerIterator Ver;
+ pkgFLCache::PkgIterator FLPkg;
+ char FileName[1024];
+ bool Debug;
+
+ bool HandleOverwrites(pkgFLCache::NodeIterator Nde,
+ bool DiverCheck = false);
+ bool CheckDirReplace(string Dir,unsigned int Depth = 0);
+
+ public:
+
+ virtual bool DoItem(Item &Itm,int &Fd);
+ virtual bool Fail(Item &Itm,int Fd);
+ virtual bool FinishedFile(Item &Itm,int Fd);
+
+ bool Finished();
+ bool Aborted();
+
+ pkgExtract(pkgFLCache &FLCache,pkgCache::VerIterator Ver);
+};
+
+#endif
diff --git a/apt-inst/filelist.cc b/apt-inst/filelist.cc
new file mode 100644
index 00000000..211fc935
--- /dev/null
+++ b/apt-inst/filelist.cc
@@ -0,0 +1,588 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: filelist.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ File Listing - Manages a Cache of File -> Package names.
+
+ Diversions add some significant complexity to the system. To keep
+ storage space down in the very special case of a diverted file no
+ extra bytes are allocated in the Node structure. Instead a diversion
+ is inserted directly into the hash table and its flag bit set. Every
+ lookup for that filename will always return the diversion.
+
+ The hash buckets are stored in sorted form, with diversions having
+ the highest sort order. Identical files are assigned the same file
+ pointer, thus after a search all of the nodes owning that file can be
+ found by iterating down the bucket.
+
+ Re-updates of diversions (another extremely special case) are done by
+ marking all diversions as untouched, then loading the entire diversion
+ list again, touching each diversion and then finally going back and
+ releasing all untouched diversions. It is assumed that the diversion
+ table will always be quite small and be a very irregular case.
+
+ Diversions that are user-installed are represented by a package with
+ an empty name string.
+
+ Conf files are handled like diversions by changing the meaning of the
+ Pointer field to point to a conf file entry - again to reduce over
+ head for a special case.
+
+ ##################################################################### */
+ /*}}}*/
+// Include Files /*{{{*/
+#ifdef __GNUG__
+#pragma implementation "apt-pkg/filelist.h"
+#endif
+
+#include <apt-pkg/filelist.h>
+#include <apt-pkg/mmap.h>
+#include <apt-pkg/error.h>
+#include <apt-pkg/strutl.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <iostream>
+ /*}}}*/
+
+// FlCache::Header::Header - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* Initialize the header variables. These are the defaults used when
+ creating new caches */
+pkgFLCache::Header::Header()
+{
+ Signature = 0xEA3F1295;
+
+ /* Whenever the structures change the major version should be bumped,
+ whenever the generator changes the minor version should be bumped. */
+ MajorVersion = 1;
+ MinorVersion = 0;
+ Dirty = true;
+
+ HeaderSz = sizeof(pkgFLCache::Header);
+ NodeSz = sizeof(pkgFLCache::Node);
+ DirSz = sizeof(pkgFLCache::Directory);
+ PackageSz = sizeof(pkgFLCache::Package);
+ DiversionSz = sizeof(pkgFLCache::Diversion);
+ ConfFileSz = sizeof(pkgFLCache::ConfFile);
+
+ NodeCount = 0;
+ DirCount = 0;
+ PackageCount = 0;
+ DiversionCount = 0;
+ ConfFileCount = 0;
+ HashSize = 1 << 14;
+
+ FileHash = 0;
+ DirTree = 0;
+ Packages = 0;
+ Diversions = 0;
+ UniqNodes = 0;
+ memset(Pools,0,sizeof(Pools));
+}
+ /*}}}*/
+// FLCache::Header::CheckSizes - Check if the two headers have same *sz /*{{{*/
+// ---------------------------------------------------------------------
+/* Compare to make sure we are matching versions */
+bool pkgFLCache::Header::CheckSizes(Header &Against) const
+{
+ if (HeaderSz == Against.HeaderSz &&
+ NodeSz == Against.NodeSz &&
+ DirSz == Against.DirSz &&
+ DiversionSz == Against.DiversionSz &&
+ PackageSz == Against.PackageSz &&
+ ConfFileSz == Against.ConfFileSz)
+ return true;
+ return false;
+}
+ /*}}}*/
+
+// FLCache::pkgFLCache - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* If this is a new cache then a new header and hash table are instantiated
+ otherwise the existing ones are merely attached */
+pkgFLCache::pkgFLCache(DynamicMMap &Map) : Map(Map)
+{
+ if (_error->PendingError() == true)
+ return;
+
+ LastTreeLookup = 0;
+ LastLookupSize = 0;
+
+ // Apply the typecasts
+ HeaderP = (Header *)Map.Data();
+ NodeP = (Node *)Map.Data();
+ DirP = (Directory *)Map.Data();
+ DiverP = (Diversion *)Map.Data();
+ PkgP = (Package *)Map.Data();
+ ConfP = (ConfFile *)Map.Data();
+ StrP = (char *)Map.Data();
+ AnyP = (unsigned char *)Map.Data();
+
+ // New mapping, create the basic cache structures
+ if (Map.Size() == 0)
+ {
+ Map.RawAllocate(sizeof(pkgFLCache::Header));
+ *HeaderP = pkgFLCache::Header();
+ HeaderP->FileHash = Map.RawAllocate(sizeof(pkgFLCache::Node)*HeaderP->HashSize,
+ sizeof(pkgFLCache::Node))/sizeof(pkgFLCache::Node);
+ }
+
+ FileHash = NodeP + HeaderP->FileHash;
+
+ // Setup the dynamic map manager
+ HeaderP->Dirty = true;
+ Map.Sync(0,sizeof(pkgFLCache::Header));
+ Map.UsePools(*HeaderP->Pools,sizeof(HeaderP->Pools)/sizeof(HeaderP->Pools[0]));
+}
+ /*}}}*/
+// FLCache::TreeLookup - Perform a lookup in a generic tree /*{{{*/
+// ---------------------------------------------------------------------
+/* This is a simple generic tree lookup. The first three entries of
+ the Directory structure are used as a template, but any other similar
+ structure could be used in its place. */
+map_ptrloc pkgFLCache::TreeLookup(map_ptrloc *Base,const char *Text,
+ const char *TextEnd,unsigned long Size,
+ unsigned int *Count,bool Insert)
+{
+ pkgFLCache::Directory *Dir;
+
+ // Check our last entry cache
+ if (LastTreeLookup != 0 && LastLookupSize == Size)
+ {
+ Dir = (pkgFLCache::Directory *)(AnyP + LastTreeLookup*Size);
+ if (stringcmp(Text,TextEnd,StrP + Dir->Name) == 0)
+ return LastTreeLookup;
+ }
+
+ while (1)
+ {
+ // Allocate a new one
+ if (*Base == 0)
+ {
+ if (Insert == false)
+ return 0;
+
+ *Base = Map.Allocate(Size);
+ if (*Base == 0)
+ return 0;
+
+ (*Count)++;
+ Dir = (pkgFLCache::Directory *)(AnyP + *Base*Size);
+ Dir->Name = Map.WriteString(Text,TextEnd - Text);
+ LastTreeLookup = *Base;
+ LastLookupSize = Size;
+ return *Base;
+ }
+
+ // Compare this node
+ Dir = (pkgFLCache::Directory *)(AnyP + *Base*Size);
+ int Res = stringcmp(Text,TextEnd,StrP + Dir->Name);
+ if (Res == 0)
+ {
+ LastTreeLookup = *Base;
+ LastLookupSize = Size;
+ return *Base;
+ }
+
+ if (Res > 0)
+ Base = &Dir->Left;
+ if (Res < 0)
+ Base = &Dir->Right;
+ }
+}
+ /*}}}*/
+// FLCache::PrintTree - Print out a tree /*{{{*/
+// ---------------------------------------------------------------------
+/* This is a simple generic tree dumper, meant for debugging. */
+void pkgFLCache::PrintTree(map_ptrloc Base,unsigned long Size)
+{
+ if (Base == 0)
+ return;
+
+ pkgFLCache::Directory *Dir = (pkgFLCache::Directory *)(AnyP + Base*Size);
+ PrintTree(Dir->Left,Size);
+ cout << (StrP + Dir->Name) << endl;
+ PrintTree(Dir->Right,Size);
+}
+ /*}}}*/
+// FLCache::GetPkg - Get a package pointer /*{{{*/
+// ---------------------------------------------------------------------
+/* Locate a package by name in its tree, this is just a wrapper for
+ TreeLookup */
+pkgFLCache::PkgIterator pkgFLCache::GetPkg(const char *Name,const char *NameEnd,
+ bool Insert)
+{
+ if (NameEnd == 0)
+ NameEnd = Name + strlen(Name);
+
+ map_ptrloc Pos = TreeLookup(&HeaderP->Packages,Name,NameEnd,
+ sizeof(pkgFLCache::Package),
+ &HeaderP->PackageCount,Insert);
+ if (Pos == 0)
+ return pkgFLCache::PkgIterator();
+ return pkgFLCache::PkgIterator(*this,PkgP + Pos);
+}
+ /*}}}*/
+// FLCache::GetNode - Get the node associated with the filename /*{{{*/
+// ---------------------------------------------------------------------
+/* Lookup a node in the hash table. If Insert is true then a new node is
+ always inserted. The hash table can have multiple instances of a
+ single name available. A search returns the first. It is important
+ that additions for the same name insert after the first entry of
+ the name group. */
+pkgFLCache::NodeIterator pkgFLCache::GetNode(const char *Name,
+ const char *NameEnd,
+ map_ptrloc Loc,
+ bool Insert,bool Divert)
+{
+ // Split the name into file and directory, hashing as it is copied
+ const char *File = Name;
+ unsigned long HashPos = 0;
+ for (const char *I = Name; I < NameEnd; I++)
+ {
+ HashPos = 1637*HashPos + *I;
+ if (*I == '/')
+ File = I;
+ }
+
+ // Search for it
+ Node *Hash = NodeP + HeaderP->FileHash + (HashPos % HeaderP->HashSize);
+ int Res = 0;
+ map_ptrloc FilePtr = 0;
+ while (Hash->Pointer != 0)
+ {
+ // Compare
+ Res = stringcmp(File+1,NameEnd,StrP + Hash->File);
+ if (Res == 0)
+ Res = stringcmp(Name,File,StrP + DirP[Hash->Dir].Name);
+
+ // Diversion?
+ if (Res == 0 && Insert == true)
+ {
+ /* Dir and File match exactly, we need to reuse the file name
+ when we link it in */
+ FilePtr = Hash->File;
+ Res = Divert - ((Hash->Flags & Node::Diversion) == Node::Diversion);
+ }
+
+ // Is a match
+ if (Res == 0)
+ {
+ if (Insert == false)
+ return NodeIterator(*this,Hash);
+
+ // Only one diversion per name!
+ if (Divert == true)
+ return NodeIterator(*this,Hash);
+ break;
+ }
+
+ // Out of sort order
+ if (Res > 0)
+ break;
+
+ if (Hash->Next != 0)
+ Hash = NodeP + Hash->Next;
+ else
+ break;
+ }
+
+ // Fail, not found
+ if (Insert == false)
+ return NodeIterator(*this);
+
+ // Find a directory node
+ map_ptrloc Dir = TreeLookup(&HeaderP->DirTree,Name,File,
+ sizeof(pkgFLCache::Directory),
+ &HeaderP->DirCount,true);
+ if (Dir == 0)
+ return NodeIterator(*this);
+
+ // Allocate a new node
+ if (Hash->Pointer != 0)
+ {
+ // Overwrite or append
+ if (Res > 0)
+ {
+ Node *Next = NodeP + Map.Allocate(sizeof(*Hash));
+ if (Next == NodeP)
+ return NodeIterator(*this);
+ *Next = *Hash;
+ Hash->Next = Next - NodeP;
+ }
+ else
+ {
+ unsigned long NewNext = Map.Allocate(sizeof(*Hash));
+ if (NewNext == 0)
+ return NodeIterator(*this);
+ NodeP[NewNext].Next = Hash->Next;
+ Hash->Next = NewNext;
+ Hash = NodeP + Hash->Next;
+ }
+ }
+
+ // Insert into the new item
+ Hash->Dir = Dir;
+ Hash->Pointer = Loc;
+ Hash->Flags = 0;
+ if (Divert == true)
+ Hash->Flags |= Node::Diversion;
+
+ if (FilePtr != 0)
+ Hash->File = FilePtr;
+ else
+ {
+ HeaderP->UniqNodes++;
+ Hash->File = Map.WriteString(File+1,NameEnd - File-1);
+ }
+
+ // Link the node to the package list
+ if (Divert == false && Loc == 0)
+ {
+ Hash->Next = PkgP[Loc].Files;
+ PkgP[Loc].Files = Hash - NodeP;
+ }
+
+ HeaderP->NodeCount++;
+ return NodeIterator(*this,Hash);
+}
+ /*}}}*/
+// FLCache::HashNode - Return the hash bucket for the node /*{{{*/
+// ---------------------------------------------------------------------
+/* This is one of two hashing functions. The other is inlined into the
+ GetNode routine. */
+pkgFLCache::Node *pkgFLCache::HashNode(NodeIterator const &Nde)
+{
+ // Hash the node
+ unsigned long HashPos = 0;
+ for (const char *I = Nde.DirN(); *I != 0; I++)
+ HashPos = 1637*HashPos + *I;
+ HashPos = 1637*HashPos + '/';
+ for (const char *I = Nde.File(); *I != 0; I++)
+ HashPos = 1637*HashPos + *I;
+ return NodeP + HeaderP->FileHash + (HashPos % HeaderP->HashSize);
+}
+ /*}}}*/
+// FLCache::DropNode - Drop a node from the hash table /*{{{*/
+// ---------------------------------------------------------------------
+/* This erases a node from the hash table. Note that this does not unlink
+ the node from the package linked list. */
+void pkgFLCache::DropNode(map_ptrloc N)
+{
+ if (N == 0)
+ return;
+
+ NodeIterator Nde(*this,NodeP + N);
+
+ if (Nde->NextPkg != 0)
+ _error->Warning("DropNode called on still linked node");
+
+ // Locate it in the hash table
+ Node *Last = 0;
+ Node *Hash = HashNode(Nde);
+ while (Hash->Pointer != 0)
+ {
+ // Got it
+ if (Hash == Nde)
+ {
+ // Top of the bucket..
+ if (Last == 0)
+ {
+ Hash->Pointer = 0;
+ if (Hash->Next == 0)
+ return;
+ *Hash = NodeP[Hash->Next];
+ // Release Hash->Next
+ return;
+ }
+ Last->Next = Hash->Next;
+ // Release Hash
+ return;
+ }
+
+ Last = Hash;
+ if (Hash->Next != 0)
+ Hash = NodeP + Hash->Next;
+ else
+ break;
+ }
+
+ _error->Error("Failed to locate the hash element!");
+}
+ /*}}}*/
+// FLCache::BeginDiverLoad - Start reading new diversions /*{{{*/
+// ---------------------------------------------------------------------
+/* Tag all the diversions as untouched */
+void pkgFLCache::BeginDiverLoad()
+{
+ for (DiverIterator I = DiverBegin(); I.end() == false; I++)
+ I->Flags = 0;
+}
+ /*}}}*/
+// FLCache::FinishDiverLoad - Finish up a new diversion load /*{{{*/
+// ---------------------------------------------------------------------
+/* This drops any untouched diversions. In effect removing any diversions
+ that were not loaded (ie missing from the diversion file) */
+void pkgFLCache::FinishDiverLoad()
+{
+ map_ptrloc *Cur = &HeaderP->Diversions;
+ while (*Cur != 0)
+ {
+ Diversion *Div = DiverP + *Cur;
+ if ((Div->Flags & Diversion::Touched) == Diversion::Touched)
+ {
+ Cur = &Div->Next;
+ continue;
+ }
+
+ // Purge!
+ DropNode(Div->DivertTo);
+ DropNode(Div->DivertFrom);
+ *Cur = Div->Next;
+ }
+}
+ /*}}}*/
+// FLCache::AddDiversion - Add a new diversion /*{{{*/
+// ---------------------------------------------------------------------
+/* Add a new diversion to the diversion tables and make sure that it is
+ unique and non-chaining. */
+bool pkgFLCache::AddDiversion(PkgIterator const &Owner,
+ const char *From,const char *To)
+{
+ /* Locate the two hash nodes we are going to manipulate. If there
+ are pre-existing diversions then they will be returned */
+ NodeIterator FromN = GetNode(From,From+strlen(From),0,true,true);
+ NodeIterator ToN = GetNode(To,To+strlen(To),0,true,true);
+ if (FromN.end() == true || ToN.end() == true)
+ return _error->Error("Failed to allocate diversion");
+
+ // Should never happen
+ if ((FromN->Flags & Node::Diversion) != Node::Diversion ||
+ (ToN->Flags & Node::Diversion) != Node::Diversion)
+ return _error->Error("Internal Error in AddDiversion");
+
+ // Now, try to reclaim an existing diversion..
+ map_ptrloc Diver = 0;
+ if (FromN->Pointer != 0)
+ Diver = FromN->Pointer;
+
+ /* Make sure from and to point to the same diversion, if they don't
+ then we are trying to intermix diversions - very bad */
+ if (ToN->Pointer != 0 && ToN->Pointer != Diver)
+ {
+ // It could be that the other diversion is no longer in use
+ if ((DiverP[ToN->Pointer].Flags & Diversion::Touched) == Diversion::Touched)
+ return _error->Error("Trying to overwrite a diversion, %s -> %s and %s/%s",
+ From,To,ToN.File(),ToN.Dir().Name());
+
+ // We can erase it.
+ Diversion *Div = DiverP + ToN->Pointer;
+ ToN->Pointer = 0;
+
+ if (Div->DivertTo == ToN.Offset())
+ Div->DivertTo = 0;
+ if (Div->DivertFrom == ToN.Offset())
+ Div->DivertFrom = 0;
+
+ // This diversion will be cleaned up by FinishDiverLoad
+ }
+
+ // Allocate a new diversion
+ if (Diver == 0)
+ {
+ Diver = Map.Allocate(sizeof(Diversion));
+ if (Diver == 0)
+ return false;
+ DiverP[Diver].Next = HeaderP->Diversions;
+ HeaderP->Diversions = Diver;
+ HeaderP->DiversionCount++;
+ }
+
+ // Can only have one diversion of the same files
+ Diversion *Div = DiverP + Diver;
+ if ((Div->Flags & Diversion::Touched) == Diversion::Touched)
+ return _error->Error("Double add of diversion %s -> %s",From,To);
+
+ // Setup the From/To links
+ if (Div->DivertFrom != FromN.Offset() && Div->DivertFrom != ToN.Offset())
+ DropNode(Div->DivertFrom);
+ Div->DivertFrom = FromN.Offset();
+ if (Div->DivertTo != FromN.Offset() && Div->DivertTo != ToN.Offset())
+ DropNode(Div->DivertTo);
+ Div->DivertTo = ToN.Offset();
+
+ // Link it to the two nodes
+ FromN->Pointer = Diver;
+ ToN->Pointer = Diver;
+
+ // And the package
+ Div->OwnerPkg = Owner.Offset();
+ Div->Flags |= Diversion::Touched;
+
+ return true;
+}
+ /*}}}*/
+// FLCache::AddConfFile - Add a new configuration file /*{{{*/
+// ---------------------------------------------------------------------
+/* This simply adds a new conf file node to the hash table. This is only
+ used by the status file reader. It associates a hash with each conf
+ file entry that exists in the status file and the list file for
+ the proper package. Duplicate conf files (across packages) are left
+ up to other routines to deal with. */
+bool pkgFLCache::AddConfFile(const char *Name,const char *NameEnd,
+ PkgIterator const &Owner,
+ const unsigned char *Sum)
+{
+ NodeIterator Nde = GetNode(Name,NameEnd,0,false,false);
+ if (Nde.end() == true)
+ return true;
+
+ unsigned long File = Nde->File;
+ for (; Nde->File == File && Nde.end() == false; Nde++)
+ {
+ if (Nde.RealPackage() != Owner)
+ continue;
+
+ if ((Nde->Flags & Node::ConfFile) == Node::ConfFile)
+ return _error->Error("Duplicate conf file %s/%s",Nde.DirN(),Nde.File());
+
+ // Allocate a new conf file structure
+ map_ptrloc Conf = Map.Allocate(sizeof(ConfFile));
+ if (Conf == 0)
+ return false;
+ ConfP[Conf].OwnerPkg = Owner.Offset();
+ memcpy(ConfP[Conf].MD5,Sum,sizeof(ConfP[Conf].MD5));
+
+ Nde->Pointer = Conf;
+ Nde->Flags |= Node::ConfFile;
+ return true;
+ }
+
+ /* This means the conf file has been replaced, but the entry in the
+ status file was not updated */
+ return true;
+}
+ /*}}}*/
+
+// NodeIterator::RealPackage - Return the package for this node /*{{{*/
+// ---------------------------------------------------------------------
+/* Since the package pointer is indirected in all sorts of interesting ways
+ this is used to get a pointer to the owning package */
+pkgFLCache::Package *pkgFLCache::NodeIterator::RealPackage() const
+{
+ if (Nde->Pointer == 0)
+ return 0;
+
+ if ((Nde->Flags & Node::ConfFile) == Node::ConfFile)
+ return Owner->PkgP + Owner->ConfP[Nde->Pointer].OwnerPkg;
+
+ // Diversions are ignored
+ if ((Nde->Flags & Node::Diversion) == Node::Diversion)
+ return 0;
+
+ return Owner->PkgP + Nde->Pointer;
+}
+ /*}}}*/
diff --git a/apt-inst/filelist.h b/apt-inst/filelist.h
new file mode 100644
index 00000000..7536a2f6
--- /dev/null
+++ b/apt-inst/filelist.h
@@ -0,0 +1,314 @@
+// -*- mode: cpp; mode: fold -*-
+// Description /*{{{*/
+// $Id: filelist.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
+/* ######################################################################
+
+ File Listing - Manages a Cache of File -> Package names.
+
+ This is identical to the Package cache, except that the generator
+ (which is much simpler) is integrated directly into the main class,
+ and it has been designed to handle live updates.
+
+ The storage content of the class is maintained in a memory map and is
+ written directly to the file system. Performance is traded against
+ space to give something that performs well and remains small.
+ The average per file usage is 32 bytes which yields about a meg every
+ 36k files. Directory paths are collected into a binary tree and stored
+ only once, this offsets the cost of the hash nodes enough to keep
+ memory usage slightly less than the sum of the filenames.
+
+ The file names are stored into a fixed size chained hash table that is
+ linked to the package name and to the directory component.
+
+ Each file node has a set of associated flags that indicate the current
+ state of the file.
+
+ ##################################################################### */
+ /*}}}*/
+#ifndef PKGLIB_FILELIST_H
+#define PKGLIB_FILELIST_H
+
+#ifdef __GNUG__
+#pragma interface "apt-pkg/filelist.h"
+#endif
+
+#include <apt-pkg/mmap.h>
+
+class pkgFLCache
+{
+ public:
+ struct Header;
+ struct Node;
+ struct Directory;
+ struct Package;
+ struct Diversion;
+ struct ConfFile;
+
+ class NodeIterator;
+ class DirIterator;
+ class PkgIterator;
+ class DiverIterator;
+
+ protected:
+ string CacheFile;
+ DynamicMMap &Map;
+ map_ptrloc LastTreeLookup;
+ unsigned long LastLookupSize;
+
+ // Helpers for the addition algorithms
+ map_ptrloc TreeLookup(map_ptrloc *Base,const char *Text,const char *TextEnd,
+ unsigned long Size,unsigned int *Count = 0,
+ bool Insert = false);
+
+ public:
+
+ // Pointers to the arrays of items
+ Header *HeaderP;
+ Node *NodeP;
+ Directory *DirP;
+ Package *PkgP;
+ Diversion *DiverP;
+ ConfFile *ConfP;
+ char *StrP;
+ unsigned char *AnyP;
+
+ // Quick accessors
+ Node *FileHash;
+
+ // Accessors
+ Header &Head() {return *HeaderP;};
+ void PrintTree(map_ptrloc Base,unsigned long Size);
+
+ // Add/Find things
+ PkgIterator GetPkg(const char *Name,const char *End,bool Insert);
+ inline PkgIterator GetPkg(const char *Name,bool Insert);
+ NodeIterator GetNode(const char *Name,
+ const char *NameEnd,
+ map_ptrloc Loc,
+ bool Insert,bool Divert);
+ Node *HashNode(NodeIterator const &N);
+ void DropNode(map_ptrloc Node);
+
+ inline DiverIterator DiverBegin();
+
+ // Diversion control
+ void BeginDiverLoad();
+ void FinishDiverLoad();
+ bool AddDiversion(PkgIterator const &Owner,const char *From,
+ const char *To);
+ bool AddConfFile(const char *Name,const char *NameEnd,
+ PkgIterator const &Owner,const unsigned char *Sum);
+
+ pkgFLCache(DynamicMMap &Map);
+// ~pkgFLCache();
+};
+
+struct pkgFLCache::Header
+{
+ // Signature information
+ unsigned long Signature;
+ short MajorVersion;
+ short MinorVersion;
+ bool Dirty;
+
+ // Size of structure values
+ unsigned HeaderSz;
+ unsigned NodeSz;
+ unsigned DirSz;
+ unsigned PackageSz;
+ unsigned DiversionSz;
+ unsigned ConfFileSz;
+
+ // Structure Counts;
+ unsigned int NodeCount;
+ unsigned int DirCount;
+ unsigned int PackageCount;
+ unsigned int DiversionCount;
+ unsigned int ConfFileCount;
+ unsigned int HashSize;
+ unsigned long UniqNodes;
+
+ // Offsets
+ map_ptrloc FileHash;
+ map_ptrloc DirTree;
+ map_ptrloc Packages;
+ map_ptrloc Diversions;
+
+ /* Allocation pools, there should be one of these for each structure
+ excluding the header */
+ DynamicMMap::Pool Pools[5];
+
+ bool CheckSizes(Header &Against) const;
+ Header();
+};
+
+/* The bit field is used to avoid incurring an extra 4 bytes x 40000,
+ Pointer is the most infrequently used member of the structure */
+struct pkgFLCache::Node
+{
+ map_ptrloc Dir; // Dir
+ map_ptrloc File; // String
+ unsigned Pointer:24; // Package/Diversion/ConfFile
+ unsigned Flags:8; // Package
+ map_ptrloc Next; // Node
+ map_ptrloc NextPkg; // Node
+
+ enum Flags {Diversion = (1<<0),ConfFile = (1<<1),
+ NewConfFile = (1<<2),NewFile = (1<<3),
+ Unpacked = (1<<4),Replaced = (1<<5)};
+};
+
+struct pkgFLCache::Directory
+{
+ map_ptrloc Left; // Directory
+ map_ptrloc Right; // Directory
+ map_ptrloc Name; // String
+};
+
+struct pkgFLCache::Package
+{
+ map_ptrloc Left; // Package
+ map_ptrloc Right; // Package
+ map_ptrloc Name; // String
+ map_ptrloc Files; // Node
+};
+
+struct pkgFLCache::Diversion
+{
+ map_ptrloc OwnerPkg; // Package
+ map_ptrloc DivertFrom; // Node
+ map_ptrloc DivertTo; // Node
+
+ map_ptrloc Next; // Diversion
+ unsigned long Flags;
+
+ enum Flags {Touched = (1<<0)};
+};
+
+struct pkgFLCache::ConfFile
+{
+ map_ptrloc OwnerPkg; // Package
+ unsigned char MD5[16];
+};
+
+class pkgFLCache::PkgIterator
+{
+ Package *Pkg;
+ pkgFLCache *Owner;
+
+ public:
+
+ inline bool end() const {return Owner == 0 || Pkg == Owner->PkgP?true:false;}
+
+ // Accessors
+ inline Package *operator ->() {return Pkg;};
+ inline Package const *operator ->() const {return Pkg;};
+ inline Package const &operator *() const {return *Pkg;};
+ inline operator Package *() {return Pkg == Owner->PkgP?0:Pkg;};
+ inline operator Package const *() const {return Pkg == Owner->PkgP?0:Pkg;};
+
+ inline unsigned long Offset() const {return Pkg - Owner->PkgP;};
+ inline const char *Name() const {return Pkg->Name == 0?0:Owner->StrP + Pkg->Name;};
+ inline pkgFLCache::NodeIterator Files() const;
+
+ PkgIterator() : Pkg(0), Owner(0) {};
+ PkgIterator(pkgFLCache &Owner,Package *Trg) : Pkg(Trg), Owner(&Owner) {};
+};
+
+class pkgFLCache::DirIterator
+{
+ Directory *Dir;
+ pkgFLCache *Owner;
+
+ public:
+
+ // Accessors
+ inline Directory *operator ->() {return Dir;};
+ inline Directory const *operator ->() const {return Dir;};
+ inline Directory const &operator *() const {return *Dir;};
+ inline operator Directory *() {return Dir == Owner->DirP?0:Dir;};
+ inline operator Directory const *() const {return Dir == Owner->DirP?0:Dir;};
+
+ inline const char *Name() const {return Dir->Name == 0?0:Owner->StrP + Dir->Name;};
+
+ DirIterator() : Dir(0), Owner(0) {};
+ DirIterator(pkgFLCache &Owner,Directory *Trg) : Dir(Trg), Owner(&Owner) {};
+};
+
+class pkgFLCache::DiverIterator
+{
+ Diversion *Diver;
+ pkgFLCache *Owner;
+
+ public:
+
+ // Iteration
+ void operator ++(int) {if (Diver != Owner->DiverP) Diver = Owner->DiverP + Diver->Next;};
+ inline void operator ++() {operator ++(0);};
+ inline bool end() const {return Owner == 0 || Diver == Owner->DiverP;};
+
+ // Accessors
+ inline Diversion *operator ->() {return Diver;};
+ inline Diversion const *operator ->() const {return Diver;};
+ inline Diversion const &operator *() const {return *Diver;};
+ inline operator Diversion *() {return Diver == Owner->DiverP?0:Diver;};
+ inline operator Diversion const *() const {return Diver == Owner->DiverP?0:Diver;};
+
+ inline PkgIterator OwnerPkg() const {return PkgIterator(*Owner,Owner->PkgP + Diver->OwnerPkg);};
+ inline NodeIterator DivertFrom() const;
+ inline NodeIterator DivertTo() const;
+
+ DiverIterator() : Diver(0), Owner(0) {};
+ DiverIterator(pkgFLCache &Owner,Diversion *Trg) : Diver(Trg), Owner(&Owner) {};
+};
+
+class pkgFLCache::NodeIterator
+{
+ Node *Nde;
+ enum {NdePkg, NdeHash} Type;
+ pkgFLCache *Owner;
+
+ public:
+
+ // Iteration
+ void operator ++(int) {if (Nde != Owner->NodeP) Nde = Owner->NodeP +
+ (Type == NdePkg?Nde->NextPkg:Nde->Next);};
+ inline void operator ++() {operator ++(0);};
+ inline bool end() const {return Owner == 0 || Nde == Owner->NodeP;};
+
+ // Accessors
+ inline Node *operator ->() {return Nde;};
+ inline Node const *operator ->() const {return Nde;};
+ inline Node const &operator *() const {return *Nde;};
+ inline operator Node *() {return Nde == Owner->NodeP?0:Nde;};
+ inline operator Node const *() const {return Nde == Owner->NodeP?0:Nde;};
+ inline unsigned long Offset() const {return Nde - Owner->NodeP;};
+ inline DirIterator Dir() const {return DirIterator(*Owner,Owner->DirP + Nde->Dir);};
+ inline DiverIterator Diversion() const {return DiverIterator(*Owner,Owner->DiverP + Nde->Pointer);};
+ inline const char *File() const {return Nde->File == 0?0:Owner->StrP + Nde->File;};
+ inline const char *DirN() const {return Owner->StrP + Owner->DirP[Nde->Dir].Name;};
+ Package *RealPackage() const;
+
+ NodeIterator() : Nde(0), Type(NdeHash), Owner(0) {};
+ NodeIterator(pkgFLCache &Owner) : Nde(Owner.NodeP), Type(NdeHash), Owner(&Owner) {};
+ NodeIterator(pkgFLCache &Owner,Node *Trg) : Nde(Trg), Type(NdeHash), Owner(&Owner) {};
+ NodeIterator(pkgFLCache &Owner,Node *Trg,Package *) : Nde(Trg), Type(NdePkg), Owner(&Owner) {};
+};
+
+/* Inlines with forward references that cannot be included directly in their
+ respective classes */
+inline pkgFLCache::NodeIterator pkgFLCache::DiverIterator::DivertFrom() const
+ {return NodeIterator(*Owner,Owner->NodeP + Diver->DivertFrom);};
+inline pkgFLCache::NodeIterator pkgFLCache::DiverIterator::DivertTo() const
+ {return NodeIterator(*Owner,Owner->NodeP + Diver->DivertTo);};
+
+inline pkgFLCache::NodeIterator pkgFLCache::PkgIterator::Files() const
+ {return NodeIterator(*Owner,Owner->NodeP + Pkg->Files,Pkg);};
+
+inline pkgFLCache::DiverIterator pkgFLCache::DiverBegin()
+ {return DiverIterator(*this,DiverP + HeaderP->Diversions);};
+
+inline pkgFLCache::PkgIterator pkgFLCache::GetPkg(const char *Name,bool Insert)
+ {return GetPkg(Name,Name+strlen(Name),Insert);};
+
+#endif
diff --git a/apt-inst/makefile b/apt-inst/makefile
new file mode 100644
index 00000000..4a0981f2
--- /dev/null
+++ b/apt-inst/makefile
@@ -0,0 +1,30 @@
+# -*- make -*-
+BASE=..
+SUBDIR=apt-inst
+
+# Header location
+SUBDIRS = contrib deb
+HEADER_TARGETDIRS = apt-pkg
+
+# Bring in the default rules
+include ../buildlib/defaults.mak
+
+# The library name
+LIBRARY=apt-inst
+MAJOR=1.0
+MINOR=0
+SLIBS=$(PTHREADLIB)
+
+# Source code for the contributed non-core things
+SOURCE = contrib/extracttar.cc contrib/arfile.cc
+
+# Source code for the main library
+SOURCE+= filelist.cc database.cc dirstream.cc extract.cc \
+ deb/dpkgdb.cc deb/debfile.cc
+
+# Public header files
+HEADERS = extracttar.h arfile.h filelist.h database.h extract.h \
+ dpkgdb.h dirstream.h debfile.h
+
+HEADERS := $(addprefix apt-pkg/,$(HEADERS))
+include $(LIBRARY_H)