Add NetBSD rdigest from

<ftp://ftp.netbsd.org/pub/NetBSD/packages/pkgsrc/pkgtools/rdigest/README.html>
This commit is contained in:
Oliver Eikemeier 2004-07-02 16:43:22 +00:00
parent a1c672573d
commit b74b2eb98d
Notes: svn2git 2021-03-31 03:12:20 +00:00
svn path=/head/; revision=112785
6 changed files with 421 additions and 0 deletions

View file

@@ -357,6 +357,7 @@
SUBDIR += rain
SUBDIR += rats
SUBDIR += rc5pipe
SUBDIR += rdigest
SUBDIR += rid
SUBDIR += rkhunter
SUBDIR += rng_82802

20
security/rdigest/Makefile Normal file
View file

@@ -0,0 +1,20 @@
# New ports collection makefile for: rdigest
# Date created: 2004-06-24
# Whom: eik
#
# $FreeBSD$
#
PORTNAME= rdigest
COMMENT= Recursive message digest wrapper utility
DESCR= ${.CURDIR}/pkg-descr
CONFLICTS= # none
PATCHDIR= ${.CURDIR}/files
MASTERDIR= ${.CURDIR}/../digest
.include "${MASTERDIR}/Makefile"

View file

@@ -0,0 +1,60 @@
$NetBSD: patch-aa,v 1.1.1.1 2003/07/24 05:21:05 atatat Exp $
--- Makefile.in.orig 2002-12-20 23:06:12.000000000 -0500
+++ Makefile.in
@@ -56,7 +56,7 @@ missing mkinstalldirs regress.sh
.c.o:
$(COMPILE) -c $< -o $@
-all: digest
+all: rdigest
digest-types.h: bits
./bits digest-types.h
@@ -64,10 +64,10 @@ digest-types.h: bits
bits: bits.o
$(LINK) $(bits_OBJS) $(LIBS)
-digest: $(digest_OBJS)
+rdigest: $(digest_OBJS)
$(LINK) $(digest_OBJS) $(LIBS)
-check: digest
+check: rdigest
@SHELL@ $(srcdir)/regress.sh
digest.o: digest-types.h
@@ -81,7 +81,7 @@ sha1.o: digest-types.h
sha1hl.o: digest-types.h
clean:
- rm -f *.o digest bits digest-types.h
+ rm -f *.o rdigest bits digest-types.h
distclean: clean
rm -f Makefile config.h
@@ -90,18 +90,18 @@ distclean: clean
maintainer-clean: distclean
rm -f configure config.h.in
-install: digest
+install: rdigest
$(mkinstalldirs) $(DESTDIR)$(bindir)
- @f=`echo digest|sed '$(transform)'`; \
- echo "$(INSTALL_PROGRAM) digest $(DESTDIR)$(bindir)/$$f"; \
- $(INSTALL_PROGRAM) digest $(DESTDIR)$(bindir)/$$f
+ @f=`echo rdigest|sed '$(transform)'`; \
+ echo "$(INSTALL_PROGRAM) rdigest $(DESTDIR)$(bindir)/$$f"; \
+ $(INSTALL_PROGRAM) rdigest $(DESTDIR)$(bindir)/$$f
$(mkinstalldirs) $(DESTDIR)$(mandir)/man1
- @f=`echo digest.1|sed '$(transform)'`; \
+ @f=`echo rdigest.1|sed '$(transform)'`; \
echo "$(INSTALL_DATA) digest.1 $(DESTDIR)$(mandir)/man1/$$f"; \
$(INSTALL_DATA) digest.1 $(DESTDIR)$(mandir)/man1/$$f
uninstall:
- @f=`echo digest|sed '$(transform)'`; \
+ @f=`echo rdigest|sed '$(transform)'`; \
echo " rm -f $(DESTDIR)$(bindir)/$$f"; \
rm -f $(DESTDIR)$(bindir)/$$f

View file

@@ -0,0 +1,89 @@
$NetBSD: patch-ab,v 1.1.1.1 2003/07/24 05:21:07 atatat Exp $
--- digest.1.orig 2002-02-16 15:24:32.000000000 -0500
+++ digest.1
@@ -31,21 +31,29 @@
.\" SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.\"
-.Dd February 28, 2001
-.Dt DIGEST 1
+.Dd July 24, 2003
+.Dt RDIGEST 1
.Os
.Sh NAME
-.Nm digest
-.Nd calculate message digests
+.Nm rdigest
+.Nd calculate message digests of files and directories
.Sh SYNOPSIS
.Nm
+.Op Fl x Ar directory
.Ar algorithm
.Op file ...
.Sh DESCRIPTION
The
.Nm
-utility calculates message digests of files or,
+utility calculates message digests of files, directories, or,
if no file is specified, standard input.
+The
+.Fl x
+option can be used as many times as required to indicate directories
+not to be included in a directory's hash.
+This allows you to skip, eg, CVS subdirectories in a given tree, which
+can have relatively volatile contents not pertinent to the hash.
+.Pp
The list of possible algorithms is:
.Bl -tag -width Ds
.It md5
@@ -81,11 +89,32 @@ algorithm implementations, which are loc
C library, and was designed to be scalable as new message digest
algorithms are developed.
.Pp
+The checksum for a directory is implemented as the hash over a list of
+one or two hashes for each of the items in the tree.
+.Bl -bullet
+.It
+For a file, the string
+.Li Dq f \&
+along with the path make up the data for one hash, and the contents of
+the file is used for the second hash.
+.It
+For a symbolic link, the string
+.Li Dq l \&
+along with the path to the symbolic link make up the data for one
+hash, and the target of the symbolic link is used for the second hash.
+.It
+For a directory, the string
+.Li Dq d \&
+along with the path make up the data for one hash.
+There is no second hash for a directory.
+.El
+.Pp
The
.Nm
utility exits 0 on success, and >0 if an error occurs.
.Sh SEE ALSO
.Xr cksum 1 ,
+.Xr digest 1 ,
.Xr md5 3 ,
.Xr rmd160 3 ,
.Xr sha1 3
@@ -93,9 +122,13 @@ utility exits 0 on success, and >0 if an
The
.Nm
utility first appeared in
-.Nx 1.6 .
+.Nx 2.0 .
.Sh AUTHORS
The
+.Xr digest 1
+utility was written by Alistair G. Crooks \*[Lt]agc@netbsd.org\*[Gt].
.Nm
-utility was written by
-.An Alistair G. Crooks Aq agc@netbsd.org .
+was implemented by Andrew Brown \*[Lt]atatat@netbsd.org\*[Gt] as a
+patch to
+.Xr digest 1
+to add recursive capabilities.

View file

@@ -0,0 +1,204 @@
$NetBSD: patch-ac,v 1.1.1.1 2003/07/24 05:21:07 atatat Exp $
--- digest.c.orig 2003-07-23 20:27:09.000000000 -0400
+++ digest.c
@@ -43,9 +43,14 @@ __RCSID("$NetBSD: digest.c,v 1.8 2003/07
#endif
+#include <sys/queue.h>
+#include <sys/stat.h>
+
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
+#include <fcntl.h>
+#include <fts.h>
#ifdef HAVE_LOCALE_H
#include <locale.h>
#endif
@@ -147,21 +152,172 @@ digest_file(char *fn, alg_t *alg)
return (rc);
}
+struct excl {
+ LIST_ENTRY(excl) n;
+ const char *p;
+};
+
+LIST_HEAD(, excl) excl = LIST_HEAD_INITIALIZER(excl);
+
+static void
+exclude(const char *p)
+{
+ struct excl *e;
+
+ e = malloc(sizeof(struct excl));
+ e->p = p;
+ LIST_INSERT_HEAD(&excl, e, n);
+}
+
+static int
+skip(const char *p)
+{
+ struct excl *e;
+
+ LIST_FOREACH(e, &excl, n)
+ if (strcmp(e->p, p) == 0)
+ return (1);
+
+ return (0);
+}
+
+static int
+compar(const FTSENT **fa, const FTSENT **fb)
+{
+ return (strcmp((*fa)->fts_name, (*fb)->fts_name));
+}
+
+static int
+digest_directory(char *dn, alg_t *alg)
+{
+ char in[BUFSIZ * 20], dot[2];
+ char *digest;
+ int cc, rc, l, fd, cwd;
+ char *pathlist[2];
+ FTS *ftsp;
+ FTSENT *f;
+
+ rc = 1;
+ l = alg->hash_len * 2;
+ digest = malloc(l + 1);
+ sprintf(dot, ".");
+ pathlist[0] = dot;
+ pathlist[1] = NULL;
+
+ if ((cwd = open(".", O_RDONLY)) == -1 ||
+ chdir(dn) == -1 ||
+ (ftsp = fts_open(pathlist,
+ FTS_COMFOLLOW | FTS_NOCHDIR | FTS_PHYSICAL,
+ compar)) == NULL) {
+ (void) fprintf(stderr, "%s\n", dn);
+ free(digest);
+ return (0);
+ }
+
+ (*alg->hash_init)(&alg->hash_ctx);
+
+ while ((f = fts_read(ftsp)) != NULL) {
+ /* skip the second pass on a directory */
+ if (f->fts_info == FTS_DP)
+ continue;
+
+ /* skip directories named CVS, RCS, or SCCS */
+ if ((f->fts_info == FTS_NS ||
+ S_ISDIR(f->fts_statp->st_mode)) &&
+ skip(f->fts_name)) {
+ fts_set(ftsp, f, FTS_SKIP);
+ continue;
+ }
+
+ /* try to handle things based on stat info */
+ if (f->fts_info != FTS_NS) {
+ /* only mention directories */
+ if (S_ISDIR(f->fts_statp->st_mode)) {
+ (*alg->hash_init)(&alg->hash_ctx2);
+ (*alg->hash_update)(&alg->hash_ctx2, "d ", 2);
+ (*alg->hash_update)(&alg->hash_ctx2, f->fts_path, f->fts_pathlen);
+ (*alg->hash_end)(&alg->hash_ctx2, digest);
+ digest[l] = '\n';
+ (*alg->hash_update)(&alg->hash_ctx, digest, l + 1);
+
+ /* hash the filename and then the contents separately */
+ } else if (S_ISREG(f->fts_statp->st_mode)) {
+ if ((fd = open(f->fts_path, O_RDONLY)) != -1) {
+ (*alg->hash_init)(&alg->hash_ctx2);
+ (*alg->hash_update)(&alg->hash_ctx2, "f ", 2);
+ (*alg->hash_update)(&alg->hash_ctx2, f->fts_path, f->fts_pathlen);
+ (*alg->hash_end)(&alg->hash_ctx2, &digest[0]);
+ digest[l] = '\n';
+ (*alg->hash_update)(&alg->hash_ctx, digest, 33);
+
+ (*alg->hash_init)(&alg->hash_ctx2);
+ while ((cc = read(fd, in, sizeof(in))) > 0) {
+ (*alg->hash_update)(&alg->hash_ctx2, in, cc);
+ }
+ close(fd);
+ (*alg->hash_end)(&alg->hash_ctx2, digest);
+ digest[l] = '\n';
+ (*alg->hash_update)(&alg->hash_ctx, digest, l + 1);
+ } else {
+ (void) fprintf(stderr, "%s\n", f->fts_path);
+ rc = 0;
+ }
+
+ /* hash in symlinks as well, along with the link contents */
+ } else if (S_ISLNK(f->fts_statp->st_mode)) {
+ if ((cc = readlink(f->fts_path, in, sizeof(in))) > 0) {
+ (*alg->hash_init)(&alg->hash_ctx2);
+ (*alg->hash_update)(&alg->hash_ctx2, "l ", 2);
+ (*alg->hash_update)(&alg->hash_ctx2, f->fts_path, f->fts_pathlen);
+ (*alg->hash_end)(&alg->hash_ctx2, digest);
+ digest[l] = '\n';
+ (*alg->hash_update)(&alg->hash_ctx, digest, l + 1);
+
+ (*alg->hash_init)(&alg->hash_ctx2);
+ (*alg->hash_update)(&alg->hash_ctx2, in, cc);
+ (*alg->hash_end)(&alg->hash_ctx2, digest);
+ digest[l] = '\n';
+ (*alg->hash_update)(&alg->hash_ctx, digest, l + 1);
+ } else {
+ (void) fprintf(stderr, "%s\n", f->fts_path);
+ rc = 0;
+ }
+ }
+ }
+ }
+
+ fts_close(ftsp);
+ fchdir(cwd);
+ close(cwd);
+
+ if (rc == 1) {
+ (*alg->hash_end)(&alg->hash_ctx, digest);
+ (void) printf("%s (%s) = %s\n", alg->name, dn, digest);
+ }
+
+ free(digest);
+ return (rc);
+}
+
int
main(int argc, char **argv)
{
alg_t *alg;
int rval;
int i;
+ struct stat st;
#ifdef HAVE_SETLOCALE
(void) setlocale(LC_ALL, "");
#endif
- while ((i = getopt(argc, argv, "V")) != -1) {
+ while ((i = getopt(argc, argv, "Vx:")) != -1) {
switch(i) {
case 'V':
printf("%s\n", VERSION);
return EXIT_SUCCESS;
+ case 'x':
+ exclude(optarg);
+ break;
}
}
argc -= optind;
@@ -186,7 +342,9 @@ main(int argc, char **argv)
}
} else {
for (i = 0 ; i < argc ; i++) {
- if (!digest_file(argv[i], alg)) {
+ if (stat(argv[i], &st) == -1 ||
+ (S_ISREG(st.st_mode) && !digest_file(argv[i], alg)) ||
+ (S_ISDIR(st.st_mode) && !digest_directory(argv[i], alg))) {
(void) fprintf(stderr, "%s\n", argv[i]);
rval = EXIT_FAILURE;
}

View file

@@ -0,0 +1,47 @@
This utility is a wrapper for the md5(3), sha1(3) and rmd160(3)
routines.
It is remarkably similar to the digest package, except that it
can recursively checksum directory trees.
While a simple checksum on a tar file is usually satisfactory, once
said archive has been extracted, it is virtually impossible to
repackage the extracted tree in a form that can yield a repeatable
checksum. To be specific, changes in timestamps and ownership, and
changes in file ordering within directories can affect the checksum of
the archive, while not really impacting the actual code at all.
The algorithm used to checksum a directory in this implementation is
as follows:
cd into directory
initialize MASTER_HASH
walk directory tree, sorting all entries
foreach entry
if it is a directory:
skip it if it is named "RCS", "CVS", or "SCCS"
initialize SLAVE_HASH
add the string "d " into SLAVE_HASH
add the pathname of the directory into SLAVE_HASH
finish SLAVE_HASH
fold the hexified SLAVE_HASH result into MASTER_HASH
if it is a file:
initialize SLAVE_HASH
add the string "f " into SLAVE_HASH
add the pathname of the file into SLAVE_HASH
finish SLAVE_HASH
fold the hexified SLAVE_HASH result into MASTER_HASH
initialize SLAVE_HASH
add the contents of the file into SLAVE_HASH
finish SLAVE_HASH
fold the hexified SLAVE_HASH result into MASTER_HASH
if it is a symbolic link
initialize SLAVE_HASH
add the string "l " into SLAVE_HASH
add the pathname of the link into SLAVE_HASH
finish SLAVE_HASH
fold the hexified SLAVE_HASH result into MASTER_HASH
initialize SLAVE_HASH
add the contents of the link into SLAVE_HASH
finish SLAVE_HASH
fold the hexified SLAVE_HASH result into MASTER_HASH