#!/usr/bin/perl
## ---------- ---------- ---------- ---------- ---------- ----------
## snapback -- 15.May.2002 -- Art Mulder
## - rsync and hard-link backup script
## - based on research by Mike Rubel
##   http://www.mikerubel.org/computers/rsync_snapshots/
## - This works on a "pull" basis.  The backup server runs this
##   script and rsync's to the client machine(s) to backup the
##   requested directories.
##
## - NOT MUCH ERROR CHECKING!!!
## ---------- ---------- ---------- ---------- ---------- ----------
## Logic Layout:
##  - startup, usage, read config file
##  - rotate the snapshot directories
##  - rsync the client/backup directories
##  - create daily/wkly/monthly backup-link directories if needed
##  - notify admin's of log results. (flag errors?)
## ---------- ---------- ---------- ---------- ---------- ----------
## TODO: Is there a better way of catching the cp/rsync errors?
## ---------- ---------- ---------- ---------- ---------- ----------

## Variables and other Setups
use Getopt::Std;        ## standard command-line processing functions
use Sys::Hostname;

$rm = "/bin/rm";        ## location of rm/mv/cp
$mv = "/bin/mv";
$cp = "/bin/cp";

## Tool to "rotate" logfiles.
$CYCLELOG = "/local/sbin/cyclelog -n 15 -Z ";   ## 15 files, gzipped.

$rsync  = "/usr/bin/rsync";   ## location of rsync, plus options...
$r_opts = "-av --force --delete-excluded --one-file-system --delete";

$myname = $0;
$myhost = hostname();

#---------- ---------- ---------- ---------- ---------- ----------
# Process command-line Arguments + Options
# BUGFIX: was "getopts('c:d') || usage;" -- at that point usage() is
# not yet declared, so "usage" parsed as a true bareword string and a
# getopts() failure was silently ignored.  Call the sub explicitly.
getopts('c:d') || usage();
# ignore the rest of the command line.
# TODO: for error checking maybe we shouldn't ignore it...

$debug  = 0;
$config = "/etc/snapback.conf";
$hourly = $daily = $weekly = 0;

if ($opt_d) { $debug  = 1; }
if ($opt_c) { $config = $opt_c; }

#---------- ---------- ---------- ---------- ---------- ----------
# Process config file.  It is plain perl, expected to set globals:
# $hr_backup, $dest, $logfile, $admins, @clients, the *_dir suffixes
# and *_backup counts, $always_email, etc.
require $config;

# Sanity-check the config file.
die "Malformed config file: hr_backup must be >0\n" unless ($hr_backup);
die "Missing destination $dest\n" unless (-d $dest);

#---------- ---------- ---------- ---------- ---------- ----------
# Set up logging.  Raw rsync output is collected in its own file and
# appended to the main log at the end of the run.
$errors_logged = 0;
$rsync_log = "/tmp/rsync_log$$";
system("touch $rsync_log");

## $main_log = "/tmp/snap_log$$";
## open MAIN_LOG, ">$main_log" or die "Can't open $main_log";

system("$CYCLELOG $logfile");
open LOGFILE, ">$logfile" or die "Can't open $logfile";
print LOGFILE "$myname : $myhost\n";

($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
print LOGFILE "begin @ $hour:$min $mday/$mon/", ($year + 1900), "...\n";

#---------- ---------- ---------- ---------- ---------- ----------
# Main Body - Loop through the list of clients
$host = 0;   ## indexes into @clients 2d array.
$dir  = 1;   ##

for $i (0 .. $#clients) {
    if ($debug) { print "DEBUG: $i: $clients[$i][$host], $clients[$i][$dir]\n"; }
    do_backup($clients[$i][$host], $clients[$i][$dir]);
}

## clean up log files
($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
print LOGFILE "... end @ $hour:$min $mday/$mon/", ($year + 1900), "\n";
close LOGFILE;

## TODO check the rsync log file for errors from rsync
##  - set errors_logged if so
##  - create new log file, important info at top, rsync detail at bottom
`cat $rsync_log >> $logfile`;

## email results to admin's
if ($always_email || $errors_logged) {
    `cat $logfile | mail -s "$myhost: $myname backup log" $admins`;
}

## END
## ---------- ---------- ---------- ---------- ---------- ----------
## ---------- ---------- ---------- ---------- ---------- ----------
## ---------- ---------- ---------- ---------- ---------- ----------

## Print a usage summary and exit.
sub usage {
    printf "\n$myname: Rsync/Hard-Link/Snapshot-like backup script.\n\n";
    printf "USAGE: $myname [-c config-file -d ]\n";
    printf "\t -c config-file : config file (default: /etc/snapback.conf)\n";
    printf "\t -d : debug mode (default: Off)\n";
    exit;
}

## ---------- ---------- ---------- ---------- ---------- ----------
## Back up one client directory: rotate the hourly snapshots, hard-link
## the newest one, rsync from the client over it, and -- on the first
## run of a new day -- roll the daily/weekly/monthly link trees.
##   $fqdn : fully-qualified client hostname (rsync source host)
##   $dir  : absolute directory on the client to back up
sub do_backup {
    my ($fqdn, $dir) = @_;                    ## Long form of hostname
    my ($host, @rest) = split(/\./, $fqdn);   ## short form of hostname
    my ($prefix, $backupdir);
    my $rotate_all = 0;                       ## flag for do_rotate routine

    ## ----------
    ## STEP 1: check the clock and verify if we are just doing
    ## the hourly backups, or also the daily/weekly/monthlies.
    ## If the timestamp on the current backup dir does not match
    ## todays date, then this must be the first run after midnight,
    ## so we check the dailies/weeklies/monthlies also.
    ## Not very efficient, since we check this for each backup set
    ## that we run, instead of just once for all.  Oh well.

    ## Check the file.
    ## NOTE(review): this stats "$dir.0", i.e. a local path named after
    ## the *client* directory; it looks like the snapshot dir under
    ## $dest was intended -- confirm before changing.
    my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,
        $atime,$mtime,$ctime,$blksize,$blocks) = stat "$dir.0";
    my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)
        = localtime($ctime);
    my $backup_date = $yday;

    ## Check the clock
    ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);

    ## we assume (dangerous I know) that if the timestamp on the directory
    ## is not the same date as today, then it must be yesterday.  In any
    ## case, this is then the first run after midnight today.
    ## BUGFIX: the old "local(...) = 0" only initialized the first flag;
    ## initialize all three explicitly.
    my ($do_dailies, $do_weeklies, $do_monthlies) = (0, 0, 0);
    if ($backup_date != $yday) {
        $do_dailies = 1;
        if ($debug) { print "DEBUG: do_dailies=true\n"; }
        ## BUGFIX: both tests below used "=" (assignment, always false),
        ## so monthlies/weeklies never ran.  Per localtime(), $mday is
        ## 1-based (first of month is 1) and $wday 0 is Sunday.
        if ($mday == 1) { $do_monthlies = 1; }   ## ... And first of month.
        if ($wday == 0) { $do_weeklies  = 1; }   ## ... And First of Week.
    }

    ## ----------
    ## STEP 2: housekeeping - is the backup destination directory
    ## set up?  TODO: create it otherwise?
    $prefix = $dest . "/" . $host . $dir;
    die "Missing $prefix\n" unless (-d $prefix);

    ## ----------
    ## STEP 3: Process Hourly backups

    ## 3.1: Rotate older backups
    $backupdir = $prefix . $hr_dir;
    if ($debug) { print "DEBUG: do_rotate($hr_backup,$backupdir)\n"; }
    do_rotate($hr_backup, $backupdir, $rotate_all);

    ## 3.2: Hard link from the newest backup:
    if (-d "$backupdir.0") {
        if ($debug) { print "DEBUG: Hard Link newest backup\n"; }
        system("$cp -al $backupdir.0 $backupdir.1") == 0
            or die "FAILED: $cp -al $backupdir.0 $backupdir.1";
    }

    ## 3.3:
    ## Now rsync from the client dir into the latest snapshot
    ## (notice that rsync behaves like cp --remove-destination by
    ## default, so the destination is unlinked first.  If it were not
    ## so, this would copy over the other snapshot(s) too!
    if ($debug) { print "DEBUG: $rsync $r_opts $fqdn:$dir $backupdir.0\n"; }
    `echo "--\n$rsync $r_opts $fqdn:$dir $backupdir.0\n\n" >> $rsync_log`;
    system("$rsync $r_opts $fqdn:$dir $backupdir.0 >> $rsync_log") == 0
        or die "FAILED: $rsync $r_opts $fqdn:$dir $backupdir.0 >> $rsync_log";

    # update the mtime of hourly.0 to reflect the snapshot time
    system("touch $backupdir.0");

    ## ----------
    ## STEP 4: Process Daily/Weekly/Monthly backups
    ## -- simpler than above, the rsync is already done.  We just need
    ##    to "rotate" the old backups, and then hard link to the
    ##    newest hourly backup from yesterday.  NOTE that will be the
    ##    .1 version, not the .0 version -- the .0 version is from today.
    ## BUGFIX: was "$backupdir.0" written without quotes -- string
    ## concatenation with the number 0, yielding ".../hourly0" (no dot),
    ## a path that never exists, so the -d guards below never fired and
    ## the daily/weekly/monthly links were never made.  Per the comment
    ## above, yesterday's last hourly is the ".1" snapshot.
    my $yesterdays_hourly = "$backupdir.1";
    $rotate_all = 1;   ## flag for do_rotate routine

    ## Daily Backups - similar steps to above, rotate, hard link
    if ($do_dailies) {
        $backupdir = $prefix . $daily_dir;
        do_rotate($daily_backup, $backupdir, $rotate_all);

        ## No rsync necessary, just hard-link from the most-recent hourly.
        if (-d "$yesterdays_hourly") {
            system("$cp -al $yesterdays_hourly $backupdir.0") == 0
                or die "FAILED: $cp -al $yesterdays_hourly $backupdir.0";
        }
    }

    ## Weekly Backups
    if ($do_weeklies) {
        $backupdir = $prefix . $wkly_dir;
        do_rotate($wkly_backup, $backupdir, $rotate_all);

        if (-d "$yesterdays_hourly") {
            system("$cp -al $yesterdays_hourly $backupdir.0") == 0
                or die "FAILED: $cp -al $yesterdays_hourly $backupdir.0";
        }
    }

    ## Monthly Backups
    if ($do_monthlies) {
        $backupdir = $prefix . $mthly_dir;
        do_rotate($mthly_backup, $backupdir, $rotate_all);

        if (-d "$yesterdays_hourly") {
            system("$cp -al $yesterdays_hourly $backupdir.0") == 0
                or die "FAILED: $cp -al $yesterdays_hourly $backupdir.0";
        }
    }
}

## ---------- ---------- ---------- ---------- ---------- ----------
# Age/rotate the old backup directories.
# -- the backup dirs are named like: back.0 (newest) ... back.N (oldest)
# -- $maxbackups = number of copies kept; we count from Zero, so for
#    4 copies we'd have back.0, back.1, back.2, back.3.
# -- we delete the oldest (back.3) and rename each remaining dir one
#    step older: back.2 becomes back.3, back.1 becomes back.2, etc.
# -- for hourly rotation ($rotate_all false) back.0 is left in place so
#    the caller can hard-link and rsync over it; for daily/weekly/
#    monthly rotation ($rotate_all true) back.0 is rotated like the rest.
sub do_rotate {
    my ($maxbackups, $dir, $rotate_all) = @_;

    ## Step 1: nothing to do if they're only keeping 1 copy of the
    ## hourlies -- the caller's rsync simply refreshes back.0 in place.
    return if ($maxbackups == 1) && ($rotate_all == 0);

    ## Step 2: delete the oldest copy.  (eg: $dir.3)
    ## $countplus stays one slot "older" than $count during the renames.
    my $count     = $maxbackups - 1;
    my $countplus = $maxbackups - 1;
    if (-d "$dir.$count") {
        if ($debug) { print "DEBUG: $rm -rf $dir.$count\n"; }
        ## list-form system: args go directly to rm, no shell quoting
        ## problems with spaces/metacharacters in $dir.
        system($rm, '-rf', "$dir.$count") == 0
            or die "FAILED: $rm -rf $dir.$count";
    }
    $count--;

    ## Step 3: rotate/rename the "middle" copies.  (eg: $dir.1,2,3)
    ## DO NOTHING with the most recent backup (eg: $dir.0) of hourlies.
    ## Rotate same as the rest for dailies/weeklies/etc.
    ## BUGFIX(minor): $smallest was an undeclared package global; make
    ## it lexical so nothing leaks out of this sub.
    my $smallest = $rotate_all ? 0 : 1;
    while ($count >= $smallest) {
        if ($debug) { print "DEBUG: count = $count, "; }
        if (-d "$dir.$count") {
            if ($debug) { print "DEBUG: $mv $dir.$count $dir.$countplus\n"; }
            system($mv, "$dir.$count", "$dir.$countplus") == 0
                or die "FAILED: $mv $dir.$count $dir.$countplus";
        }
        $count--;
        $countplus--;
    }
}

## ---------- ---------- ---------- ---------- ---------- ----------
## Need a local "die" equivalent function, to
## print an error and clean up, and ensure that the
## admin's are emailed results.  Tie into the normal end-of-program?