"""
Store-type extension that writes data to Amazon S3.

This extension requires a new configuration section <amazons3> and is intended
to be run immediately after the standard stage action, replacing the standard
store action. Aside from its own configuration, it requires the options and
staging configuration sections in the standard Cedar Backup configuration file.
Since it is intended to replace the store action, it does not rely on any store
configuration.
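
As a point of reference, a filled-in C{<amazons3>} section looks something like
the example below. The field names match the C{addConfig} mapping later in this
module; the bucket name, command, and limits are purely illustrative::

   <amazons3>
      <warn_midnite>Y</warn_midnite>
      <s3_bucket>example-backup-bucket</s3_bucket>
      <encrypt>/usr/bin/gpg -c --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}</encrypt>
      <full_size_limit>20000000</full_size_limit>
      <incr_size_limit>5000000</incr_size_limit>
   </amazons3>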

The underlying functionality relies on the U{AWS CLI interface
<http://aws.amazon.com/documentation/cli/>}. Before you use this extension,
you need to set up your Amazon S3 account and configure the AWS CLI connection
per Amazon's documentation. The extension assumes that the backup is being
executed as root, and switches over to the configured backup user to
communicate with AWS. So, make sure you configure AWS CLI as the backup user
and not root.

You can optionally configure Cedar Backup to encrypt data before sending it
to S3. To do that, provide a complete command line using the C{${input}} and
C{${output}} variables to represent the original input file and the encrypted
output file. This command will be executed as the backup user.

For instance, you can use something like this with GPG::

   /usr/bin/gpg -c --no-use-agent --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}

The GPG mechanism depends on a strong passphrase for security. One way to
generate a strong passphrase is to use your system random number generator,
for example::

   dd if=/dev/urandom count=20 bs=1 | xxd -ps

(See U{StackExchange <http://security.stackexchange.com/questions/14867/gpg-encryption-security>}
for more details about that advice.) If you decide to use encryption, make sure
you save off the passphrase in a safe place, so you can get at your backup data
later if you need to. And obviously, make sure to set permissions on the
passphrase file so it can only be read by the backup user.
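
When the time comes to restore, you will need to decrypt each file before you
can use it. A command along these lines should work for data encrypted with
the GPG example above, assuming the same passphrase file is still available
(file names are illustrative)::

   /usr/bin/gpg -d --batch --yes --passphrase-file /home/backup/.passphrase -o restored.tar.gz encrypted.tar.gz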

This extension was written for and tested on Linux. It will throw an exception
if run on Windows.

@author: Kenneth J. Pronovici <pronovic@ieee.org>
"""


import sys
import os
import logging
import tempfile
import datetime
import json
import shutil

from CedarBackup2.filesystem import FilesystemList, BackupFileList
from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot, changeOwnership, isStartOfWeek
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addBooleanNode, addStringNode, addLongNode
from CedarBackup2.xmlutil import readFirstChild, readString, readBoolean, readLong
from CedarBackup2.actions.util import writeIndicatorFile
from CedarBackup2.actions.constants import DIR_TIME_FORMAT, STAGE_INDICATOR


logger = logging.getLogger("CedarBackup2.log.extend.amazons3")

SU_COMMAND = [ "su" ]
AWS_COMMAND = [ "aws" ]

STORE_INDICATOR = "cback.amazons3"


class AmazonS3Config(object):
   """
   Class representing Amazon S3 configuration.

   Amazon S3 configuration is used for storing backup data in Amazon's S3
   cloud storage using the AWS CLI (C{aws}) tool.

   The following restrictions exist on data in this class:

      - The s3Bucket value must be a non-empty string
      - The encryptCommand value, if set, must be a non-empty string
      - The full backup size limit, if set, must be a number of bytes >= 0
      - The incremental backup size limit, if set, must be a number of bytes >= 0

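   As an example, you might construct an instance like this (all values
   illustrative)::

      amazons3 = AmazonS3Config(warnMidnite=True, s3Bucket="example-backup-bucket",
                                fullBackupSizeLimit=20000000, incrementalBackupSizeLimit=5000000)
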
   @sort: __init__, __repr__, __str__, __cmp__, warnMidnite, s3Bucket,
          encryptCommand, fullBackupSizeLimit, incrementalBackupSizeLimit
   """

   def __init__(self, warnMidnite=None, s3Bucket=None, encryptCommand=None,
                fullBackupSizeLimit=None, incrementalBackupSizeLimit=None):
      """
      Constructor for the C{AmazonS3Config} class.

      @param warnMidnite: Whether to generate warnings for crossing midnite.
      @param s3Bucket: Name of the Amazon S3 bucket in which to store the data
      @param encryptCommand: Command used to encrypt backup data before upload to S3
      @param fullBackupSizeLimit: Maximum size of a full backup, in bytes
      @param incrementalBackupSizeLimit: Maximum size of an incremental backup, in bytes

      @raise ValueError: If one of the values is invalid.
      """
      self._warnMidnite = None
      self._s3Bucket = None
      self._encryptCommand = None
      self._fullBackupSizeLimit = None
      self._incrementalBackupSizeLimit = None
      self.warnMidnite = warnMidnite
      self.s3Bucket = s3Bucket
      self.encryptCommand = encryptCommand
      self.fullBackupSizeLimit = fullBackupSizeLimit
      self.incrementalBackupSizeLimit = incrementalBackupSizeLimit

   def __repr__(self):
      """
      Official string representation for class instance.
      """
      return "AmazonS3Config(%s, %s, %s, %s, %s)" % (self.warnMidnite, self.s3Bucket, self.encryptCommand,
                                                     self.fullBackupSizeLimit, self.incrementalBackupSizeLimit)

   def __str__(self):
      """
      Informal string representation for class instance.
      """
      return self.__repr__()

   def __cmp__(self, other):
      """
      Definition of equals operator for this class.
      @param other: Other object to compare to.
      @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
      """
      if other is None:
         return 1
      if self.warnMidnite != other.warnMidnite:
         if self.warnMidnite < other.warnMidnite:
            return -1
         else:
            return 1
      if self.s3Bucket != other.s3Bucket:
         if self.s3Bucket < other.s3Bucket:
            return -1
         else:
            return 1
      if self.encryptCommand != other.encryptCommand:
         if self.encryptCommand < other.encryptCommand:
            return -1
         else:
            return 1
      if self.fullBackupSizeLimit != other.fullBackupSizeLimit:
         if self.fullBackupSizeLimit < other.fullBackupSizeLimit:
            return -1
         else:
            return 1
      if self.incrementalBackupSizeLimit != other.incrementalBackupSizeLimit:
         if self.incrementalBackupSizeLimit < other.incrementalBackupSizeLimit:
            return -1
         else:
            return 1
      return 0

   def _setWarnMidnite(self, value):
      """
      Property target used to set the midnite warning flag.
      No validations, but we normalize the value to C{True} or C{False}.
      """
      if value:
         self._warnMidnite = True
      else:
         self._warnMidnite = False

   def _getWarnMidnite(self):
      """
      Property target used to get the midnite warning flag.
      """
      return self._warnMidnite

   def _setS3Bucket(self, value):
      """
      Property target used to set the S3 bucket.
      """
      if value is not None:
         if len(value) < 1:
            raise ValueError("S3 bucket must be a non-empty string.")
      self._s3Bucket = value

   def _getS3Bucket(self):
      """
      Property target used to get the S3 bucket.
      """
      return self._s3Bucket

   def _setEncryptCommand(self, value):
      """
      Property target used to set the encrypt command.
      """
      if value is not None:
         if len(value) < 1:
            raise ValueError("Encrypt command must be a non-empty string.")
      self._encryptCommand = value

   def _getEncryptCommand(self):
      """
      Property target used to get the encrypt command.
      """
      return self._encryptCommand

   def _setFullBackupSizeLimit(self, value):
      """
      Property target used to set the full backup size limit.
      The value must be an integer >= 0.
      @raise ValueError: If the value is not valid.
      """
      if value is None:
         self._fullBackupSizeLimit = None
      else:
         try:
            value = int(value)
         except (TypeError, ValueError):
            raise ValueError("Full backup size limit must be an integer >= 0.")
         if value < 0:
            raise ValueError("Full backup size limit must be an integer >= 0.")
         self._fullBackupSizeLimit = value

   def _getFullBackupSizeLimit(self):
      """
      Property target used to get the full backup size limit.
      """
      return self._fullBackupSizeLimit

   def _setIncrementalBackupSizeLimit(self, value):
      """
      Property target used to set the incremental backup size limit.
      The value must be an integer >= 0.
      @raise ValueError: If the value is not valid.
      """
      if value is None:
         self._incrementalBackupSizeLimit = None
      else:
         try:
            value = int(value)
         except (TypeError, ValueError):
            raise ValueError("Incremental backup size limit must be an integer >= 0.")
         if value < 0:
            raise ValueError("Incremental backup size limit must be an integer >= 0.")
         self._incrementalBackupSizeLimit = value

   def _getIncrementalBackupSizeLimit(self):
      """
      Property target used to get the incremental backup size limit.
      """
      return self._incrementalBackupSizeLimit

   warnMidnite = property(_getWarnMidnite, _setWarnMidnite, None, doc="Whether to generate warnings for crossing midnite.")
   s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 bucket in which to store data")
   encryptCommand = property(_getEncryptCommand, _setEncryptCommand, None, doc="Command used to encrypt data before upload to S3")
   fullBackupSizeLimit = property(_getFullBackupSizeLimit, _setFullBackupSizeLimit, None,
                                  doc="Maximum size of a full backup, in bytes")
   incrementalBackupSizeLimit = property(_getIncrementalBackupSizeLimit, _setIncrementalBackupSizeLimit, None,
                                         doc="Maximum size of an incremental backup, in bytes")


class LocalConfig(object):
   """
   Class representing this extension's configuration document.

   This is not a general-purpose configuration object like the main Cedar
   Backup configuration object. Instead, it just knows how to parse and emit
   amazons3-specific configuration values. Third parties who need to read and
   write configuration related to this extension should access it through the
   constructor, C{validate} and C{addConfig} methods.

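   As a sketch of typical third-party usage, assuming a Cedar Backup
   configuration file at the conventional C{/etc/cback.conf} location::

      local = LocalConfig(xmlPath="/etc/cback.conf")
      bucket = local.amazons3.s3Bucket
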
   @note: Lists within this class are "unordered" for equality comparisons.

   @sort: __init__, __repr__, __str__, __cmp__, amazons3, validate, addConfig
   """

   def __init__(self, xmlData=None, xmlPath=None, validate=True):
      """
      Initializes a configuration object.

      If you initialize the object without passing either C{xmlData} or
      C{xmlPath}, then configuration will be empty and will be invalid until it
      is filled in properly.

      No reference to the original XML data or original path is saved off by
      this class. Once the data has been parsed (successfully or not), this
      original information is discarded.

      Unless the C{validate} argument is C{False}, the L{LocalConfig.validate}
      method will be called (with its default arguments) against configuration
      after successfully parsing any passed-in XML. Keep in mind that even if
      C{validate} is C{False}, it might not be possible to parse the passed-in
      XML document if lower-level validations fail.

      @note: It is strongly suggested that the C{validate} option always be set
      to C{True} (the default) unless there is a specific need to read in
      invalid configuration from disk.

      @param xmlData: XML data representing configuration.
      @type xmlData: String data.

      @param xmlPath: Path to an XML file on disk.
      @type xmlPath: Absolute path to a file on disk.

      @param validate: Validate the document after parsing it.
      @type validate: Boolean true/false.

      @raise ValueError: If both C{xmlData} and C{xmlPath} are passed in.
      @raise ValueError: If the XML data in C{xmlData} or C{xmlPath} cannot be parsed.
      @raise ValueError: If the parsed configuration document is not valid.
      """
      self._amazons3 = None
      self.amazons3 = None
      if xmlData is not None and xmlPath is not None:
         raise ValueError("Use either xmlData or xmlPath, but not both.")
      if xmlData is not None:
         self._parseXmlData(xmlData)
         if validate:
            self.validate()
      elif xmlPath is not None:
         with open(xmlPath) as f:
            xmlData = f.read()
         self._parseXmlData(xmlData)
         if validate:
            self.validate()

   def __repr__(self):
      """
      Official string representation for class instance.
      """
      return "LocalConfig(%s)" % (self.amazons3)

   def __str__(self):
      """
      Informal string representation for class instance.
      """
      return self.__repr__()

   def __cmp__(self, other):
      """
      Definition of equals operator for this class.
      Lists within this class are "unordered" for equality comparisons.
      @param other: Other object to compare to.
      @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
      """
      if other is None:
         return 1
      if self.amazons3 != other.amazons3:
         if self.amazons3 < other.amazons3:
            return -1
         else:
            return 1
      return 0

   def _setAmazonS3(self, value):
      """
      Property target used to set the amazons3 configuration value.
      If not C{None}, the value must be an C{AmazonS3Config} object.
      @raise ValueError: If the value is not an C{AmazonS3Config}
      """
      if value is None:
         self._amazons3 = None
      else:
         if not isinstance(value, AmazonS3Config):
            raise ValueError("Value must be an AmazonS3Config object.")
         self._amazons3 = value

   def _getAmazonS3(self):
      """
      Property target used to get the amazons3 configuration value.
      """
      return self._amazons3

   amazons3 = property(_getAmazonS3, _setAmazonS3, None, doc="Amazon S3 configuration in terms of an C{AmazonS3Config} object.")

   def validate(self):
      """
      Validates configuration represented by the object.

      AmazonS3 configuration must be filled in, and within that, the s3Bucket
      field must be filled in.

      @raise ValueError: If one of the validations fails.
      """
      if self.amazons3 is None:
         raise ValueError("AmazonS3 section is required.")
      if self.amazons3.s3Bucket is None:
         raise ValueError("AmazonS3 s3Bucket must be set.")

   def addConfig(self, xmlDom, parentNode):
      """
      Adds an <amazons3> configuration section as the next child of a parent.

      Third parties should use this function to write configuration related to
      this extension.

      We add the following fields to the document::

         warnMidnite                 //cb_config/amazons3/warn_midnite
         s3Bucket                    //cb_config/amazons3/s3_bucket
         encryptCommand              //cb_config/amazons3/encrypt
         fullBackupSizeLimit         //cb_config/amazons3/full_size_limit
         incrementalBackupSizeLimit  //cb_config/amazons3/incr_size_limit

      @param xmlDom: DOM tree as from C{impl.createDocument()}.
      @param parentNode: Parent that the section should be appended to.
      """
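      # Sketch of typical third-party usage, assuming a DOM document created
      # via impl.createDocument(None, "cb_config", None) as described above:
      #    local.addConfig(xmlDom, xmlDom.documentElement)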
      if self.amazons3 is not None:
         sectionNode = addContainerNode(xmlDom, parentNode, "amazons3")
         addBooleanNode(xmlDom, sectionNode, "warn_midnite", self.amazons3.warnMidnite)
         addStringNode(xmlDom, sectionNode, "s3_bucket", self.amazons3.s3Bucket)
         addStringNode(xmlDom, sectionNode, "encrypt", self.amazons3.encryptCommand)
         addLongNode(xmlDom, sectionNode, "full_size_limit", self.amazons3.fullBackupSizeLimit)
         addLongNode(xmlDom, sectionNode, "incr_size_limit", self.amazons3.incrementalBackupSizeLimit)

   def _parseXmlData(self, xmlData):
      """
      Internal method to parse an XML string into the object.

      This method parses the XML document into a DOM tree (C{xmlDom}) and then
      calls a static method to parse the amazons3 configuration section.

      @param xmlData: XML data to be parsed
      @type xmlData: String data

      @raise ValueError: If the XML cannot be successfully parsed.
      """
      (xmlDom, parentNode) = createInputDom(xmlData)
      self._amazons3 = LocalConfig._parseAmazonS3(parentNode)

   @staticmethod
   def _parseAmazonS3(parentNode):
      """
      Parses an amazons3 configuration section.

      We read the same individual fields that C{addConfig} writes.

      @param parentNode: Parent node to search beneath.

      @return: C{AmazonS3Config} object, or C{None} if the section does not exist.
      @raise ValueError: If some filled-in value is invalid.
      """
      amazons3 = None
      section = readFirstChild(parentNode, "amazons3")
      if section is not None:
         amazons3 = AmazonS3Config()
         amazons3.warnMidnite = readBoolean(section, "warn_midnite")
         amazons3.s3Bucket = readString(section, "s3_bucket")
         amazons3.encryptCommand = readString(section, "encrypt")
         amazons3.fullBackupSizeLimit = readLong(section, "full_size_limit")
         amazons3.incrementalBackupSizeLimit = readLong(section, "incr_size_limit")
      return amazons3


def executeAction(configPath, options, config):
   """
   Executes the amazons3 backup action.

   @param configPath: Path to configuration file on disk.
   @type configPath: String representing a path on disk.

   @param options: Program command-line options.
   @type options: Options object.

   @param config: Program configuration.
   @type config: Config object.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there are I/O problems reading or writing files
   """
   logger.debug("Executing amazons3 extended action.")
   if not isRunningAsRoot():
      logger.error("Error: the amazons3 extended action must be run as root.")
      raise ValueError("The amazons3 extended action must be run as root.")
   if sys.platform == "win32":
      logger.error("Error: the amazons3 extended action is not supported on Windows.")
      raise ValueError("The amazons3 extended action is not supported on Windows.")
   if config.options is None or config.stage is None:
      raise ValueError("Cedar Backup configuration is not properly filled in.")
   local = LocalConfig(xmlPath=configPath)
   stagingDirs = _findCorrectDailyDir(options, config, local)
   _applySizeLimits(options, config, local, stagingDirs)
   _writeToAmazonS3(config, local, stagingDirs)
   _writeStoreIndicator(config, stagingDirs)
   logger.info("Executed the amazons3 extended action successfully.")


def _findCorrectDailyDir(options, config, local):
   """
   Finds the correct daily staging directory to be written to Amazon S3.

   This is substantially similar to the same function in store.py. The
   main difference is that it doesn't rely on store configuration at all.

   @param options: Options object.
   @param config: Config object.
   @param local: Local config object.

   @return: Correct staging dir, as a dict mapping directory to date suffix.
   @raise IOError: If the staging directory cannot be found.
   """
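   # As an illustration, if config.stage.targetDir is /opt/backup/stage and today
   # is 10 Feb 2005, then (assuming the standard DIR_TIME_FORMAT of "%Y/%m/%d")
   # the candidate paths are /opt/backup/stage/2005/02/10, /opt/backup/stage/2005/02/09
   # and /opt/backup/stage/2005/02/11 for today, yesterday and tomorrow respectively.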
   oneDay = datetime.timedelta(days=1)
   today = datetime.date.today()
   yesterday = today - oneDay
   tomorrow = today + oneDay
   todayDate = today.strftime(DIR_TIME_FORMAT)
   yesterdayDate = yesterday.strftime(DIR_TIME_FORMAT)
   tomorrowDate = tomorrow.strftime(DIR_TIME_FORMAT)
   todayPath = os.path.join(config.stage.targetDir, todayDate)
   yesterdayPath = os.path.join(config.stage.targetDir, yesterdayDate)
   tomorrowPath = os.path.join(config.stage.targetDir, tomorrowDate)
   todayStageInd = os.path.join(todayPath, STAGE_INDICATOR)
   yesterdayStageInd = os.path.join(yesterdayPath, STAGE_INDICATOR)
   tomorrowStageInd = os.path.join(tomorrowPath, STAGE_INDICATOR)
   todayStoreInd = os.path.join(todayPath, STORE_INDICATOR)
   yesterdayStoreInd = os.path.join(yesterdayPath, STORE_INDICATOR)
   tomorrowStoreInd = os.path.join(tomorrowPath, STORE_INDICATOR)
   if options.full:
      if os.path.isdir(todayPath) and os.path.exists(todayStageInd):
         logger.info("Amazon S3 process will use current day's staging directory [%s]" % todayPath)
         return { todayPath:todayDate }
      raise IOError("Unable to find staging directory to process (only tried today due to full option).")
   else:
      if os.path.isdir(todayPath) and os.path.exists(todayStageInd) and not os.path.exists(todayStoreInd):
         logger.info("Amazon S3 process will use current day's staging directory [%s]" % todayPath)
         return { todayPath:todayDate }
      elif os.path.isdir(yesterdayPath) and os.path.exists(yesterdayStageInd) and not os.path.exists(yesterdayStoreInd):
         logger.info("Amazon S3 process will use previous day's staging directory [%s]" % yesterdayPath)
         if local.amazons3.warnMidnite:
            logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
         return { yesterdayPath:yesterdayDate }
      elif os.path.isdir(tomorrowPath) and os.path.exists(tomorrowStageInd) and not os.path.exists(tomorrowStoreInd):
         logger.info("Amazon S3 process will use next day's staging directory [%s]" % tomorrowPath)
         if local.amazons3.warnMidnite:
            logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
         return { tomorrowPath:tomorrowDate }
      raise IOError("Unable to find unused staging directory to process (tried today, yesterday, tomorrow).")


def _applySizeLimits(options, config, local, stagingDirs):
   """
   Apply size limits, throwing an exception if any limits are exceeded.

   Size limits are optional. If a limit is set to None, it does not apply.
   The full size limit applies if the full option is set or if today is the
   start of the week. The incremental size limit applies otherwise. Limits
   are applied to the total size of all the relevant staging directories.

   @param options: Options object.
   @param config: Config object.
   @param local: Local config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.

   @raise ValueError: Under many generic error conditions
   @raise ValueError: If a size limit has been exceeded
   """
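   # For example, with a full_size_limit of 20000000 bytes, a run on the configured
   # starting day of the week (or any run with the --full option) fails if the total
   # staged data exceeds 20000000 bytes; other runs check incr_size_limit instead.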
   if options.full or isStartOfWeek(config.options.startingDay):
      logger.debug("Using Amazon S3 size limit for full backups.")
      limit = local.amazons3.fullBackupSizeLimit
   else:
      logger.debug("Using Amazon S3 size limit for incremental backups.")
      limit = local.amazons3.incrementalBackupSizeLimit
   if limit is None:
      logger.debug("No Amazon S3 size limit will be applied.")
   else:
      logger.debug("Amazon S3 size limit is: %d bytes" % limit)
      contents = BackupFileList()
      for stagingDir in stagingDirs:
         contents.addDirContents(stagingDir)
      total = contents.totalSize()
      logger.debug("Amazon S3 backup size is: %d bytes" % total)
      if total > limit:
         logger.error("Amazon S3 size limit exceeded: %.0f bytes > %d bytes" % (total, limit))
         raise ValueError("Amazon S3 size limit exceeded: %.0f bytes > %d bytes" % (total, limit))
      else:
         logger.info("Total size does not exceed Amazon S3 size limit, so backup can continue.")


def _writeToAmazonS3(config, local, stagingDirs):
   """
   Writes the indicated staging directories to an Amazon S3 bucket.

   Each of the staging directories listed in C{stagingDirs} will be written to
   the configured Amazon S3 bucket from local configuration. The directories
   will be placed into the bucket at the root by date, so staging directory
   C{/opt/stage/2005/02/10} will be placed into the S3 bucket at C{/2005/02/10}.
   If an encrypt command is provided, the files will be encrypted first.

   @param config: Config object.
   @param local: Local config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there is a problem writing to Amazon S3
   """
   for stagingDir in stagingDirs.keys():
      logger.debug("Storing stage directory to Amazon S3 [%s]." % stagingDir)
      dateSuffix = stagingDirs[stagingDir]
      s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
      logger.debug("S3 bucket URL is [%s]" % s3BucketUrl)
      _clearExistingBackup(config, s3BucketUrl)
      if local.amazons3.encryptCommand is None:
         logger.debug("Encryption is disabled; files will be uploaded in cleartext.")
         _uploadStagingDir(config, stagingDir, s3BucketUrl)
         _verifyUpload(config, stagingDir, s3BucketUrl)
      else:
         logger.debug("Encryption is enabled; files will be uploaded after being encrypted.")
         encryptedDir = tempfile.mkdtemp(dir=config.options.workingDir)
         changeOwnership(encryptedDir, config.options.backupUser, config.options.backupGroup)
         try:
            _encryptStagingDir(config, local, stagingDir, encryptedDir)
            _uploadStagingDir(config, encryptedDir, s3BucketUrl)
            _verifyUpload(config, encryptedDir, s3BucketUrl)
         finally:
            if os.path.exists(encryptedDir):
               shutil.rmtree(encryptedDir)


def _writeStoreIndicator(config, stagingDirs):
   """
   Writes a store indicator file into the processed staging directories.

   The indicator file prevents the same staging directory from being
   processed again on a later run.

   @param config: Config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.
   """
   for stagingDir in stagingDirs.keys():
      writeIndicatorFile(stagingDir, STORE_INDICATOR, config.options.backupUser, config.options.backupGroup)


def _clearExistingBackup(config, s3BucketUrl):
   """
   Clears any existing backup files for an S3 bucket URL.
   @param config: Config object.
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   actualCommand = "%s s3 rm --recursive %s/" % (awsCommand[0], s3BucketUrl)
   result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to clear existing backup for [%s]." % (result, s3BucketUrl))
   logger.debug("Completed clearing any existing backup in S3 for [%s]" % s3BucketUrl)


def _uploadStagingDir(config, stagingDir, s3BucketUrl):
   """
   Uploads the contents of a staging directory out to the Amazon S3 cloud.
   @param config: Config object.
   @param stagingDir: Staging directory to upload
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
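   # The end result is equivalent to running a command like the following as
   # the backup user (paths and bucket illustrative):
   #    aws s3 cp --recursive /opt/stage/2005/02/10/ s3://example-backup-bucket/2005/02/10/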
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   actualCommand = "%s s3 cp --recursive %s/ %s/" % (awsCommand[0], stagingDir, s3BucketUrl)
   result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to upload staging directory to [%s]." % (result, s3BucketUrl))
   logger.debug("Completed uploading staging dir [%s] to [%s]" % (stagingDir, s3BucketUrl))


def _verifyUpload(config, stagingDir, s3BucketUrl):
   """
   Verifies that a staging directory was properly uploaded to the Amazon S3 cloud.
   @param config: Config object.
   @param stagingDir: Staging directory to verify
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
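   # The s3api query used below yields JSON shaped like this, one entry per
   # uploaded object (key and size illustrative):
   #    [ { "Key": "2005/02/10/host.tar.gz", "Size": 1024 }, ... ]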
   (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/", 1)
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   query = "Contents[].{Key: Key, Size: Size}"
   actualCommand = "%s s3api list-objects --bucket %s --prefix %s --query '%s'" % (awsCommand[0], bucket, prefix, query)
   (result, data) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to verify upload to [%s]." % (result, s3BucketUrl))
   contents = { }
   for entry in json.loads("".join(data)):
      key = entry["Key"].replace(prefix, "")
      size = long(entry["Size"])
      contents[key] = size
   files = FilesystemList()
   files.addDirContents(stagingDir)
   for entry in files:
      if os.path.isfile(entry):
         key = entry.replace(stagingDir, "")
         size = long(os.stat(entry).st_size)
         if key not in contents:
            raise IOError("File was apparently not uploaded: [%s]" % entry)
         else:
            if size != contents[key]:
               raise IOError("File size differs [%s], expected %s bytes but got %s bytes" % (entry, size, contents[key]))
   logger.debug("Completed verifying upload from [%s] to [%s]." % (stagingDir, s3BucketUrl))


def _encryptStagingDir(config, local, stagingDir, encryptedDir):
   """
   Encrypts a staging directory, creating a new directory in the process.
   @param config: Config object.
   @param local: Local config object.
   @param stagingDir: Staging directory to use as source
   @param encryptedDir: Target directory into which encrypted files should be written
   """
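   # Each file is encrypted by substituting its paths into the configured command.
   # With the GPG example from the module docstring, a cleartext file such as
   # /opt/stage/2005/02/10/host.tar.gz (illustrative) would be encrypted via:
   #    /usr/bin/gpg -c ... -o <encryptedDir>/host.tar.gz /opt/stage/2005/02/10/host.tar.gz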
   suCommand = resolveCommand(SU_COMMAND)
   files = FilesystemList()
   files.addDirContents(stagingDir)
   for cleartext in files:
      if os.path.isfile(cleartext):
         encrypted = "%s%s" % (encryptedDir, cleartext.replace(stagingDir, ""))
         subdir = os.path.dirname(encrypted)
         if not os.path.isdir(subdir):
            os.makedirs(subdir)
            changeOwnership(subdir, config.options.backupUser, config.options.backupGroup)
         if long(os.stat(cleartext).st_size) == 0:
            open(encrypted, 'a').close()  # don't bother encrypting an empty file; just touch it
         else:
            actualCommand = local.amazons3.encryptCommand.replace("${input}", cleartext).replace("${output}", encrypted)
            result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
            if result != 0:
               raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
   logger.debug("Completed encrypting staging directory [%s] into [%s]" % (stagingDir, encryptedDir))