38 """
39 Store-type extension that writes data to Amazon S3.
40
41 This extension requires a new configuration section <amazons3> and is intended
42 to be run immediately after the standard stage action, replacing the standard
43 store action. Aside from its own configuration, it requires the options and
44 staging configuration sections in the standard Cedar Backup configuration file.
45 Since it is intended to replace the store action, it does not rely on any store
46 configuration.
47
48 The underlying functionality relies on the U{AWS CLI interface
49 <http://aws.amazon.com/documentation/cli/>}. Before you use this extension,
50 you need to set up your Amazon S3 account and configure the AWS CLI connection
51 per Amazon's documentation. The extension assumes that the backup is being
52 executed as root, and switches over to the configured backup user to
53 communicate with AWS. So, make sure you configure AWS CLI as the backup user
54 and not root.
55
56 You can optionally configure Cedar Backup to encrypt data before sending it
57 to S3. To do that, provide a complete command line using the C{${input}} and
58 C{${output}} variables to represent the original input file and the encrypted
59 output file. This command will be executed as the backup user.
60
61 For instance, you can use something like this with GPG::
62
63 /usr/bin/gpg -c --no-use-agent --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}
64
65 The GPG mechanism depends on a strong passphrase for security. One way to
66 generate a strong passphrase is using your system random number generator, i.e.::
67
68 dd if=/dev/urandom count=20 bs=1 | xxd -ps
69
70 (See U{StackExchange <http://security.stackexchange.com/questions/14867/gpg-encryption-security>}
71 for more details about that advice.) If you decide to use encryption, make sure
72 you save off the passphrase in a safe place, so you can get at your backup data
73 later if you need to. And obviously, make sure to set permissions on the
74 passphrase file so it can only be read by the backup user.
75
76 This extension was written for and tested on Linux. It will throw an exception
77 if run on Windows.
78
79 @author: Kenneth J. Pronovici <pronovic@ieee.org>
80 """

import sys
import os
import logging
import tempfile
import datetime
import json
import shutil
from functools import total_ordering

from CedarBackup3.filesystem import FilesystemList, BackupFileList
from CedarBackup3.util import resolveCommand, executeCommand, isRunningAsRoot, changeOwnership, isStartOfWeek
from CedarBackup3.xmlutil import createInputDom, addContainerNode, addBooleanNode, addStringNode, addLongNode
from CedarBackup3.xmlutil import readFirstChild, readString, readBoolean, readLong
from CedarBackup3.actions.util import writeIndicatorFile
from CedarBackup3.actions.constants import DIR_TIME_FORMAT, STAGE_INDICATOR

logger = logging.getLogger("CedarBackup3.log.extend.amazons3")

SU_COMMAND = ["su"]
AWS_COMMAND = ["aws"]

STORE_INDICATOR = "cback.amazons3"

@total_ordering
class AmazonS3Config(object):

    """
    Class representing Amazon S3 configuration.

    Amazon S3 configuration is used for storing backup data in Amazon's S3
    cloud storage using the C{aws} command line tool (the AWS CLI).

    The following restrictions exist on data in this class:

       - The s3Bucket value must be a non-empty string
       - The encryptCommand value, if set, must be a non-empty string
       - The full backup size limit, if set, must be a number of bytes >= 0
       - The incremental backup size limit, if set, must be a number of bytes >= 0

    @sort: __init__, __repr__, __str__, __cmp__, __eq__, __lt__, __gt__,
           warnMidnite, s3Bucket, encryptCommand, fullBackupSizeLimit,
           incrementalBackupSizeLimit
    """

    def __init__(self, warnMidnite=None, s3Bucket=None, encryptCommand=None,
                 fullBackupSizeLimit=None, incrementalBackupSizeLimit=None):
        """
        Constructor for the C{AmazonS3Config} class.

        @param warnMidnite: Whether to generate warnings for crossing midnite.
        @param s3Bucket: Name of the Amazon S3 bucket in which to store the data
        @param encryptCommand: Command used to encrypt backup data before upload to S3
        @param fullBackupSizeLimit: Maximum size of a full backup, in bytes
        @param incrementalBackupSizeLimit: Maximum size of an incremental backup, in bytes

        @raise ValueError: If one of the values is invalid.
        """
        self._warnMidnite = None
        self._s3Bucket = None
        self._encryptCommand = None
        self._fullBackupSizeLimit = None
        self._incrementalBackupSizeLimit = None
        self.warnMidnite = warnMidnite
        self.s3Bucket = s3Bucket
        self.encryptCommand = encryptCommand
        self.fullBackupSizeLimit = fullBackupSizeLimit
        self.incrementalBackupSizeLimit = incrementalBackupSizeLimit

    def __repr__(self):
        """
        Official string representation for class instance.
        """
        return "AmazonS3Config(%s, %s, %s, %s, %s)" % (self.warnMidnite, self.s3Bucket, self.encryptCommand,
                                                       self.fullBackupSizeLimit, self.incrementalBackupSizeLimit)

    def __str__(self):
        """
        Informal string representation for class instance.
        """
        return self.__repr__()

    def __eq__(self, other):
        """Equals operator, implemented in terms of original Python 2 compare operator."""
        return self.__cmp__(other) == 0

    def __lt__(self, other):
        """Less-than operator, implemented in terms of original Python 2 compare operator."""
        return self.__cmp__(other) < 0

    def __gt__(self, other):
        """Greater-than operator, implemented in terms of original Python 2 compare operator."""
        return self.__cmp__(other) > 0

    def __cmp__(self, other):
        """
        Original Python 2 comparison operator.
        @param other: Other object to compare to.
        @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
        """
        if other is None:
            return 1
        if self.warnMidnite != other.warnMidnite:
            if self.warnMidnite < other.warnMidnite:
                return -1
            else:
                return 1
        if self.s3Bucket != other.s3Bucket:
            if self.s3Bucket < other.s3Bucket:
                return -1
            else:
                return 1
        if self.encryptCommand != other.encryptCommand:
            if self.encryptCommand < other.encryptCommand:
                return -1
            else:
                return 1
        if self.fullBackupSizeLimit != other.fullBackupSizeLimit:
            if self.fullBackupSizeLimit < other.fullBackupSizeLimit:
                return -1
            else:
                return 1
        if self.incrementalBackupSizeLimit != other.incrementalBackupSizeLimit:
            if self.incrementalBackupSizeLimit < other.incrementalBackupSizeLimit:
                return -1
            else:
                return 1
        return 0
226 """
227 Property target used to set the midnite warning flag.
228 No validations, but we normalize the value to C{True} or C{False}.
229 """
230 if value:
231 self._warnMidnite = True
232 else:
233 self._warnMidnite = False
234
236 """
237 Property target used to get the midnite warning flag.
238 """
239 return self._warnMidnite
240
242 """
243 Property target used to set the S3 bucket.
244 """
245 if value is not None:
246 if len(value) < 1:
247 raise ValueError("S3 bucket must be non-empty string.")
248 self._s3Bucket = value
249
251 """
252 Property target used to get the S3 bucket.
253 """
254 return self._s3Bucket
255
257 """
258 Property target used to set the encrypt command.
259 """
260 if value is not None:
261 if len(value) < 1:
262 raise ValueError("Encrypt command must be non-empty string.")
263 self._encryptCommand = value
264
266 """
267 Property target used to get the encrypt command.
268 """
269 return self._encryptCommand
270
272 """
273 Property target used to set the full backup size limit.
274 The value must be an integer >= 0.
275 @raise ValueError: If the value is not valid.
276 """
277 if value is None:
278 self._fullBackupSizeLimit = None
279 else:
280 try:
281 value = int(value)
282 except TypeError:
283 raise ValueError("Full backup size limit must be an integer >= 0.")
284 if value < 0:
285 raise ValueError("Full backup size limit must be an integer >= 0.")
286 self._fullBackupSizeLimit = value
287
289 """
290 Property target used to get the full backup size limit.
291 """
292 return self._fullBackupSizeLimit
293
295 """
296 Property target used to set the incremental backup size limit.
297 The value must be an integer >= 0.
298 @raise ValueError: If the value is not valid.
299 """
300 if value is None:
301 self._incrementalBackupSizeLimit = None
302 else:
303 try:
304 value = int(value)
305 except TypeError:
306 raise ValueError("Incremental backup size limit must be an integer >= 0.")
307 if value < 0:
308 raise ValueError("Incremental backup size limit must be an integer >= 0.")
309 self._incrementalBackupSizeLimit = value
310
312 """
313 Property target used to get the incremental backup size limit.
314 """
315 return self._incrementalBackupSizeLimit
316
    warnMidnite = property(_getWarnMidnite, _setWarnMidnite, None, doc="Whether to generate warnings for crossing midnite.")
    s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 bucket in which to store data")
    encryptCommand = property(_getEncryptCommand, _setEncryptCommand, None, doc="Command used to encrypt data before upload to S3")
    fullBackupSizeLimit = property(_getFullBackupSizeLimit, _setFullBackupSizeLimit, None,
                                   doc="Maximum size of a full backup, in bytes")
    incrementalBackupSizeLimit = property(_getIncrementalBackupSizeLimit, _setIncrementalBackupSizeLimit, None,
                                          doc="Maximum size of an incremental backup, in bytes")
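
    # Illustrative usage (comment only, not from the original module): building
    # a configuration object by hand. The bucket name and the 1 GB limit are
    # hypothetical values chosen for the example.
    #
    #     amazons3 = AmazonS3Config(warnMidnite=True, s3Bucket="example-bucket",
    #                               fullBackupSizeLimit=1024*1024*1024)
    #     amazons3.s3Bucket = ""   # raises ValueError: bucket must be non-empty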
324
325
326
327
328
329
330 @total_ordering
331 -class LocalConfig(object):
332
333 """
334 Class representing this extension's configuration document.
335
336 This is not a general-purpose configuration object like the main Cedar
337 Backup configuration object. Instead, it just knows how to parse and emit
338 amazons3-specific configuration values. Third parties who need to read and
339 write configuration related to this extension should access it through the
340 constructor, C{validate} and C{addConfig} methods.
341
342 @note: Lists within this class are "unordered" for equality comparisons.
343
344 @sort: __init__, __repr__, __str__, __cmp__, __eq__, __lt__, __gt__,
345 amazons3, validate, addConfig
346 """
347
    def __init__(self, xmlData=None, xmlPath=None, validate=True):
        """
        Initializes a configuration object.

        If you initialize the object without passing either C{xmlData} or
        C{xmlPath}, then configuration will be empty and will be invalid until
        it is filled in properly.

        No reference to the original XML data or original path is saved off by
        this class. Once the data has been parsed (successfully or not) this
        original information is discarded.

        Unless the C{validate} argument is C{False}, the L{LocalConfig.validate}
        method will be called (with its default arguments) against configuration
        after successfully parsing any passed-in XML. Keep in mind that even if
        C{validate} is C{False}, it might not be possible to parse the passed-in
        XML document if lower-level validations fail.

        @note: It is strongly suggested that the C{validate} option always be set
        to C{True} (the default) unless there is a specific need to read in
        invalid configuration from disk.

        @param xmlData: XML data representing configuration.
        @type xmlData: String data.

        @param xmlPath: Path to an XML file on disk.
        @type xmlPath: Absolute path to a file on disk.

        @param validate: Validate the document after parsing it.
        @type validate: Boolean true/false.

        @raise ValueError: If both C{xmlData} and C{xmlPath} are passed in.
        @raise ValueError: If the XML data in C{xmlData} or C{xmlPath} cannot be parsed.
        @raise ValueError: If the parsed configuration document is not valid.
        """
        self._amazons3 = None
        self.amazons3 = None
        if xmlData is not None and xmlPath is not None:
            raise ValueError("Use either xmlData or xmlPath, but not both.")
        if xmlData is not None:
            self._parseXmlData(xmlData)
            if validate:
                self.validate()
        elif xmlPath is not None:
            with open(xmlPath) as f:
                xmlData = f.read()
            self._parseXmlData(xmlData)
            if validate:
                self.validate()
399 """
400 Official string representation for class instance.
401 """
402 return "LocalConfig(%s)" % (self.amazons3)
403
405 """
406 Informal string representation for class instance.
407 """
408 return self.__repr__()
409
411 """Equals operator, iplemented in terms of original Python 2 compare operator."""
412 return self.__cmp__(other) == 0
413
415 """Less-than operator, iplemented in terms of original Python 2 compare operator."""
416 return self.__cmp__(other) < 0
417
419 """Greater-than operator, iplemented in terms of original Python 2 compare operator."""
420 return self.__cmp__(other) > 0
421
423 """
424 Original Python 2 comparison operator.
425 Lists within this class are "unordered" for equality comparisons.
426 @param other: Other object to compare to.
427 @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
428 """
429 if other is None:
430 return 1
431 if self.amazons3 != other.amazons3:
432 if self.amazons3 < other.amazons3:
433 return -1
434 else:
435 return 1
436 return 0
437
439 """
440 Property target used to set the amazons3 configuration value.
441 If not C{None}, the value must be a C{AmazonS3Config} object.
442 @raise ValueError: If the value is not a C{AmazonS3Config}
443 """
444 if value is None:
445 self._amazons3 = None
446 else:
447 if not isinstance(value, AmazonS3Config):
448 raise ValueError("Value must be a C{AmazonS3Config} object.")
449 self._amazons3 = value
450
452 """
453 Property target used to get the amazons3 configuration value.
454 """
455 return self._amazons3
456
457 amazons3 = property(_getAmazonS3, _setAmazonS3, None, "AmazonS3 configuration in terms of a C{AmazonS3Config} object.")
458
460 """
461 Validates configuration represented by the object.
462
463 AmazonS3 configuration must be filled in. Within that, the s3Bucket target must be filled in
464
465 @raise ValueError: If one of the validations fails.
466 """
467 if self.amazons3 is None:
468 raise ValueError("AmazonS3 section is required.")
469 if self.amazons3.s3Bucket is None:
470 raise ValueError("AmazonS3 s3Bucket must be set.")
471
473 """
474 Adds an <amazons3> configuration section as the next child of a parent.
475
476 Third parties should use this function to write configuration related to
477 this extension.
478
479 We add the following fields to the document::
480
481 warnMidnite //cb_config/amazons3/warn_midnite
482 s3Bucket //cb_config/amazons3/s3_bucket
483 encryptCommand //cb_config/amazons3/encrypt
484 fullBackupSizeLimit //cb_config/amazons3/full_size_limit
485 incrementalBackupSizeLimit //cb_config/amazons3/incr_size_limit
486
487 @param xmlDom: DOM tree as from C{impl.createDocument()}.
488 @param parentNode: Parent that the section should be appended to.
489 """
490 if self.amazons3 is not None:
491 sectionNode = addContainerNode(xmlDom, parentNode, "amazons3")
492 addBooleanNode(xmlDom, sectionNode, "warn_midnite", self.amazons3.warnMidnite)
493 addStringNode(xmlDom, sectionNode, "s3_bucket", self.amazons3.s3Bucket)
494 addStringNode(xmlDom, sectionNode, "encrypt", self.amazons3.encryptCommand)
495 addLongNode(xmlDom, sectionNode, "full_size_limit", self.amazons3.fullBackupSizeLimit)
496 addLongNode(xmlDom, sectionNode, "incr_size_limit", self.amazons3.incrementalBackupSizeLimit)
497
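
    # Illustrative round trip (comment only): configuration written by
    # addConfig() parses back through the constructor. The bucket name below
    # is hypothetical.
    #
    #     xmlData = '<?xml version="1.0"?><cb_config><amazons3>' \
    #               '<s3_bucket>example-bucket</s3_bucket></amazons3></cb_config>'
    #     local = LocalConfig(xmlData=xmlData)
    #     local.amazons3.s3Bucket    # "example-bucket"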
499 """
500 Internal method to parse an XML string into the object.
501
502 This method parses the XML document into a DOM tree (C{xmlDom}) and then
503 calls a static method to parse the amazons3 configuration section.
504
505 @param xmlData: XML data to be parsed
506 @type xmlData: String data
507
508 @raise ValueError: If the XML cannot be successfully parsed.
509 """
510 (xmlDom, parentNode) = createInputDom(xmlData)
511 self._amazons3 = LocalConfig._parseAmazonS3(parentNode)
512

    @staticmethod
    def _parseAmazonS3(parentNode):
        """
        Parses an amazons3 configuration section.

        We read the following individual fields::

           warnMidnite                //cb_config/amazons3/warn_midnite
           s3Bucket                   //cb_config/amazons3/s3_bucket
           encryptCommand             //cb_config/amazons3/encrypt
           fullBackupSizeLimit        //cb_config/amazons3/full_size_limit
           incrementalBackupSizeLimit //cb_config/amazons3/incr_size_limit

        @param parentNode: Parent node to search beneath.

        @return: C{AmazonS3Config} object or C{None} if the section does not exist.
        @raise ValueError: If some filled-in value is invalid.
        """
        amazons3 = None
        section = readFirstChild(parentNode, "amazons3")
        if section is not None:
            amazons3 = AmazonS3Config()
            amazons3.warnMidnite = readBoolean(section, "warn_midnite")
            amazons3.s3Bucket = readString(section, "s3_bucket")
            amazons3.encryptCommand = readString(section, "encrypt")
            amazons3.fullBackupSizeLimit = readLong(section, "full_size_limit")
            amazons3.incrementalBackupSizeLimit = readLong(section, "incr_size_limit")
        return amazons3


def executeAction(configPath, options, config):
    """
    Executes the amazons3 backup action.

    @param configPath: Path to configuration file on disk.
    @type configPath: String representing a path on disk.

    @param options: Program command-line options.
    @type options: Options object.

    @param config: Program configuration.
    @type config: Config object.

    @raise ValueError: Under many generic error conditions
    @raise IOError: If there are I/O problems reading or writing files
    """
    logger.debug("Executing amazons3 extended action.")
    if not isRunningAsRoot():
        logger.error("Error: the amazons3 extended action must be run as root.")
        raise ValueError("The amazons3 extended action must be run as root.")
    if sys.platform == "win32":
        logger.error("Error: the amazons3 extended action is not supported on Windows.")
        raise ValueError("The amazons3 extended action is not supported on Windows.")
    if config.options is None or config.stage is None:
        raise ValueError("Cedar Backup configuration is not properly filled in.")
    local = LocalConfig(xmlPath=configPath)
    stagingDirs = _findCorrectDailyDir(options, config, local)
    _applySizeLimits(options, config, local, stagingDirs)
    _writeToAmazonS3(config, local, stagingDirs)
    _writeStoreIndicator(config, stagingDirs)
    logger.info("Executed the amazons3 extended action successfully.")
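
# Illustrative wiring (an assumption based on the standard Cedar Backup
# extension mechanism, not taken verbatim from this module): the action is
# hooked in through the <extensions> section of cback3 configuration, with an
# <index> value chosen so it runs after stage. All values are hypothetical.
#
#     <extensions>
#        <action>
#           <name>amazons3</name>
#           <module>CedarBackup3.extend.amazons3</module>
#           <function>executeAction</function>
#           <index>201</index>
#        </action>
#     </extensions>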
593 """
594 Finds the correct daily staging directory to be written to Amazon S3.
595
596 This is substantially similar to the same function in store.py. The
597 main difference is that it doesn't rely on store configuration at all.
598
599 @param options: Options object.
600 @param config: Config object.
601 @param local: Local config object.
602
603 @return: Correct staging dir, as a dict mapping directory to date suffix.
604 @raise IOError: If the staging directory cannot be found.
605 """
606 oneDay = datetime.timedelta(days=1)
607 today = datetime.date.today()
608 yesterday = today - oneDay
609 tomorrow = today + oneDay
610 todayDate = today.strftime(DIR_TIME_FORMAT)
611 yesterdayDate = yesterday.strftime(DIR_TIME_FORMAT)
612 tomorrowDate = tomorrow.strftime(DIR_TIME_FORMAT)
613 todayPath = os.path.join(config.stage.targetDir, todayDate)
614 yesterdayPath = os.path.join(config.stage.targetDir, yesterdayDate)
615 tomorrowPath = os.path.join(config.stage.targetDir, tomorrowDate)
616 todayStageInd = os.path.join(todayPath, STAGE_INDICATOR)
617 yesterdayStageInd = os.path.join(yesterdayPath, STAGE_INDICATOR)
618 tomorrowStageInd = os.path.join(tomorrowPath, STAGE_INDICATOR)
619 todayStoreInd = os.path.join(todayPath, STORE_INDICATOR)
620 yesterdayStoreInd = os.path.join(yesterdayPath, STORE_INDICATOR)
621 tomorrowStoreInd = os.path.join(tomorrowPath, STORE_INDICATOR)
622 if options.full:
623 if os.path.isdir(todayPath) and os.path.exists(todayStageInd):
624 logger.info("Amazon S3 process will use current day's staging directory [%s]", todayPath)
625 return { todayPath:todayDate }
626 raise IOError("Unable to find staging directory to process (only tried today due to full option).")
627 else:
628 if os.path.isdir(todayPath) and os.path.exists(todayStageInd) and not os.path.exists(todayStoreInd):
629 logger.info("Amazon S3 process will use current day's staging directory [%s]", todayPath)
630 return { todayPath:todayDate }
631 elif os.path.isdir(yesterdayPath) and os.path.exists(yesterdayStageInd) and not os.path.exists(yesterdayStoreInd):
632 logger.info("Amazon S3 process will use previous day's staging directory [%s]", yesterdayPath)
633 if local.amazons3.warnMidnite:
634 logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
635 return { yesterdayPath:yesterdayDate }
636 elif os.path.isdir(tomorrowPath) and os.path.exists(tomorrowStageInd) and not os.path.exists(tomorrowStoreInd):
637 logger.info("Amazon S3 process will use next day's staging directory [%s]", tomorrowPath)
638 if local.amazons3.warnMidnite:
639 logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
640 return { tomorrowPath:tomorrowDate }
641 raise IOError("Unable to find unused staging directory to process (tried today, yesterday, tomorrow).")
642
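
# For illustration: assuming DIR_TIME_FORMAT yields a year/month/day path such
# as "2005/02/10" (consistent with the example in _writeToAmazonS3 below), a
# successful lookup returns a single-entry dict like
# {"/opt/stage/2005/02/10": "2005/02/10"}, mapping the staging directory to
# the date suffix that is later used as the S3 key prefix.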
649 """
650 Apply size limits, throwing an exception if any limits are exceeded.
651
652 Size limits are optional. If a limit is set to None, it does not apply.
653 The full size limit applies if the full option is set or if today is the
654 start of the week. The incremental size limit applies otherwise. Limits
655 are applied to the total size of all the relevant staging directories.
656
657 @param options: Options object.
658 @param config: Config object.
659 @param local: Local config object.
660 @param stagingDirs: Dictionary mapping directory path to date suffix.
661
662 @raise ValueError: Under many generic error conditions
663 @raise ValueError: If a size limit has been exceeded
664 """
665 if options.full or isStartOfWeek(config.options.startingDay):
666 logger.debug("Using Amazon S3 size limit for full backups.")
667 limit = local.amazons3.fullBackupSizeLimit
668 else:
669 logger.debug("Using Amazon S3 size limit for incremental backups.")
670 limit = local.amazons3.incrementalBackupSizeLimit
671 if limit is None:
672 logger.debug("No Amazon S3 size limit will be applied.")
673 else:
674 logger.debug("Amazon S3 size limit is: %d bytes", limit)
675 contents = BackupFileList()
676 for stagingDir in stagingDirs:
677 contents.addDirContents(stagingDir)
678 total = contents.totalSize()
679 logger.debug("Amazon S3 backup size is is: %d bytes", total)
680 if total > limit:
681 logger.error("Amazon S3 size limit exceeded: %.0f bytes > %d bytes", total, limit)
682 raise ValueError("Amazon S3 size limit exceeded: %.0f bytes > %d bytes" % (total, limit))
683 else:
684 logger.info("Total size does not exceed Amazon S3 size limit, so backup can continue.")
685
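
# Decision sketch: with config.options.startingDay set to "monday", a run on
# Monday (or any run with the full option) is checked against
# fullBackupSizeLimit, while runs on other days are checked against
# incrementalBackupSizeLimit; a limit left unset (None) is never enforced.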
692 """
693 Writes the indicated staging directories to an Amazon S3 bucket.
694
695 Each of the staging directories listed in C{stagingDirs} will be written to
696 the configured Amazon S3 bucket from local configuration. The directories
697 will be placed into the image at the root by date, so staging directory
698 C{/opt/stage/2005/02/10} will be placed into the S3 bucket at C{/2005/02/10}.
699 If an encrypt commmand is provided, the files will be encrypted first.
700
701 @param config: Config object.
702 @param local: Local config object.
703 @param stagingDirs: Dictionary mapping directory path to date suffix.
704
705 @raise ValueError: Under many generic error conditions
706 @raise IOError: If there is a problem writing to Amazon S3
707 """
708 for stagingDir in list(stagingDirs.keys()):
709 logger.debug("Storing stage directory to Amazon S3 [%s].", stagingDir)
710 dateSuffix = stagingDirs[stagingDir]
711 s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
712 logger.debug("S3 bucket URL is [%s]", s3BucketUrl)
713 _clearExistingBackup(config, s3BucketUrl)
714 if local.amazons3.encryptCommand is None:
715 logger.debug("Encryption is disabled; files will be uploaded in cleartext.")
716 _uploadStagingDir(config, stagingDir, s3BucketUrl)
717 _verifyUpload(config, stagingDir, s3BucketUrl)
718 else:
719 logger.debug("Encryption is enabled; files will be uploaded after being encrypted.")
720 encryptedDir = tempfile.mkdtemp(dir=config.options.workingDir)
721 changeOwnership(encryptedDir, config.options.backupUser, config.options.backupGroup)
722 try:
723 _encryptStagingDir(config, local, stagingDir, encryptedDir)
724 _uploadStagingDir(config, encryptedDir, s3BucketUrl)
725 _verifyUpload(config, encryptedDir, s3BucketUrl)
726 finally:
727 if os.path.exists(encryptedDir):
728 shutil.rmtree(encryptedDir)
729

def _writeStoreIndicator(config, stagingDirs):
    """
    Writes a store indicator file into the backed-up staging directories.
    @param config: Config object.
    @param stagingDirs: Dictionary mapping directory path to date suffix.
    """
    for stagingDir in list(stagingDirs.keys()):
        writeIndicatorFile(stagingDir, STORE_INDICATOR, config.options.backupUser, config.options.backupGroup)


def _clearExistingBackup(config, s3BucketUrl):
    """
    Clears any existing backup files for an S3 bucket URL.
    @param config: Config object.
    @param s3BucketUrl: S3 bucket URL associated with the staging directory
    """
    suCommand = resolveCommand(SU_COMMAND)
    awsCommand = resolveCommand(AWS_COMMAND)
    actualCommand = "%s s3 rm --recursive %s/" % (awsCommand[0], s3BucketUrl)
    result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
    if result != 0:
        raise IOError("Error [%d] calling AWS CLI to clear existing backup for [%s]." % (result, s3BucketUrl))
    logger.debug("Completed clearing any existing backup in S3 for [%s]", s3BucketUrl)
771 """
772 Upload the contents of a staging directory out to the Amazon S3 cloud.
773 @param config: Config object.
774 @param stagingDir: Staging directory to upload
775 @param s3BucketUrl: S3 bucket URL associated with the staging directory
776 """
777 suCommand = resolveCommand(SU_COMMAND)
778 awsCommand = resolveCommand(AWS_COMMAND)
779 actualCommand = "%s s3 cp --recursive %s/ %s/" % (awsCommand[0], stagingDir, s3BucketUrl)
780 result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
781 if result != 0:
782 raise IOError("Error [%d] calling AWS CLI to upload staging directory to [%s]." % (result, s3BucketUrl))
783 logger.debug("Completed uploading staging dir [%s] to [%s]", stagingDir, s3BucketUrl)
784
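
# The two helpers above shell out through su so that the backup user's AWS
# credentials are used. For a backup user "backup" and bucket URL
# "s3://example-bucket/2005/02/10" (hypothetical values), the generated
# command lines look roughly like:
#
#     su backup -c "aws s3 rm --recursive s3://example-bucket/2005/02/10/"
#     su backup -c "aws s3 cp --recursive /opt/stage/2005/02/10/ s3://example-bucket/2005/02/10/"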

def _verifyUpload(config, stagingDir, s3BucketUrl):
    """
    Verifies that a staging directory was properly uploaded to the Amazon S3 cloud.
    @param config: Config object.
    @param stagingDir: Staging directory to verify
    @param s3BucketUrl: S3 bucket URL associated with the staging directory
    """
    (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/", 1)
    suCommand = resolveCommand(SU_COMMAND)
    awsCommand = resolveCommand(AWS_COMMAND)
    query = "Contents[].{Key: Key, Size: Size}"
    actualCommand = "%s s3api list-objects --bucket %s --prefix %s --query '%s'" % (awsCommand[0], bucket, prefix, query)
    (result, data) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
    if result != 0:
        raise IOError("Error [%d] calling AWS CLI to verify upload to [%s]." % (result, s3BucketUrl))
    contents = {}
    for entry in json.loads("".join(data)):
        key = entry["Key"].replace(prefix, "")
        size = int(entry["Size"])
        contents[key] = size
    files = FilesystemList()
    files.addDirContents(stagingDir)
    for entry in files:
        if os.path.isfile(entry):
            key = entry.replace(stagingDir, "")
            size = int(os.stat(entry).st_size)
            if key not in contents:
                raise IOError("File was apparently not uploaded: [%s]" % entry)
            if size != contents[key]:
                raise IOError("File size differs [%s], expected %s bytes but got %s bytes" % (entry, size, contents[key]))
    logger.debug("Completed verifying upload from [%s] to [%s].", stagingDir, s3BucketUrl)
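
# The verification relies on the s3api query "Contents[].{Key: Key, Size: Size}"
# reducing the list-objects response to a JSON array like the following
# (hypothetical listing), which is then compared file-by-file against the
# local staging directory:
#
#     [{"Key": "2005/02/10/cback.stage", "Size": 0},
#      {"Key": "2005/02/10/daily/data.tar.gz", "Size": 1048576}]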
829 """
830 Encrypt a staging directory, creating a new directory in the process.
831 @param config: Config object.
832 @param stagingDir: Staging directory to use as source
833 @param encryptedDir: Target directory into which encrypted files should be written
834 """
835 suCommand = resolveCommand(SU_COMMAND)
836 files = FilesystemList()
837 files.addDirContents(stagingDir)
838 for cleartext in files:
839 if os.path.isfile(cleartext):
840 encrypted = "%s%s" % (encryptedDir, cleartext.replace(stagingDir, ""))
841 if int(os.stat(cleartext).st_size) == 0:
842 with open(encrypted, 'a') as f:
843 f.close()
844 else:
845 actualCommand = local.amazons3.encryptCommand.replace("${input}", cleartext).replace("${output}", encrypted)
846 subdir = os.path.dirname(encrypted)
847 if not os.path.isdir(subdir):
848 os.makedirs(subdir)
849 changeOwnership(subdir, config.options.backupUser, config.options.backupGroup)
850 result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
851 if result != 0:
852 raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
853 logger.debug("Completed encrypting staging directory [%s] into [%s]", stagingDir, encryptedDir)
854
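
# Substitution sketch: with the GPG command from the module docstring configured
# as the encrypt command, a cleartext file such as
# /opt/stage/2005/02/10/daily/data.tar.gz (hypothetical path) is encrypted by
# running, as the backup user:
#
#     /usr/bin/gpg -c --no-use-agent --batch --yes \
#        --passphrase-file /home/backup/.passphrase \
#        -o <encryptedDir>/daily/data.tar.gz /opt/stage/2005/02/10/daily/data.tar.gz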