pushing some updates; luks logging not done

2019-12-20 12:50:41 -05:00
parent 4418348e78
commit a65ef8232a
13 changed files with 462 additions and 279 deletions

View File

@@ -20,7 +20,7 @@ class Config(object):
self.xsd = None
self.defaultsParser = None
self.obj = None
_logger.debug('Instantiated {0}.'.format(type(self).__name__))
_logger.info('Instantiated {0}.'.format(type(self).__name__))
def main(self, validate = True, populate_defaults = True):
self.fetch()
@@ -135,7 +135,7 @@ class Config(object):
for e in x.xpath(xpathq):
e.tag = etree.QName(e).localname
elif isinstance(obj, (etree._Element, etree._ElementTree)):
_logger.debug('XML object provided: {0}'.format(etree.tostring(obj)))
_logger.debug('XML object provided: {0}'.format(etree.tostring(obj, with_tail = False).decode('utf-8')))
obj = copy.deepcopy(obj)
for e in obj.xpath(xpathq):
e.tag = etree.QName(e).localname

View File

@@ -1,13 +1,20 @@
import logging
##
import gi
gi.require_version('BlockDev', '2.0')
from gi.repository import BlockDev, GLib
BlockDev.ensure_init([None])
_logger = logging.getLogger('disk:_common')
def addBDPlugin(plugin_name):
_logger.info('Enabling plugin: {0}'.format(plugin_name))
plugins = BlockDev.get_available_plugin_names()
plugins.append(plugin_name)
plugins = list(set(plugins)) # Deduplicate
_logger.debug('Currently loaded plugins: {0}'.format(','.join(plugins)))
spec = BlockDev.plugin_specs_from_names(plugins)
_logger.debug('Plugin {0} loaded.'.format(plugin_name))
return(BlockDev.ensure_init(spec))
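# Editor's note -- a minimal usage sketch (not part of this commit): callers elsewhere in
# this changeset enable the plugins they need before using libblockdev, e.g. the 'part'
# plugin in the block code and 'crypto' in the LUKS code below.
from aif.disk import _common
_common.addBDPlugin('part')    # plugin names are deduplicated, so repeat calls are safe
_common.addBDPlugin('crypto')  # returns the result of BlockDev.ensure_init(spec)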

View File

@@ -1,8 +1,10 @@
import logging
import os
import uuid
##
import blkinfo
import psutil # Do I need this if I can have libblockdev's mounts API? Is there a way to get current mounts?
# import psutil # Do I need this if I can have libblockdev's mounts API? Is there a way to get current mounts?
from lxml import etree
##
import aif.constants
import aif.utils
@@ -10,20 +12,122 @@ from . import _common
_BlockDev = _common.BlockDev
_logger = logging.getLogger(__name__)
class Disk(object):
def __init__(self, disk_xml):
self.xml = disk_xml
_logger.debug('disk_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
self.devpath = os.path.realpath(self.xml.attrib['device'])
aif.disk._common.addBDPlugin('part')
self.is_lowformatted = None
self.is_hiformatted = None
self.is_partitioned = None
self.partitions = None
self._initDisk()
def _initDisk(self):
if self.devpath == 'auto':
self.devpath = '/dev/{0}'.format(blkinfo.BlkDiskInfo().get_disks()[0]['kname'])
if not os.path.exists(self.devpath):  # exists() rather than isfile(); device nodes are not regular files
_logger.error('Disk {0} does not exist; please specify an explicit device path'.format(self.devpath))
raise ValueError('Disk not found')
self.table_type = self.xml.attrib.get('diskFormat', 'gpt').lower()
if self.table_type in ('bios', 'mbr', 'dos', 'msdos'):
_logger.debug('Disk format set to MSDOS.')
self.table_type = _BlockDev.PartTableType.MSDOS
elif self.table_type == 'gpt':
self.table_type = _BlockDev.PartTableType.GPT
_logger.debug('Disk format set to GPT.')
else:
_logger.error('Disk format {0} is invalid for this system\'s architecture; must be gpt or msdos'.format(self.table_type))
raise ValueError('Invalid disk format')
self.device = self.disk = _BlockDev.part.get_disk_spec(self.devpath)
self.is_lowformatted = False
self.is_hiformatted = False
self.is_partitioned = False
self.partitions = []
return(None)
def diskFormat(self):
if self.is_lowformatted:
return(None)
# This is a safeguard. We do *not* want to low-format a disk that is mounted.
aif.utils.checkMounted(self.devpath)
# TODO: BlockDev.part.set_disk_flag(<disk>,
# BlockDev.PartDiskFlag(1),
# True) ??
# https://lazka.github.io/pgi-docs/BlockDev-2.0/enums.html#BlockDev.PartDiskFlag
# https://unix.stackexchange.com/questions/325886/bios-gpt-do-we-need-a-boot-flag
_BlockDev.part.create_table(self.devpath, self.table_type, True)
self.is_lowformatted = True
self.is_partitioned = False
return(None)
def getPartitions(self):
# For GPT, this *technically* should be 34 -- or, more precisely, 2048 (see FAQ in manual), but the alignment
# optimizer fixes it for us automatically.
# But for DOS tables, it's required.
_logger.info('Establishing partitions for {0}'.format(self.devpath))
if self.table_type == 'msdos':
start_sector = 2048
else:
start_sector = 0
self.partitions = []
xml_partitions = self.xml.findall('part')
for idx, part in enumerate(xml_partitions):
partnum = idx + 1
if self.table_type == 'gpt':
p = Partition(part, self.disk, start_sector, partnum, self.table_type)
else:
parttype = 'primary'
if len(xml_partitions) > 4:
if partnum == 4:
parttype = 'extended'
elif partnum > 4:
parttype = 'logical'
p = Partition(part, self.disk, start_sector, partnum, self.table_type, part_type = parttype)
start_sector = p.end + 1
self.partitions.append(p)
_logger.debug('Added partition {0}'.format(p.id))
return(None)
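# Editor's note -- the msdos branch above maps partition index to type roughly like this
# (standalone sketch for illustration only, not part of this commit):
def _msdos_part_type(partnum, total_parts):
    # On an msdos/MBR label with more than four partitions, slot 4 becomes the extended
    # container and every later slot is a logical partition inside it.
    if total_parts > 4 and partnum == 4:
        return('extended')
    elif total_parts > 4 and partnum > 4:
        return('logical')
    return('primary')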
def partFormat(self):
if self.is_partitioned:
return(None)
if not self.is_lowformatted:
self.diskFormat()
# This is a safeguard. We do *not* want to partition a disk that is mounted.
aif.utils.checkMounted(self.devpath)
if not self.partitions:
self.getPartitions()
if not self.partitions:
return(None)
for p in self.partitions:
p.format()
p.is_hiformatted = True
self.is_partitioned = True
return(None)
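# Editor's note -- illustrative call order for the Disk object above (not part of this
# commit; 'disk_xml' stands in for a parsed <disk> element):
#
#   d = Disk(disk_xml)
#   d.partFormat()  # runs diskFormat() and getPartitions() itself if they haven't run yet,
#                   # then calls format() on each Partition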
# TODO: LOGGING!
class Partition(object):
def __init__(self, part_xml, diskobj, start_sector, partnum, tbltype, part_type = None):
# Believe it or not, dear reader, this *entire method* is just to set attributes.
if tbltype not in ('gpt', 'msdos'):
raise ValueError('{0} must be one of gpt or msdos'.format(tbltype))
_logger.error('Invalid tabletype specified: {0}. Must be one of: gpt,msdos.'.format(tbltype))
raise ValueError('Invalid tbltype.')
if tbltype == 'msdos' and part_type not in ('primary', 'extended', 'logical'):
raise ValueError(('You must specify if this is a '
'primary, extended, or logical partition for msdos partition tables'))
_logger.error(('Table type msdos requires the part_type to be specified and must be one of: primary,'
'extended,logical (instead of: {0}).').format(part_type))
raise ValueError('The part_type must be specified for msdos tables')
aif.disk._common.addBDPlugin('part')
self.xml = part_xml
_logger.debug('part_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
_logger.debug('Partition number: {0}'.format(partnum))
self.id = self.xml.attrib['id']
self.table_type = getattr(_BlockDev.PartTableType, tbltype.upper())
_logger.debug('Partition table type: {0}.'.format(tbltype))
if tbltype == 'msdos':
# Could technically be _BlockDev.PartTypeReq.NEXT BUT that doesn't *quite* work
# with this project's structure.
@@ -41,6 +145,7 @@ class Partition(object):
self.disk = diskobj
self.device = self.disk.path
self.devpath = '{0}{1}'.format(self.device, self.partnum)
_logger.debug('Assigned to disk: {0} ({1}) at path {2}'.format(self.disk.id, self.device, self.devpath))
self.is_hiformatted = False
sizes = {}
for s in ('start', 'stop'):
@@ -70,7 +175,9 @@ class Partition(object):
else:
self.end = self.begin + sizes['stop'][0]
self.size = (self.end - self.begin)
_logger.debug('Size: {0} sectors (sector {1} to {2}).'.format(self.size, self.begin, self.end))
self.part_name = self.xml.attrib.get('name')
_logger.debug('Partition name: {0}'.format(self.part_name))
self.partition = None
self._initFlags()
self._initFstype()
@@ -86,131 +193,47 @@ class Partition(object):
else:
continue
self.flags.append(_BlockDev.PartFlag(flag_id))
_logger.debug('Partition flags: {0}'.format(','.join([str(f) for f in self.flags])))  # str() needed; flags are PartFlag enum members
return(None)
def _initFstype(self):
_err = ('{0} is not a valid partition filesystem type; '
'must be one of {1} or an fdisk-compatible GPT GUID').format(
self.xml.attrib['fsType'],
', '.join(sorted(aif.constants.PARTED_FSTYPES)))
if self.fs_type in aif.constants.PARTED_FSTYPES_GUIDS.keys():
self.fs_type = aif.constants.PARTED_FSTYPES_GUIDS[self.fs_type]
_logger.debug('Filesystem type (parted): {0}'.format(self.fs_type))
else:
try:
self.fs_type = uuid.UUID(hex = self.fs_type)
_logger.debug('Filesystem type (explicit GUID): {0}'.format(str(self.fs_type)))
except ValueError:
raise ValueError(_err)
_logger.error('Partition type GUID {0} is not a valid UUID4 string'.format(self.fs_type))
raise ValueError('Invalid partition type GUID')
if self.fs_type not in aif.constants.GPT_GUID_IDX.keys():
raise ValueError(_err)
_logger.error('Partition type GUID {0} is not a valid partition type'.format(self.fs_type))
raise ValueError('Invalid partition type value')
return(None)
def format(self):
_logger.info('Formatting partition {0}.'.format(self.id))
# This is a safeguard. We do *not* want to format a partition that is mounted.
aif.utils.checkMounted(self.devpath)
_logger.info('Creating partition object.')
self.partition = _BlockDev.part.create_part(self.device,
self.part_type,
self.begin,
self.size,
_BlockDev.PartAlign.OPTIMAL)
_logger.debug('Partition object created.')
self.devpath = self.partition.path
_logger.debug('Partition path updated: {0}'.format(self.devpath))
_BlockDev.part.set_part_type(self.device, self.devpath, str(self.fs_type).upper())
if self.part_name:
_BlockDev.part.set_part_name(self.device, self.devpath, self.part_name)
if self.flags:
for f in self.flags:
_BlockDev.part.set_part_flag(self.device, self.devpath, f, True)
_logger.info('Partition {0} formatted.'.format(self.devpath))
return(None)
#
# def detect(self):
# pass # TODO; blkinfo?
class Disk(object):
def __init__(self, disk_xml):
self.xml = disk_xml
self.devpath = os.path.realpath(self.xml.attrib['device'])
aif.disk._common.addBDPlugin('part')
self.is_lowformatted = None
self.is_hiformatted = None
self.is_partitioned = None
self.partitions = None
self._initDisk()
def _initDisk(self):
if self.devpath == 'auto':
self.devpath = '/dev/{0}'.format(blkinfo.BlkDiskInfo().get_disks()[0]['kname'])
if not os.path.isfile(self.devpath):
raise ValueError('{0} does not exist; please specify an explicit device path'.format(self.devpath))
self.table_type = self.xml.attrib.get('diskFormat', 'gpt').lower()
if self.table_type in ('bios', 'mbr', 'dos', 'msdos'):
self.table_type = _BlockDev.PartTableType.MSDOS
elif self.table_type == 'gpt':
self.table_type = _BlockDev.PartTableType.GPT
else:
raise ValueError(('Disk format {0} is not valid for this architecture; '
'must be one of: gpt or msdos'.format(self.table_type)))
self.device = self.disk = _BlockDev.part.get_disk_spec(self.devpath)
self.is_lowformatted = False
self.is_hiformatted = False
self.is_partitioned = False
self.partitions = []
return(None)
def diskFormat(self):
if self.is_lowformatted:
return ()
# This is a safeguard. We do *not* want to low-format a disk that is mounted.
aif.utils.checkMounted(self.devpath)
# TODO: BlockDev.part.set_disk_flag(<disk>,
# BlockDev.PartDiskFlag(1),
# True) ??
# https://lazka.github.io/pgi-docs/BlockDev-2.0/enums.html#BlockDev.PartDiskFlag
# https://unix.stackexchange.com/questions/325886/bios-gpt-do-we-need-a-boot-flag
_BlockDev.part.create_table(self.devpath, self.table_type, True)
self.is_lowformatted = True
self.is_partitioned = False
return(None)
def getPartitions(self):
# For GPT, this *technically* should be 34 -- or, more precisely, 2048 (see FAQ in manual), but the alignment
# optimizer fixes it for us automatically.
# But for DOS tables, it's required.
if self.table_type == 'msdos':
start_sector = 2048
else:
start_sector = 0
self.partitions = []
xml_partitions = self.xml.findall('part')
for idx, part in enumerate(xml_partitions):
partnum = idx + 1
if self.table_type == 'gpt':
p = Partition(part, self.disk, start_sector, partnum, self.table_type)
else:
parttype = 'primary'
if len(xml_partitions) > 4:
if partnum == 4:
parttype = 'extended'
elif partnum > 4:
parttype = 'logical'
p = Partition(part, self.disk, start_sector, partnum, self.table_type, part_type = parttype)
start_sector = p.end + 1
self.partitions.append(p)
return(None)
def partFormat(self):
if self.is_partitioned:
return(None)
if not self.is_lowformatted:
self.diskFormat()
# This is a safeguard. We do *not* want to partition a disk that is mounted.
aif.utils.checkMounted(self.devpath)
if not self.partitions:
self.getPartitions()
if not self.partitions:
return(None)
for p in self.partitions:
p.format()
p.is_hiformatted = True
self.is_partitioned = True
return ()

View File

@@ -3,8 +3,8 @@
# https://github.com/dcantrell/pyparted/blob/master/examples/query_device_capacity.py
# TODO: Remember to replicate genfstab behaviour.
import logging
import os
import re
try:
# https://stackoverflow.com/a/34812552/733214
# https://github.com/karelzak/util-linux/blob/master/libmount/python/test_mount_context.py#L6
@@ -15,7 +15,7 @@ except ImportError:
##
import blkinfo
import parted # https://www.gnu.org/software/parted/api/index.html
import psutil
from lxml import etree
##
import aif.constants
import aif.utils
@@ -25,14 +25,114 @@ import aif.utils
# https://unix.stackexchange.com/questions/325886/bios-gpt-do-we-need-a-boot-flag
_logger = logging.getLogger(__name__)
class Disk(object):
def __init__(self, disk_xml):
self.xml = disk_xml
_logger.debug('disk_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
self.id = self.xml.attrib['id']
self.devpath = os.path.realpath(self.xml.attrib['device'])
self.is_lowformatted = None
self.is_hiformatted = None
self.is_partitioned = None
self.partitions = None
self._initDisk()
def _initDisk(self):
if self.devpath == 'auto':
self.devpath = '/dev/{0}'.format(blkinfo.BlkDiskInfo().get_disks()[0]['kname'])
if not os.path.exists(self.devpath):  # exists() rather than isfile(); device nodes are not regular files
raise ValueError('{0} does not exist; please specify an explicit device path'.format(self.devpath))
self.table_type = self.xml.attrib.get('diskFormat', 'gpt').lower()
if self.table_type in ('bios', 'mbr', 'dos'):
self.table_type = 'msdos'
validlabels = parted.getLabels()
if self.table_type not in validlabels:
raise ValueError(('Disk format {0} is not valid for this architecture; '
'must be one of: {1}'.format(self.table_type, ', '.join(list(validlabels)))))
self.device = parted.getDevice(self.devpath)
self.disk = parted.freshDisk(self.device, self.table_type)
_logger.debug('Configured parted device for {0}.'.format(self.devpath))
self.is_lowformatted = False
self.is_hiformatted = False
self.is_partitioned = False
self.partitions = []
return(None)
def diskFormat(self):
if self.is_lowformatted:
return(None)
# This is a safeguard. We do *not* want to low-format a disk that is mounted.
aif.utils.checkMounted(self.devpath)
self.disk.deleteAllPartitions()
self.disk.commit()
self.is_lowformatted = True
self.is_partitioned = False
return(None)
def getPartitions(self):
# For GPT, this *technically* should be 34 -- or, more precisely, 2048 (see FAQ in manual), but the alignment
# optimizer fixes it for us automatically.
# But for DOS tables, it's required.
_logger.info('Establishing partitions for {0}'.format(self.devpath))
if self.table_type == 'msdos':
start_sector = 2048
else:
start_sector = 0
self.partitions = []
xml_partitions = self.xml.findall('part')
for idx, part in enumerate(xml_partitions):
partnum = idx + 1
if self.table_type == 'gpt':
p = Partition(part, self.disk, start_sector, partnum, self.table_type)
else:
parttype = 'primary'
if len(xml_partitions) > 4:
if partnum == 4:
parttype = 'extended'
elif partnum > 4:
parttype = 'logical'
p = Partition(part, self.disk, start_sector, partnum, self.table_type, part_type = parttype)
start_sector = p.end + 1
self.partitions.append(p)
_logger.debug('Added partition {0}'.format(p.id))
return(None)
def partFormat(self):
if self.is_partitioned:
return(None)
if not self.is_lowformatted:
self.diskFormat()
# This is a safeguard. We do *not* want to partition a disk that is mounted.
aif.utils.checkMounted(self.devpath)
if not self.partitions:
self.getPartitions()
if not self.partitions:
return(None)
for p in self.partitions:
self.disk.addPartition(partition = p, constraint = self.device.optimalAlignedConstraint)
self.disk.commit()
p.devpath = p.partition.path
p.is_hiformatted = True
self.is_partitioned = True
return(None)
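# Editor's note -- contrast with the libblockdev variant earlier in this commit: there,
# each Partition.format() call creates its own partition via _BlockDev.part.create_part(),
# while in this parted fallback Disk.partFormat() stages every Partition with
# disk.addPartition() and writes the whole table at once with disk.commit().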
class Partition(object):
def __init__(self, part_xml, diskobj, start_sector, partnum, tbltype, part_type = None):
if tbltype not in ('gpt', 'msdos'):
raise ValueError('{0} must be one of gpt or msdos'.format(tbltype))
_logger.error('Invalid tabletype specified: {0}. Must be one of: gpt,msdos.'.format(tbltype))
raise ValueError('Invalid tbltype.')
if tbltype == 'msdos' and part_type not in ('primary', 'extended', 'logical'):
raise ValueError(('You must specify if this is a '
'primary, extended, or logical partition for msdos partition tables'))
_logger.error(('Table type msdos requires the part_type to be specified and must be one of: primary,'
'extended,logical (instead of: {0}).').format(part_type))
raise ValueError('The part_type must be specified for msdos tables')
self.xml = part_xml
_logger.debug('part_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
_logger.debug('Partition number: {0}'.format(partnum))
_logger.debug('Partition table type: {0}.'.format(tbltype))
self.id = self.xml.attrib['id']
self.flags = set()
for f in self.xml.findall('partitionFlag'):
@@ -58,6 +158,7 @@ class Partition(object):
self.disk = diskobj
self.device = self.disk.device
self.devpath = '{0}{1}'.format(self.device.path, self.partnum)
_logger.debug('Assigned to disk: {0} ({1}) at path {2}'.format(self.disk.id, self.device, self.devpath))
self.is_hiformatted = False
sizes = {}
for s in ('start', 'stop'):
@@ -86,6 +187,7 @@ class Partition(object):
self.end = (self.device.getLength() - 1) - sizes['stop'][0]
else:
self.end = self.begin + sizes['stop'][0]
_logger.debug('Size: sector {0} to {1}.'.format(self.begin, self.end))
# TECHNICALLY we could craft the Geometry object with "length = ...", but it doesn't let us be explicit
# in configs. So we manually crunch the numbers and do it all at the end.
self.geometry = parted.Geometry(device = self.device,
@@ -110,94 +212,7 @@ class Partition(object):
# self.partition.name = self.xml.attrib.get('name')
_pedpart = self.partition.getPedPartition()
_pedpart.set_name(self.xml.attrib['name'])
_logger.debug('Partition name: {0}'.format(self.xml.attrib['name']))
#
# def detect(self):
# pass # TODO; blkinfo?
class Disk(object):
def __init__(self, disk_xml):
self.xml = disk_xml
self.id = self.xml.attrib['id']
self.devpath = os.path.realpath(self.xml.attrib['device'])
self.is_lowformatted = None
self.is_hiformatted = None
self.is_partitioned = None
self.partitions = None
self._initDisk()
def _initDisk(self):
if self.devpath == 'auto':
self.devpath = '/dev/{0}'.format(blkinfo.BlkDiskInfo().get_disks()[0]['kname'])
if not os.path.isfile(self.devpath):
raise ValueError('{0} does not exist; please specify an explicit device path'.format(self.devpath))
self.table_type = self.xml.attrib.get('diskFormat', 'gpt').lower()
if self.table_type in ('bios', 'mbr', 'dos'):
self.table_type = 'msdos'
validlabels = parted.getLabels()
if self.table_type not in validlabels:
raise ValueError(('Disk format {0} is not valid for this architecture; '
'must be one of: {1}'.format(self.table_type, ', '.join(list(validlabels)))))
self.device = parted.getDevice(self.devpath)
self.disk = parted.freshDisk(self.device, self.table_type)
self.is_lowformatted = False
self.is_hiformatted = False
self.is_partitioned = False
self.partitions = []
return(None)
def diskFormat(self):
if self.is_lowformatted:
return(None)
# This is a safeguard. We do *not* want to low-format a disk that is mounted.
aif.utils.checkMounted(self.devpath)
self.disk.deleteAllPartitions()
self.disk.commit()
self.is_lowformatted = True
self.is_partitioned = False
return(None)
def getPartitions(self):
# For GPT, this *technically* should be 34 -- or, more precisely, 2048 (see FAQ in manual), but the alignment
# optimizer fixes it for us automatically.
# But for DOS tables, it's required.
if self.table_type == 'msdos':
start_sector = 2048
else:
start_sector = 0
self.partitions = []
xml_partitions = self.xml.findall('part')
for idx, part in enumerate(xml_partitions):
partnum = idx + 1
if self.table_type == 'gpt':
p = Partition(part, self.disk, start_sector, partnum, self.table_type)
else:
parttype = 'primary'
if len(xml_partitions) > 4:
if partnum == 4:
parttype = 'extended'
elif partnum > 4:
parttype = 'logical'
p = Partition(part, self.disk, start_sector, partnum, self.table_type, part_type = parttype)
start_sector = p.end + 1
self.partitions.append(p)
return(None)
def partFormat(self):
if self.is_partitioned:
return(None)
if not self.is_lowformatted:
self.diskFormat()
# This is a safeguard. We do *not* want to partition a disk that is mounted.
aif.utils.checkMounted(self.devpath)
if not self.partitions:
self.getPartitions()
if not self.partitions:
return(None)
for p in self.partitions:
self.disk.addPartition(partition = p, constraint = self.device.optimalAlignedConstraint)
self.disk.commit()
p.devpath = p.partition.path
p.is_hiformatted = True
self.is_partitioned = True
return(None)

View File

@@ -1,7 +1,9 @@
import logging
import os
import subprocess
##
import psutil
from lxml import etree
##
import aif.disk.block as block
import aif.disk.luks as luks
@@ -10,7 +12,9 @@ import aif.disk.mdadm as mdadm
import aif.utils
from . import _common
_BlockDev = _common.BlockDev
_logger = logging.getLogger(__name__)
FS_FSTYPES = aif.utils.kernelFilesystems()
@@ -21,41 +25,54 @@ class FS(object):
# http://storaged.org/doc/udisks2-api/latest/gdbus-org.freedesktop.UDisks2.Filesystem.html#gdbus-interface-org-freedesktop-UDisks2-Filesystem.top_of_page
# http://storaged.org/doc/udisks2-api/latest/ ?
self.xml = fs_xml
_logger.debug('fs_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
self.id = self.xml.attrib['id']
if not isinstance(sourceobj, (block.Disk,
block.Partition,
luks.LUKS,
lvm.LV,
mdadm.Array)):
raise ValueError(('sourceobj must be of type '
'aif.disk.block.Partition, '
'aif.disk.luks.LUKS, '
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array'))
_logger.error(('sourceobj must be of type '
'aif.disk.block.Partition, '
'aif.disk.luks.LUKS, '
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array.'))
raise ValueError('Invalid sourceobj type')
self.source = sourceobj
self.devpath = sourceobj.devpath
self.formatted = False
self.fstype = self.xml.attrib.get('type')
if self.fstype not in FS_FSTYPES:
raise ValueError('{0} is not a supported filesystem type on this system'.format(self.fstype))
_logger.error('{0} is not a supported filesystem type on this system.'.format(self.fstype))
raise ValueError('Invalid filesystem type')
def format(self):
if self.formatted:
return ()
return(None)
# This is a safeguard. We do *not* want to high-format a disk that is mounted.
aif.utils.checkMounted(self.devpath)
# TODO: Can I format with DBus/gobject-introspection? I feel like I *should* be able to, but BlockDev's fs
# plugin is *way* too limited in terms of filesystems and UDisks doesn't let you format that high-level.
# TODO! Logging
cmd = ['mkfs',
'-t', self.fstype]
_logger.info('Formatting {0}.'.format(self.devpath))
cmd_str = ['mkfs',
'-t', self.fstype]
for o in self.xml.findall('opt'):
cmd.append(o.attrib['name'])
cmd_str.append(o.attrib['name'])
if o.text:
cmd.append(o.text)
cmd.append(self.devpath)
subprocess.run(cmd)
self.formatted = True
cmd_str.append(o.text)
cmd_str.append(self.devpath)
cmd = subprocess.run(cmd_str, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
_logger.info('Executed: {0}'.format(' '.join(cmd.args)))
if cmd.returncode != 0:
_logger.warning('Command returned non-zero status')
_logger.debug('Exit status: {0}'.format(str(cmd.returncode)))
for a in ('stdout', 'stderr'):
x = getattr(cmd, a)
if x:
_logger.debug('{0}: {1}'.format(a.upper(), x.decode('utf-8').strip()))
raise RuntimeError('Failed to format successfully')
else:
self.formatted = True
return(None)
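# Editor's note -- the run/log/raise pattern above is repeated elsewhere in this commit
# (the fallback mount/unmount code and the modprobe call in aif.utils); a shared helper
# along these lines (hypothetical, not part of the commit) would capture it:
import logging
import subprocess

def run_and_log(cmd_str, logger, err_msg = 'Command failed'):
    # Run the command, mirror what the inline code above logs, and raise on failure.
    cmd = subprocess.run(cmd_str, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    logger.info('Executed: {0}'.format(' '.join(cmd.args)))
    if cmd.returncode != 0:
        logger.warning('Command returned non-zero status')
        logger.debug('Exit status: {0}'.format(str(cmd.returncode)))
        for a in ('stdout', 'stderr'):
            x = getattr(cmd, a)
            if x:
                logger.debug('{0}: {1}'.format(a.upper(), x.decode('utf-8').strip()))
        raise RuntimeError(err_msg)
    return(cmd)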
@@ -63,8 +80,10 @@ class Mount(object):
# http://storaged.org/doc/udisks2-api/latest/gdbus-org.freedesktop.UDisks2.Filesystem.html#gdbus-method-org-freedesktop-UDisks2-Filesystem.Mount
def __init__(self, mount_xml, fsobj):
self.xml = mount_xml
_logger.debug('mount_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
if not isinstance(fsobj, FS):
raise ValueError('partobj must be of type aif.disk.filesystem.FS')
_logger.error('fsobj must be of type aif.disk.filesystem.FS.')
raise ValueError('Invalid type for fsobj')
_common.addBDPlugin('fs') # We *could* use the UDisks dbus to mount too, but best to stay within libblockdev.
self.id = self.xml.attrib['id']
self.fs = fsobj
@@ -82,11 +101,13 @@ class Mount(object):
opts.append('{0}={1}'.format(k, v))
else:
opts.append(k)
_logger.debug('Rendered mount opts: {0}'.format(opts))
return(opts)
def mount(self):
if self.mounted:
return(None)
_logger.info('Mounting {0} at {1} as {2}.'.format(self.source, self.target, self.fs.fstype))
os.makedirs(self.target, exist_ok = True)
opts = self._parseOpts()
_BlockDev.fs.mount(self.source,
@@ -94,12 +115,14 @@ class Mount(object):
self.fs.fstype,
(','.join(opts) if opts else None))
self.mounted = True
_logger.debug('{0} mounted.'.format(self.source))
return(None)
def unmount(self, lazy = False, force = False):
self.updateMount()
if not self.mounted and not force:
return(None)
_logger.info('Unmounting {0}.'.format(self.target))
_BlockDev.fs.unmount(self.target,
lazy,
force)
@@ -107,6 +130,7 @@ class Mount(object):
return(None)
def updateMount(self):
_logger.debug('Fetching mount status for {0}'.format(self.source))
if self.source in [p.device for p in psutil.disk_partitions(all = True)]:
self.mounted = True
else:

View File

@@ -1,7 +1,9 @@
import logging
import os
import subprocess
##
import psutil
from lxml import etree
##
import aif.disk.block_fallback as block
import aif.disk.luks_fallback as luks
@@ -10,52 +12,72 @@ import aif.disk.mdadm_fallback as mdadm
import aif.utils
_logger = logging.getLogger(__name__)
FS_FSTYPES = aif.utils.kernelFilesystems()
class FS(object):
def __init__(self, fs_xml, sourceobj):
self.xml = fs_xml
_logger.debug('fs_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
if not isinstance(sourceobj, (block.Disk,
block.Partition,
luks.LUKS,
lvm.LV,
mdadm.Array)):
raise ValueError(('sourceobj must be of type '
'aif.disk.block.Partition, '
'aif.disk.luks.LUKS, '
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array'))
_logger.error(('sourceobj must be of type '
'aif.disk.block.Partition, '
'aif.disk.luks.LUKS, '
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array.'))
raise ValueError('Invalid sourceobj type')
self.id = self.xml.attrib['id']
self.source = sourceobj
self.devpath = sourceobj.devpath
self.formatted = False
self.fstype = self.xml.attrib.get('type')
if self.fstype not in FS_FSTYPES:
_logger.error('{0} is not a supported filesystem type on this system.'.format(self.fstype))
raise ValueError('Invalid filesystem type')
def format(self):
if self.formatted:
return ()
return(None)
# This is a safeguard. We do *not* want to high-format a disk that is mounted.
aif.utils.checkMounted(self.devpath)
# TODO! Logging
cmd = ['mkfs',
'-t', self.fstype]
_logger.info('Formatting {0}.'.format(self.devpath))
cmd_str = ['mkfs',
'-t', self.fstype]
for o in self.xml.findall('opt'):
cmd.append(o.attrib['name'])
cmd_str.append(o.attrib['name'])
if o.text:
cmd.append(o.text)
cmd.append(self.devpath)
subprocess.run(cmd)
self.formatted = True
cmd_str.append(o.text)
cmd_str.append(self.devpath)
cmd = subprocess.run(cmd_str, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
_logger.info('Executed: {0}'.format(' '.join(cmd.args)))
if cmd.returncode != 0:
_logger.warning('Command returned non-zero status')
_logger.debug('Exit status: {0}'.format(str(cmd.returncode)))
for a in ('stdout', 'stderr'):
x = getattr(cmd, a)
if x:
_logger.debug('{0}: {1}'.format(a.upper(), x.decode('utf-8').strip()))
raise RuntimeError('Failed to format successfully')
else:
self.formatted = True
return(None)
class Mount(object):
def __init__(self, mount_xml, fsobj):
self.xml = mount_xml
_logger.debug('mount_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
self.id = self.xml.attrib['id']
if not isinstance(fsobj, FS):
raise ValueError('partobj must be of type aif.disk.filesystem.FS')
_logger.error('fsobj must be of type aif.disk.filesystem.FS.')
raise ValueError('Invalid type for fsobj')
self.id = self.xml.attrib['id']
self.fs = fsobj
self.source = self.fs.devpath
@@ -72,39 +94,63 @@ class Mount(object):
opts.append('{0}={1}'.format(k, v))
else:
opts.append(k)
_logger.debug('Rendered mount opts: {0}'.format(opts))
return(opts)
def mount(self):
if self.mounted:
return(None)
_logger.info('Mounting {0} at {1} as {2}.'.format(self.source, self.target, self.fs.fstype))
os.makedirs(self.target, exist_ok = True)
opts = self._parseOpts()
# TODO: logging
cmd = ['/usr/bin/mount',
'--types', self.fs.fstype]
cmd_str = ['/usr/bin/mount',
'--types', self.fs.fstype]
if opts:
cmd.extend(['--options', ','.join(opts)])
cmd.extend([self.source, self.target])
subprocess.run(cmd)
self.mounted = True
cmd_str.extend(['--options', ','.join(opts)])
cmd_str.extend([self.source, self.target])
cmd = subprocess.run(cmd_str, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
_logger.info('Executed: {0}'.format(' '.join(cmd.args)))
if cmd.returncode != 0:
_logger.warning('Command returned non-zero status')
_logger.debug('Exit status: {0}'.format(str(cmd.returncode)))
for a in ('stdout', 'stderr'):
x = getattr(cmd, a)
if x:
_logger.debug('{0}: {1}'.format(a.upper(), x.decode('utf-8').strip()))
raise RuntimeError('Failed to mount successfully')
else:
self.mounted = True
_logger.debug('{0} mounted.'.format(self.source))
return(None)
def unmount(self, lazy = False, force = False):
self.updateMount()
if not self.mounted and not force:
return(None)
# TODO: logging
cmd = ['/usr/bin/umount']
_logger.info('Unmounting {0}.'.format(self.target))
cmd_str = ['/usr/bin/umount']
if lazy:
cmd.append('--lazy')
cmd_str.append('--lazy')
if force:
cmd.append('--force')
cmd.append(self.target)
subprocess.run(cmd)
self.mounted = False
cmd_str.append('--force')
cmd_str.append(self.target)
cmd = subprocess.run(cmd_str, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
_logger.info('Executed: {0}'.format(' '.join(cmd.args)))
if cmd.returncode != 0:
_logger.warning('Command returned non-zero status')
_logger.debug('Exit status: {0}'.format(str(cmd.returncode)))
for a in ('stdout', 'stderr'):
x = getattr(cmd, a)
if x:
_logger.debug('{0}: {1}'.format(a.upper(), x.decode('utf-8').strip()))
raise RuntimeError('Failed to unmount successfully')
else:
self.mounted = False
_logger.debug('{0} unmounted.'.format(self.source))
return(None)
def updateMount(self):
_logger.debug('Fetching mount status for {0}'.format(self.source))
if self.source in [p.device for p in psutil.disk_partitions(all = True)]:
self.mounted = True
else:

View File

@@ -1,13 +1,19 @@
import logging
import os
import secrets
import uuid
##
from lxml import etree
##
from . import _common
import aif.disk.block as block
import aif.disk.lvm as lvm
import aif.disk.mdadm as mdadm
_logger = logging.getLogger(__name__)
_BlockDev = _common.BlockDev
@@ -17,6 +23,7 @@ class LuksSecret(object):
self.passphrase = None
self.size = 4096
self.path = None
_logger.info('Instantiated {0}.'.format(type(self).__name__))
class LuksSecretPassphrase(LuksSecret):
@@ -29,7 +36,8 @@ class LuksSecretFile(LuksSecret):
# TODO: might do a little tweaking in a later release to support *reading from* bytes.
def __init__(self, path, passphrase = None, bytesize = 4096):
super().__init__()
self.path = os.path.realpath(path)
self.path = os.path.abspath(os.path.expanduser(path))
_logger.debug('Path canonized: {0} => {1}'.format(path, self.path))
self.passphrase = passphrase
self.size = bytesize # only used if passphrase == None
self._genSecret()
@@ -40,12 +48,14 @@ class LuksSecretFile(LuksSecret):
self.passphrase = secrets.token_bytes(self.size)
if not isinstance(self.passphrase, bytes):
self.passphrase = self.passphrase.encode('utf-8')
_logger.debug('Secret generated.')
return(None)
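# Editor's note -- illustrative use of LuksSecretFile (paths/values are examples only,
# not part of this commit):
#
#   LuksSecretFile('/root/luks.key')
#       # no passphrase given, so _genSecret() draws 4096 random bytes of key material
#       # via secrets.token_bytes()
#   LuksSecretFile('~/luks.key', passphrase = 'correct horse battery staple')
#       # an explicit passphrase is encoded to bytes; bytesize is ignored in this case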
class LUKS(object):
def __init__(self, luks_xml, partobj):
self.xml = luks_xml
_logger.debug('luks_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
self.id = self.xml.attrib['id']
self.name = self.xml.attrib['name']
self.device = partobj
@@ -57,53 +67,62 @@ class LUKS(object):
block.Partition,
lvm.LV,
mdadm.Array)):
raise ValueError(('partobj must be of type '
'aif.disk.block.Disk, '
'aif.disk.block.Partition, '
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array'))
_logger.error(('partobj must be of type '
'aif.disk.block.Disk, '
'aif.disk.block.Partition, '
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array.'))
raise ValueError('Invalid partobj type')
_common.addBDPlugin('crypto')
self.devpath = '/dev/mapper/{0}'.format(self.name)
self.info = None
def addSecret(self, secretobj):
if not isinstance(secretobj, LuksSecret):
raise ValueError('secretobj must be of type aif.disk.luks.LuksSecret '
'(aif.disk.luks.LuksSecretPassphrase or '
'aif.disk.luks.LuksSecretFile)')
_logger.error('secretobj must be of type '
'aif.disk.luks.LuksSecret '
'(aif.disk.luks.LuksSecretPassphrase or '
'aif.disk.luks.LuksSecretFile).')
raise ValueError('Invalid secretobj type')
self.secrets.append(secretobj)
return(None)
def createSecret(self, secrets_xml = None):
_logger.info('Compiling secrets.')
if not secrets_xml: # Find all of them from self
for secret in self.xml.findall('secrets'):
_logger.debug('No secrets_xml specified; fetching from configuration block.')
for secret_xml in self.xml.findall('secrets'):
_logger.debug('secret_xml: {0}'.format(etree.tostring(secret_xml, with_tail = False).decode('utf-8')))
secretobj = None
secrettypes = set()
for s in secret.iterchildren():
for s in secret_xml.iterchildren():
_logger.debug('secret_xml child: {0}'.format(etree.tostring(s, with_tail = False).decode('utf-8')))
secrettypes.add(s.tag)
if all((('passphrase' in secrettypes),
('keyFile' in secrettypes))):
# This is safe, because a valid config only has at most one of both types.
kf = secret.find('keyFile')
kf = secret_xml.find('keyFile')
secretobj = LuksSecretFile(kf.text, # path
passphrase = secret.find('passphrase').text,
passphrase = secret_xml.find('passphrase').text,
bytesize = kf.attrib.get('size', 4096)) # TECHNICALLY should be a no-op.
elif 'passphrase' in secrettypes:
secretobj = LuksSecretPassphrase(secret.find('passphrase').text)
secretobj = LuksSecretPassphrase(secret_xml.find('passphrase').text)
elif 'keyFile' in secrettypes:
kf = secret.find('keyFile')
kf = secret_xml.find('keyFile')
secretobj = LuksSecretFile(kf.text,
passphrase = None,
bytesize = kf.attrib.get('size', 4096))
self.secrets.append(secretobj)
else:
_logger.debug('A secrets_xml was specified.')
secretobj = None
secrettypes = set()
for s in secrets_xml.iterchildren():
_logger.debug('secrets_xml child: {0}'.format(etree.tostring(s, with_tail = False).decode('utf-8')))
secrettypes.add(s.tag)
if all((('passphrase' in secrettypes),
('keyFile' in secrettypes))):
# This is safe, because a valid config only has at most one of both types.
# This is safe because a valid config only has at most one of both types.
kf = secrets_xml.find('keyFile')
secretobj = LuksSecretFile(kf.text, # path
passphrase = secrets_xml.find('passphrase').text,
@@ -116,13 +135,16 @@ class LUKS(object):
passphrase = None,
bytesize = kf.attrib.get('size', 4096))
self.secrets.append(secretobj)
_logger.debug('Secrets compiled.')
return(None)
def create(self):
if self.created:
return(None)
_logger.info('Creating LUKS volume on {0}'.format(self.source))
if not self.secrets:
raise RuntimeError('Cannot create a LUKS volume with no secrets added')
_logger.error('Cannot create a LUKS volume with no secrets added.')
raise RuntimeError('Cannot create a LUKS volume with no secrets')
for idx, secret in enumerate(self.secrets):
if idx == 0:
# TODO: add support for custom parameters for below?
@@ -138,20 +160,26 @@ class LUKS(object):
self.secrets[0].passphrase,
secret.passphrase)
self.created = True
_logger.debug('Created LUKS volume.')
return(None)
def lock(self):
_logger.info('Locking: {0}'.format(self.source))
if not self.created:
raise RuntimeError('Cannot lock a LUKS volume before it is created')
_logger.error('Cannot lock a LUKS volume that does not exist yet.')
raise RuntimeError('Cannot lock non-existent volume')
if self.locked:
return(None)
_BlockDev.crypto.luks_close(self.name)
self.locked = True
_logger.debug('Locked.')
return(None)
def unlock(self, passphrase = None):
_logger.info('Unlocking: {0}'.format(self.source))
if not self.created:
raise RuntimeError('Cannot unlock a LUKS volume before it is created')
_logger.error('Cannot unlock a LUKS volume that does not exist yet.')
raise RuntimeError('Cannot unlock non-existent volume')
if not self.locked:
return(None)
_BlockDev.crypto.luks_open_blob(self.source,
@@ -159,10 +187,13 @@ class LUKS(object):
self.secrets[0].passphrase,
False) # read-only
self.locked = False
_logger.debug('Unlocked.')
return(None)
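# Editor's note -- illustrative lifecycle of a LUKS object as far as this diff shows
# ('luks_xml' and 'part' are hypothetical stand-ins):
#
#   l = LUKS(luks_xml, part)
#   l.createSecret()  # collect <secrets> from the config block
#   l.create()        # create the container and register each secret (details partly
#                     # elided in the hunk above)
#   l.unlock()        # luks_open_blob -> /dev/mapper/<name>
#   l.lock()          # luks_close(<name>)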
def updateInfo(self):
_logger.info('Updating info.')
if self.locked:
_logger.error('Tried to fetch metadata about a locked volume. A volume must be unlocked first.')
raise RuntimeError('Must be unlocked to gather info')
info = {}
_info = _BlockDev.crypto.luks_info(self.devpath)
@@ -177,12 +208,16 @@ class LUKS(object):
info[k] = v
info['_cipher'] = '{cipher}-{mode}'.format(**info)
self.info = info
_logger.debug('Rendered updated info: {0}'.format(info))
return(None)
def writeConf(self, conf = '/etc/crypttab'):
def writeConf(self, chroot_base, init_hook = True):
_logger.info('Generating crypttab.')
if not self.secrets:
raise RuntimeError('secrets must be added before the configuration can be written')
conf = os.path.realpath(conf)
_logger.error('Secrets must be added before the configuration can be written.')
raise RuntimeError('Missing secrets')
conf = os.path.join(chroot_base, 'etc', 'crypttab')
initconf = '{0}.initramfs'.format(conf)
with open(conf, 'r') as fh:
conflines = fh.read().splitlines()
# Get UUID
@@ -204,4 +239,5 @@ class LUKS(object):
if luksinfo not in conflines:
with open(conf, 'a') as fh:
fh.write('{0}\n'.format(luksinfo))
_logger.debug('Generated crypttab line: {0}'.format(luksinfo))
return(None)
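# Editor's note -- for reference, a crypttab(5) entry generally has the form below; the
# exact 'luksinfo' line this method builds is elided from the hunk above:
#
#   <volume-name>  UUID=<luks-uuid>     <keyfile-or-none>  <options>
# e.g.
#   cryptroot      UUID=2d1f02c0-...    /root/luks.key     luks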

View File

@@ -1,3 +1,4 @@
import logging
import os
import re
import secrets
@@ -6,12 +7,16 @@ import tempfile
import uuid
##
import parse
from lxml import etree
##
import aif.disk.block_fallback as block
import aif.disk.lvm_fallback as lvm
import aif.disk.mdadm_fallback as mdadm
_logger = logging.getLogger(__name__)
class LuksSecret(object):
def __init__(self, *args, **kwargs):
self.passphrase = None

View File

@@ -58,4 +58,4 @@ class Net(object):
realdest = os.path.join(chroot_base, dest)
os.symlink(src, realdest)
iface.writeConf(chroot_base)
return ()
return(None)

View File

@@ -133,7 +133,7 @@ def kernelFilesystems():
# The kernel *probably* has autoloading enabled, but in case it doesn't...
if os.getuid() == 0:
cmd = subprocess.run(['modprobe', fs_name], stderr = subprocess.PIPE, stdout = subprocess.PIPE)
_logger.debug('Executed: {0}'.format(' '.join(cmd.args)))
_logger.info('Executed: {0}'.format(' '.join(cmd.args)))
if cmd.returncode != 0:
_logger.warning('Command returned non-zero status')
_logger.debug('Exit status: {0}'.format(str(cmd.returncode)))
@@ -154,7 +154,7 @@ def kernelFilesystems():
def xmlBool(xmlobj):
# https://bugs.launchpad.net/lxml/+bug/1850221
if isinstance(xmlobj, bool):
return (xmlobj)
return(xmlobj)
if xmlobj.lower() in ('1', 'true'):
return(True)
elif xmlobj.lower() in ('0', 'false'):

View File

@@ -23,7 +23,7 @@ class ChecksumFile(object):
def __init__(self, checksum_xml, filetype):
self.xml = checksum_xml
if self.xml is not None:
_logger.debug('checksum_xml: {0}'.format(etree.tostring(self.xml).decode('utf-8')))
_logger.debug('checksum_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
else:
_logger.error('checksum_xml is required but not specified')
raise ValueError('checksum_xml is required')
@@ -72,7 +72,7 @@ class Downloader(object):
self.xml = netresource_xml
_logger.info('Instantiated class {0}'.format(type(self).__name__))
if netresource_xml is not None:
_logger.debug('netresource_xml: {0}'.format(etree.tostring(self.xml).decode('utf-8')))
_logger.debug('netresource_xml: {0}'.format(etree.tostring(self.xml, with_tail = False).decode('utf-8')))
else:
_logger.error('netresource_xml is required but not specified')
raise ValueError('netresource_xml is required')
@@ -110,12 +110,12 @@ class Downloader(object):
def verify(self, verify_xml, *args, **kwargs):
gpg_xml = verify_xml.find('gpg')
if gpg_xml is not None:
_logger.debug('gpg_xml: {0}'.format(etree.tostring(gpg_xml).decode('utf-8')))
_logger.debug('gpg_xml: {0}'.format(etree.tostring(gpg_xml, with_tail = False).decode('utf-8')))
else:
_logger.debug('No <gpg> in verify_xml')
hash_xml = verify_xml.find('hash')
if hash_xml is not None:
_logger.debug('Hash XML: {0}'.format(etree.tostring(hash_xml).decode('utf-8')))
_logger.debug('hash_xml: {0}'.format(etree.tostring(hash_xml, with_tail = False).decode('utf-8')))
else:
_logger.debug('No <hash> in verify_xml')
results = {}
@@ -135,15 +135,15 @@ class Downloader(object):
_logger.debug('GPG primary key: {0}'.format(self.gpg.primary_key.fpr))
keys_xml = gpg_xml.find('keys')
if keys_xml is not None:
_logger.debug('keys_xml: {0}'.format(etree.tostring(keys_xml).decode('utf-8')))
_logger.debug('keys_xml: {0}'.format(etree.tostring(keys_xml, with_tail = False).decode('utf-8')))
else:
_logger.error('No required <keys> in gpg_xml')
raise ValueError('<keys> is required in a GPG verification block')
sigs_xml = gpg_xml.find('sigs')
if sigs_xml is not None:
_logger.debug('Keys XML: {0}'.format(etree.tostring(keys_xml).decode('utf-8')))
_logger.debug('sigs_xml: {0}'.format(etree.tostring(sigs_xml, with_tail = False).decode('utf-8')))
else:
_logger.error('No required <keys> in gpg_xml')
_logger.error('No required <sigs> in gpg_xml')
raise ValueError('<sigs> is required in a GPG verification block')
fnargs = {'strict': keys_xml.attrib.get('detect')}
if fnargs['strict']: # We have to manually do this since it's in our parent's __init__
@@ -157,7 +157,7 @@ class Downloader(object):
if keys_xml is not None:
fnargs['keys'] = []
for key_id_xml in keys_xml.findall('keyID'):
_logger.debug('Found <keyID>: {0}'.format(etree.tostring(key_id_xml).decode('utf-8')))
_logger.debug('key_id_xml: {0}'.format(etree.tostring(key_id_xml, with_tail = False).decode('utf-8')))
if key_id_xml.text == 'auto':
_logger.debug('Key ID was set to "auto"; using {0}'.format(aif.constants_fallback.ARCH_RELENG_KEY))
self.gpg.findKeyByID(aif.constants_fallback.ARCH_RELENG_KEY, source = 'remote',
@@ -174,7 +174,8 @@ class Downloader(object):
raise RuntimeError('Could not find key ID specified')
fnargs['keys'].append(k)
for key_file_xml in keys_xml.findall('keyFile'):
_logger.debug('Found <keyFile>: {0}'.format(etree.tostring(key_file_xml).decode('utf-8')))
_logger.debug('key_file_xml: {0}'.format(etree.tostring(key_file_xml,
with_tail = False).decode('utf-8')))
downloader = getDLHandler(key_file_xml.text.strip()) # Recursive objects for the win?
dl = downloader(key_file_xml)
dl.get()
@@ -218,7 +219,7 @@ class Downloader(object):
self.data.seek(0, 0)
if checksum_file_xml is not None:
for cksum_xml in checksum_file_xml:
_logger.debug('Found <checksumFile>: {0}'.format(etree.tostring(cksum_xml).decode('utf-8')))
_logger.debug('cksum_xml: {0}'.format(etree.tostring(cksum_xml, with_tail = False).decode('utf-8')))
htype = cksum_xml.attrib['hashType'].strip().lower()
ftype = cksum_xml.attrib['fileType'].strip().lower()
fname = cksum_xml.attrib.get('filePath',
@@ -237,7 +238,7 @@ class Downloader(object):
results.append(result)
if checksum_xml is not None:
for cksum_xml in checksum_xml:
_logger.debug('Found <checksum>: {0}'.format(etree.tostring(cksum_xml).decode('utf-8')))
_logger.debug('cksum_xml: {0}'.format(etree.tostring(cksum_xml, with_tail = False).decode('utf-8')))
# Thankfully, this is a LOT easier.
htype = cksum_xml.attrib['hashType'].strip().lower()
result = (cksum_xml.text.strip().lower() == checksums[htype])