whoo doggie. should check this in.

brent s
2019-09-23 06:45:18 -04:00
parent 0695b86add
commit e9b7b52bb0
7 changed files with 229 additions and 36 deletions

View File

@@ -1,3 +1,7 @@
import os
import grp
import pwd
def xmlBool(xmlobj):
    if isinstance(xmlobj, bool):
        return(xmlobj)
@@ -6,4 +10,26 @@ def xmlBool(xmlobj):
    elif xmlobj.lower() in ('0', 'false'):
        return(False)
    else:
        return(None)
    return(None)


def getSudoGroup():
    is_sudo = False
    if os.environ.get('SUDO_GID'):
        gid = int(os.environ['SUDO_GID'])
        is_sudo = True
    else:
        gid = os.getegid()
    group = grp.getgrgid(gid)
    return((group, gid, is_sudo))


def getSudoUser():
    is_sudo = False
    if os.environ.get('SUDO_UID'):
        uid = int(os.environ['SUDO_UID'])
        is_sudo = True
    else:
        uid = os.geteuid()
    user = pwd.getpwuid(uid)  # look up the resolved uid, not os.geteuid(), which would ignore SUDO_UID
    return((user, uid, is_sudo))
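
For reference, a minimal sketch of how these new arb_util helpers are consumed by the other files in this commit; the literal values below are made up for illustration:

import arb_util

user, uid, is_sudo = arb_util.getSudoUser()     # pwd struct of the invoking user, their uid, whether sudo was detected
group, gid, is_sudo = arb_util.getSudoGroup()   # same idea for the invoking group
flag = arb_util.xmlBool('0')                    # XML attribute strings map to booleans; '0'/'false' -> False, unparseable -> None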

View File

@@ -1,39 +1,174 @@
import os
import grp
import pathlib
import pwd
import re
import shutil
import subprocess
##
import paramiko
##
import arb_util


class Mirror(object):
    def __init__(self, mirror_xml, ns = '', *args, **kwargs):
        self.xml = mirror_xml
        self.ns = ns
        if os.environ.get('SUDO_USER'):
            _uname = os.environ['SUDO_USER']
        else:
            _uname = pwd.getpwuid(os.geteuid()).pw_name
        self.user = pwd.getpwnam(mirror_xml.attrib.get('user', _uname))
        self.fmode = int(self.xml.attrib.get('fileMode', '0600'), 8)
        self.dmode = int(self.xml.attrib.get('dirMode', '0700'), 8)
        user, uid, self.is_sudo = arb_util.getSudoUser()
        self.user = pwd.getpwnam(mirror_xml.attrib.get('user', user.pw_name))
        try:
            self.fmode = int(mirror_xml.attrib.get('fileMode'), 8)
        except TypeError:
            self.fmode = None
        try:
            self.dmode = int(mirror_xml.attrib.get('dirMode'), 8)
        except TypeError:
            self.dmode = None
        self.dest = self.xml.text

    def sync(self):
        # no-op; this is handled in the subclasses since it's unique to them.
        pass
        return(True)


class LocalMirror(Mirror):
    def __init__(self, mirror_xml, ns = '', *args, **kwargs):
        super().__init__(mirror_xml, ns = ns, *args, **kwargs)
        if os.environ.get('SUDO_GID'):
            _grpnm = os.environ['SUDO_GID']
        else:
            _grpnm = grp.getgrgid(os.getegid()).gr_name
        self.group = grp.getgrnam(mirror_xml.attrib.get('group', _grpnm))
        if self.user.pw_uid == arb_util.getSudoUser()[1]:
            self.user = None
        group, gid, is_sudo = arb_util.getSudoGroup()
        self.group = grp.getgrnam(mirror_xml.attrib.get('group', group.gr_name))
        if self.group.gr_gid == gid:
            self.group = None
        self.dest = os.path.abspath(os.path.expanduser(self.dest))

    def sync(self, source):
        source = os.path.abspath(os.path.expanduser(source))
        for root, dirs, files in os.walk(source):
            for d in dirs:
                dpath = os.path.join(root, d)
                reldpath = pathlib.PurePosixPath(dpath).relative_to(source)
                destdpath = os.path.join(self.dest, reldpath)
                if os.path.exists(destdpath):
                    shutil.rmtree(destdpath)
                shutil.copytree(dpath, destdpath, symlinks = True, ignore_dangling_symlinks = True)
            for f in files:
                fpath = os.path.join(root, f)
                relfpath = pathlib.PurePosixPath(fpath).relative_to(source)
                destfpath = os.path.join(self.dest, relfpath)
                shutil.copy2(fpath, destfpath)
            break  # We only need one iteration since copytree is recursive
        # Now we set the user/group ownership and the file/dir modes.
        # This first any() check is DEFINITELY a speed optimization if those perms aren't modified.
        if any((self.user, self.group, self.fmode, self.dmode)):
            if self.user:
                os.chown(self.dest, self.user.pw_uid, -1, follow_symlinks = False)
            if self.group:
                os.chown(self.dest, -1, self.group.gr_gid, follow_symlinks = False)
            if self.dmode:
                try:
                    os.chmod(self.dest, self.dmode, follow_symlinks = False)
                except NotImplementedError:
                    os.chmod(self.dest, self.dmode)
            for root, dirs, files in os.walk(self.dest):
                for d in dirs:
                    dpath = os.path.join(root, d)
                    if self.user:
                        os.chown(dpath, self.user.pw_uid, -1, follow_symlinks = False)
                    if self.group:
                        os.chown(dpath, -1, self.group.gr_gid, follow_symlinks = False)
                    if self.dmode:
                        try:
                            os.chmod(dpath, self.dmode, follow_symlinks = False)
                        except NotImplementedError:
                            os.chmod(dpath, self.dmode)
                for f in files:
                    fpath = os.path.join(root, f)
                    if self.user:
                        os.chown(fpath, self.user.pw_uid, -1, follow_symlinks = False)
                    if self.group:
                        os.chown(fpath, -1, self.group.gr_gid, follow_symlinks = False)
                    if self.fmode:
                        try:
                            os.chmod(fpath, self.fmode, follow_symlinks = False)
                        except NotImplementedError:
                            os.chmod(fpath, self.fmode)
        return(True)


class RemoteMirror(Mirror):
    def __init__(self, mirror_xml, ns = '', *args, **kwargs):
        super().__init__(mirror_xml, ns = ns, *args, **kwargs)
        self.server = mirror_xml.attrib['server']
        self.hardened = arb_util.xmlBool(mirror_xml.attrib.get('hardened', False))
        self.port = int(mirror_xml.attrib.get('port', 22))
        self.keyfile = os.path.abspath(os.path.expanduser(mirror_xml.attrib.get('key', '~/.ssh/id_rsa')))
        self.remote_user = mirror_xml.attrib.get('remoteUser')
        self.remote_group = mirror_xml.attrib.get('remoteGroup')
        self.ssh = None
        self.transport = None

    def _initSSH(self):
        has_ssh = False
        if self.ssh and self.transport.is_active() and self.transport.is_alive():
            has_ssh = True
        if not has_ssh:
            userhostkeys = os.path.abspath(os.path.expanduser('~/.ssh/known_hosts'))
            self.ssh = paramiko.SSHClient()
            self.ssh.load_system_host_keys()
            if os.path.isfile(userhostkeys):
                self.ssh.load_system_host_keys(userhostkeys)
            self.ssh.set_missing_host_key_policy((paramiko.RejectPolicy
                                                  if self.hardened else
                                                  paramiko.AutoAddPolicy))
            self.ssh.connect(hostname = self.server,
                             port = self.port,
                             username = self.user.pw_name,
                             key_filename = self.keyfile)
            self.transport = self.ssh.get_transport()
        return()

    def _closeSSH(self):
        if self.transport:
            self.transport.close()
        if self.ssh:
            self.ssh.close()
        return()

    def sync(self, source):
        source = os.path.abspath(os.path.expanduser(source))
        cmd = ['rsync',
               '--archive',
               # '--delete',  # TODO: yes? no? configurable?
               # subprocess passes this list directly (no shell), so the value must not carry its own quoting.
               '--rsh=ssh -p {0} -l {1}'.format(self.port, self.user.pw_name),
               source,
               '{0}@{1}:{2}'.format(self.user.pw_name, self.server, self.dest)]
        # TODO: log output?
        rsync_out = subprocess.run(cmd, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
        # This first if is technically unnecessary, but it can offer a *slight* speed benefit. VERY slight.
        # As in so negligible, only Jthan would care about it.
        if any((self.remote_user, self.remote_group, self.fmode, self.dmode)):
            if self.remote_user:
                self._initSSH()
                stdin, stdout, stderr = self.ssh.exec_command('chown -R {0} {1}'.format(self.remote_user,
                                                                                        self.dest))
            if self.remote_group:
                self._initSSH()
                stdin, stdout, stderr = self.ssh.exec_command('chgrp -R {0} {1}'.format(self.remote_group,
                                                                                        self.dest))
            if self.fmode:
                self._initSSH()
                stdin, stdout, stderr = self.ssh.exec_command(
                    ('find {0} -type f -print0 | '
                     'xargs --null --no-run-if-empty chmod {1}').format(self.dest,
                                                                        re.sub('^0o', '', oct(self.fmode))))
            if self.dmode:
                self._initSSH()
                stdin, stdout, stderr = self.ssh.exec_command(
                    ('find {0} -type d -print0 | '
                     'xargs --null --no-run-if-empty chmod {1}').format(self.dest,
                                                                        re.sub('^0o', '', oct(self.dmode))))
        self._closeSSH()
        return(True)
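
To make the expected <mirror> XML shape concrete, here is a rough usage sketch. The element layout is only inferred from the attrib lookups above (the schema itself is not part of this commit), and the host, paths, and user name are hypothetical:

import xml.etree.ElementTree as etree
import mirror

m_xml = etree.fromstring('<mirror user="builder" fileMode="0644" dirMode="0755">/srv/repo/arch</mirror>')
lm = mirror.LocalMirror(m_xml)
lm.sync('~/staging')    # copy ~/staging into /srv/repo/arch, then apply ownership and modes

r_xml = etree.fromstring('<mirror server="mirror.example.com" port="22" remoteUser="repo">/srv/repo/arch</mirror>')
rm = mirror.RemoteMirror(r_xml)
rm.sync('~/staging')    # rsync ~/staging to the server, then fix ownership/modes over SSH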

View File

@@ -12,6 +12,7 @@ import requests
##
import arb_util
# TODO: implement alwaysBuild check!!!
# TODO: should this be a configuration option?
aurbase = 'https://aur.archlinux.org'

View File

@@ -1,5 +1,6 @@
import os
import re
import subprocess
##
import gpg
##
@@ -17,16 +18,32 @@ class Repo(object):
        self.key = None
        self.mirrors = []
        self.packages = []
        self.packagefiles = []
        self.sigfiles = []
        _key_id = self.xml.attrib.get('gpgKeyID')
        self.key_id = (re.sub(r'\s+', '', _key_id) if _key_id else None)
        self.staging_dir = os.path.abspath(os.path.expanduser(self.xml.attrib.get('staging',
                                                                                  '.')))
        self.sign_pkgs = arb_util.xmlBool(self.xml.attrib.get('signPkgs', True))
        self.sign_db = arb_util.xmlBool(self.xml.attrib.get('signDB', True))
        self._initSigner()
        if any((self.sign_db, self.sign_pkgs)):
            self._initSigner()
        self._initMirrors()
        self._initPackages()

    def _genRepo(self):
        if not self.packagefiles:
            # raise RuntimeError('.build() must be run before ._genRepo()')
            return(None)
        cmd = ['repo-add']
        if self.sign_db:
            cmd.extend(['--sign', '--key', self.key_id])
        cmd.extend(['--remove',
                    os.path.join(self.staging_dir, '{0}.db.tar.xz'.format(self.name)),
                    *self.packagefiles])
        repo_out = subprocess.run(cmd, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
        return(True)

    def _initMirrors(self):
        for m in self.xml.findall('{0}mirrors/{0}mirror.RemoteMirror'.format(self.ns)):
            self.mirrors.append(mirror.RemoteMirror(m, ns = self.ns))
@@ -61,6 +78,7 @@ class Repo(object):
                if squashed_key in keyforms:
                    if k.can_sign:
                        self.key = k
                        self.key_id = k.fpr
                        break
                else:
                    for s in k.subkeys:
@@ -68,6 +86,7 @@ class Repo(object):
                        if squashed_key in subkeyforms:
                            if s.can_sign:
                                self.key = s
                                self.key_id = s.fpr
                                break
            else:
                if k.can_sign:
@@ -77,3 +96,21 @@ class Repo(object):
            raise ValueError('Cannot find a suitable signing GPG key')
        self.gpg.signers = [self.key]
        return()

    def build(self):
        for p in self.packages:
            self.packagefiles.extend(p.build(self.staging_dir))
        if self.sign_pkgs:
            for f in self.packagefiles:
                sigfile = '{0}.sig'.format(f)
                with open(f, 'rb') as pkg:
                    with open(sigfile, 'wb') as sig:
                        sig.write(self.gpg.sign(pkg.read(), mode = gpg.constants.SIG_MODE_DETACH)[0])
                self.sigfiles.append(sigfile)
        self._genRepo()
        return()

    def sync(self):
        for m in self.mirrors:
            m.sync(self.staging_dir)
        return()
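
The overall flow this commit wires together, as a sketch; Repo's constructor arguments are not shown in this diff, so the instantiation below is an assumption:

r = Repo(repo_xml, ns = ns)   # hypothetical signature
r.build()                     # build each package into staging_dir, detach-sign the results if signPkgs, then repo-add the DB
r.sync()                      # push staging_dir out to every configured mirror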