Compare commits: v2.60...4.x_rewrit (177 commits)

Commit SHA1s (abbreviated):
8278c55cab 9d018be8aa 75580b43cc 716120e14b a376bea0e9 69b6ec60d0
0dd54a604d c48c752f84 82c21f170a f677839194 4ed9ef5f5f 5017d76064
0682137b21 7c0c7bf5c0 ee653e81f6 9f74e97c45 b134ee67bd e818b04f16
4de9d1a26c 1d9b40a597 f4f131890d b2498ba98d 559789ffe5 bf12fbcda3
1df5bd87e0 ed7ccdeeaf befcd8185e 262eefba07 46a9df6ef6 d9ee277ff4
721c571da6 96bca202f0 303e006b35 b2622406f0 7819b5edc4 a315468ff8
f4c5c0fdf8 4dd03dea75 5182e8154b c6a837d1fe aaf03db8bd 77c1aea510
e72eee4dec 2b233b9af9 c2cfd3298f 2db702107d 614c1b3e6f 80d5d127ca
c10ceaa225 167cd342fa f1f37547dd 034092ae49 76da0bb29a c489837d40
705ad0732c 7e5e38a68a 84f062813e 111e812146 a54b5b110d 4217b7323b
02b4fbc454 cc3f23efc7 7c3a4a61b6 e87976d740 bf3dc2bb4c fae0a53034
f89283a301 3bcc023c01 74412e4dea 676265b2aa 6f3a0f6b86 ef8562fb0e
01e1b979f8 481a3e4cf9 f9c44ecc8e c9ccb3aa17 6ff5a96d76 e3236eb0d6
60791f1596 f37221c833 9afa665a75 eec74f16e6 6760b46c5d 06c8924367
ac7bfa7320 2545d0742a f26e03fda9 47684f989b da3c8fa64a 6d6585a62c
a1fe1dbc0a 59b106df67 8b7a8ebc8c c9d1eb0902 32638dbdff 17078f3d1d
0c9dcfd833 d0d8105db3 2094cf4f1f 0af57624fb 856bea09a2 764ba2f8d0
e1e464d5c5 d40672fdd9 960dc34ba8 9887ee005e 2187d7ac55 d6f76dbfdc
2db5a8f0f6 e3e2e0a2db 7ff830bf3b 090d6eb28f 99561565aa b96805f4fb
a670ff8eb7 77590ef0a0 39e037c08a 78fecbded4 fca944f30a 13812309dc
82b919958a 23c6cc7101 f437391818 b4a5e40b8f 3c46d85683 60ee17bf9d
ad539b4254 f81f23cc6d 59b8a87df2 c0048b1003 21fe72818c 528949e82a
5ac510762c df172d73eb dbeee4789d f12bb7799d 0c19a797fd 7c6bc3a8c3
3d83164371 b3bb0391c8 6f3e812d35 4b4cbd0f63 7381cc3d39 dcceafc979
736457a6e0 0102ca26c3 6f53d09b04 b95bef3b17 36c7da470a e94fe963b9
b626fcc8be 7b9be0b9d8 078bca4c01 b0afd3059f 22c1f73e12 770293e2d8
c8637e9779 e838bab81c 7cdf0eef50 f419a6e4f6 0c4fb77ad1 0b191764b7
acd5c0665a a86115b7a2 2b512fb50f ef2790eb68 6d611da615 6d9f24ee28
c418fca548 2d6384e15d a08ae5dd06 91b8edcc26 3cbf32be30 4e2a2fecb1
001fdf99d3 e95c4f3cff a75cff05b7

.gitignore (vendored): 57 changes
@@ -1,36 +1,33 @@
# We don't want local build settings
/build.conf
# We don't want local build settings in case someone's using
# the git dir as a place to store their build.ini
confs/*

# The chroots should be generated locally ONLY. The perms/ownership would get futzed up anyways if checked into git.
/root.x86_64
/root.i686

# We don't want the copied/stripped/compressed chroots
/build64
/build32

# We don't need these in git. They should be generated dynamically.
/http
/iso
/temp
/TMPBOOT
/tftpboot
/latest.64.tar.gz
/latest.32.tar.gz
/lockfile.lck
/VERSION_INFO.txt
/BUILDNO
/screenlog*
# We don't need these in git. They should be generated dynamically, or they're used in testing/development or local
# to a workstation.
.latest.*.tar
/buildnum
screenlog*
/logs
*.swp
*.lck
/extrasrc
*~
.~lock.*
.idea/

# You should really generate local copies of these, as they're pretty private.
extra/pre-build.d/etc/openvpn/client.conf
overlay/etc/ssh/*
overlay/home/bdisk
overlay/etc/systemd/system/multi-user.target.wants/openvpn@client.service
src/ipxe_local/ssl/keys
src/ipxe_local/ssl/crts
src/ipxe_local/ssl/txt
**/etc/dropbear
**/etc/openvpn
**/etc/systemd/system/multi-user.target.wants/openvpn@*
**/etc/ssh
**/home
**ssl/
!**ssl/.keepme
!**ssl/openssl.cnf

# and we DEFINITELY don't need these.
__pycache__/
*.pyc
*test*.py
*test*.sh
*test*.exp
*.bak

.gitmodules (vendored): 5 deletions
@@ -1,5 +0,0 @@
[submodule "ipxe"]
branch = master
[submodule "src/ipxe"]
path = src/ipxe
url = git://git.ipxe.org/ipxe.git

@@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

TODO (new file): 43 additions
@@ -0,0 +1,43 @@
- write classes/functions
- XML-based config
-x XML syntax
--- x regex btags - case-insensitive? this can be represented in-pattern:
x https://stackoverflow.com/a/9655186/733214
--- remove sources stuff - that should be in the guest definitions.
-x configuration generator
--- x print end result xml config to stderr for easier redirection? or print prompts to stderr and xml to stdout?
-- x XSD for validation
-- Flask app for generating config?
-- TKinter (or pygame?) GUI?
--- https://docs.python.org/3/faq/gui.html
--- https://www.pygame.org/wiki/gui
- ensure we use docstrings in a Sphinx-compatible manner?
https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html
at the very least document all the functions and such so pydoc's happy.

- locking

- for docs, 3.x (as of 3.10) was 2.4M.

- x Need ability to write/parse mtree specs (or a similar equivalent) for applying ownerships/permissions to overlay files
-- parsing is done. writing may? come later.
--- i think writing is mostly done/straightforward; still need to work on parsing mode octals for files


- package for PyPI:
# https://packaging.python.org/tutorials/distributing-packages/
# https://docs.python.org/3/distutils/apiref.html
# https://python-packaging.readthedocs.io/en/latest/minimal.html
# https://setuptools.readthedocs.io/en/latest/setuptools.html#new-and-changed-setup-keywords


BUGS.SQUARE-R00T.NET bugs/tasks:
#7: Ensure conditional deps/imports for features only if used.
Is this setup.py-compatible?
nooope. just make everything a dep.
#14: Use os.path.join() for more consistency/pythonicness
#24: Run as regular user? (pychroot? fakeroot?)
#34: Build-time support for only building single phase of build
#39: Fix UEFI
#40: ISO overlay (to add e.g. memtest86+ to final ISO)
#43: Support resuming partial tarball downloads (Accept-Ranges: bytes)
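The "parsing mode octals" item above is small enough to illustrate. A minimal sketch, assuming modes arrive as octal strings the way mtree-style specs record them; the parse_mode() helper is hypothetical and not part of this diff:

```python
import stat

def parse_mode(mode_str):
    # Hypothetical helper: mtree-style specs carry modes as octal strings
    # (e.g. "0644" or "644"), while os.chmod() wants an int.
    mode = int(mode_str, 8)
    # Render the familiar "-rw-r--r--" form for sanity-checking (assume a regular file).
    return(mode, stat.filemode(stat.S_IFREG | mode))

print(parse_mode('0644'))  # -> (420, '-rw-r--r--')
```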

bdisk/BIOS.py (new file): 4 additions
@@ -0,0 +1,4 @@
import jinja2
import os
import shutil

bdisk/GIT.py (new file): 2 additions
@@ -0,0 +1,2 @@
import git
import os

bdisk/GPG.py (new file): 493 additions
@@ -0,0 +1,493 @@
|
||||
import copy
|
||||
import datetime
|
||||
import gpg
|
||||
import operator
|
||||
import os
|
||||
import re
|
||||
import utils # LOCAL
|
||||
from functools import reduce
|
||||
from gpg import gpgme
|
||||
|
||||
# Reference material.
|
||||
# http://files.au.adversary.org/crypto/GPGMEpythonHOWTOen.html
|
||||
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gpgme.git;a=tree;f=lang/python/examples;hb=HEAD
|
||||
# https://www.gnupg.org/documentation/manuals/gpgme.pdf
|
||||
# Support ECC? https://www.gnupg.org/faq/whats-new-in-2.1.html#ecc
|
||||
# section 4.1, 4.2, 7.5.1, 7.5.5 in gpgme manual
|
||||
|
||||
# These are static values. We include them in the parent so we don't define them every time a function is called.
|
||||
# Key signature attributes.
|
||||
_keysig_attrs = ('comment', 'email', 'expired', 'expires', 'exportable', 'invalid', 'keyid', 'name', 'notations',
|
||||
'pubkey_algo', 'revoked', 'sig_class', 'status', 'timestamp', 'uid')
|
||||
# Data signature attributes.
|
||||
_sig_attrs = ('chain_model', 'exp_timestamp', 'fpr', 'hash_algo', 'is_de_vs', 'key', 'notations', 'pka_address',
|
||||
'pka_trust', 'pubkey_algo', 'status', 'summary', 'timestamp', 'validity', 'validity_reason',
|
||||
'wrong_key_usage')
|
||||
|
||||
# A regex that ignores signature verification validity errors we don't care about.
|
||||
_valid_ignore = re.compile(('^('
|
||||
#'CHECKSUM|'
|
||||
'ELEMENT_NOT_FOUND|'
|
||||
'MISSING_VALUE|'
|
||||
#'UNKNOWN_PACKET|'
|
||||
'UNSUPPORTED_CMS_OBJ|'
|
||||
'WRONG_SECKEY|'
|
||||
'('
|
||||
'DECRYPT|'
|
||||
'INV|'
|
||||
'NO|'
|
||||
'PIN|'
|
||||
'SOURCE'
|
||||
')_'
|
||||
')'))
|
||||
# A function to build a list based on the above.
|
||||
def _gen_valid_validities():
|
||||
# Strips out and minimizes the error output.
|
||||
v = {}
|
||||
for s in dir(gpg.constants.validity):
|
||||
if _valid_ignore.search(s):
|
||||
continue
|
||||
val = getattr(gpg.constants.validity, s)
|
||||
if not isinstance(val, int):
|
||||
continue
|
||||
v[s] = val
|
||||
return(v)
|
||||
_valid_validities = _gen_valid_validities()
|
||||
def _get_sigstatus(status):
|
||||
statuses = []
|
||||
for e in _valid_validities:
|
||||
if ((status & _valid_validities[e]) == _valid_validities[e]):
|
||||
statuses.append(e)
|
||||
return(statuses)
|
||||
def _get_sig_isgood(sigstat):
|
||||
is_good = True
|
||||
if not ((sigstat & gpg.constants.sigsum.GREEN) == gpg.constants.sigsum.GREEN):
|
||||
is_good = False
|
||||
if not ((sigstat & gpg.constants.sigsum.VALID) == gpg.constants.sigsum.VALID):
|
||||
is_good = False
|
||||
return(is_good)
|
||||
|
||||
|
||||
# This helps translate the input name from the conf to a string compatible with the gpg module.
|
||||
_algmaps = {#'cv': 'cv{keysize}', # DISABLED, can't sign (only encrypt). Currently only 25519
|
||||
'ed': 'ed{keysize}', # Currently only 25519
|
||||
#'elg': 'elg{}', # DISABLED, can't sign (only encrypt). 1024, 2048, 4096
|
||||
'nist': 'nistp{keysize}', # 256, 384, 521
|
||||
'brainpool.1': 'brainpoolP{keysize}r1', # 256, 384, 512
|
||||
'sec.k1': 'secp{keysize}k1', # Currently only 256
|
||||
'rsa': 'rsa{keysize}', # Variable (1024 <> 4096), but we only support 1024, 2048, 4096
|
||||
'dsa': 'dsa{keysize}'} # Variable (768 <> 3072), but we only support 768, 2048, 3072
|
||||
|
||||
# This is just a helper function to get a delta from a unix epoch.
|
||||
def _epoch_helper(epoch):
|
||||
d = datetime.datetime.utcfromtimestamp(epoch) - datetime.datetime.utcnow()
|
||||
return(abs(int(d.total_seconds()))) # Returns a positive integer even if negative...
|
||||
#return(int(d.total_seconds()))
|
||||
|
||||
# _KeyEditor and _getEditPrompt are used to interactively edit keys -- notably currently used for editing trusts
|
||||
# (since there's no way to edit trust otherwise).
|
||||
# https://www.gnupg.org/documentation/manuals/gpgme/Advanced-Key-Editing.html
|
||||
# https://www.apt-browse.org/browse/debian/wheezy/main/amd64/python-pyme/1:0.8.1-2/file/usr/share/doc/python-pyme/examples/t-edit.py
|
||||
# https://searchcode.com/codesearch/view/20535820/
|
||||
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS
|
||||
# You can get the prompt identifiers and status indicators without grokking the source
|
||||
# by first interactively performing the type of edit(s) you want to do with this command:
|
||||
# gpg --expert --status-fd 2 --command-fd 2 --edit-key <KEY_ID>
|
||||
# Per:
|
||||
# https://lists.gnupg.org/pipermail/gnupg-users/2002-April/012630.html
|
||||
# https://lists.gt.net/gnupg/users/9544
|
||||
# https://raymii.org/s/articles/GPG_noninteractive_batch_sign_trust_and_send_gnupg_keys.html
|
||||
class _KeyEditor(object):
|
||||
def __init__(self, optmap):
|
||||
self.replied_once = False # This is used to handle the first prompt vs. the last
|
||||
self.optmap = optmap
|
||||
|
||||
def editKey(self, status, args, out):
|
||||
result = None
|
||||
out.seek(0, 0)
|
||||
def mapDict(m, d):
|
||||
return(reduce(operator.getitem, m, d))
|
||||
if args == 'keyedit.prompt' and self.replied_once:
|
||||
result = 'quit'
|
||||
elif status == 'KEY_CONSIDERED':
|
||||
result = None
|
||||
self.replied_once = False
|
||||
elif status == 'GET_LINE':
|
||||
self.replied_once = True
|
||||
_ilist = args.split('.')
|
||||
result = mapDict(_ilist, self.optmap['prompts'])
|
||||
if not result:
|
||||
result = None
|
||||
return(result)
|
||||
|
||||
def _getEditPrompt(key, trust, cmd, uid = None):
|
||||
if not uid:
|
||||
uid = key.uids[0]
|
||||
# This mapping defines the default "answers" to the gpgme key editing.
|
||||
# https://www.apt-browse.org/browse/debian/wheezy/main/amd64/python-pyme/1:0.8.1-2/file/usr/share/doc/python-pyme/examples/t-edit.py
|
||||
# https://searchcode.com/codesearch/view/20535820/
|
||||
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS
|
||||
# You can get the prompt identifiers and status indicators without grokking the source
|
||||
# by first interactively performing the type of edit(s) you want to do with this command:
|
||||
# gpg --status-fd 2 --command-fd 2 --edit-key <KEY_ID>
|
||||
if trust >= gpg.constants.validity.FULL: # For tsigning, it only prompts for two trust levels:
|
||||
_loctrust = 2 # "I trust fully"
|
||||
else:
|
||||
_loctrust = 1 # "I trust marginally"
|
||||
# TODO: make the trust depth configurable. 1 is probably the safest, but we try to guess here.
|
||||
# "Full" trust is a pretty big thing.
|
||||
if trust >= gpg.constants.validity.FULL:
|
||||
_locdepth = 2 # Allow +1 level of trust extension
|
||||
else:
|
||||
_locdepth = 1 # Only trust this key
|
||||
# The check level.
|
||||
# (0) I will not answer. (default)
|
||||
# (1) I have not checked at all.
|
||||
# (2) I have done casual checking.
|
||||
# (3) I have done very careful checking.
|
||||
# Since we're running this entirely non-interactively, we really should use 1.
|
||||
_chk_lvl = 1
|
||||
_map = {
|
||||
# Valid commands
|
||||
'cmds': ['trust', 'fpr', 'sign', 'tsign', 'lsign', 'nrsign', 'grip', 'list',
|
||||
'uid', 'key', 'check', 'deluid', 'delkey', 'delsig', 'pref', 'showpref',
|
||||
'revsig', 'enable', 'disable', 'showphoto', 'clean', 'minimize', 'save',
|
||||
'quit'],
|
||||
# Prompts served by the interactive session, and a map of their responses.
|
||||
# It's expanded in the parent call, but the prompt is actually in the form of e.g.:
|
||||
# keyedit.save (we expand that to a list and use that list as a "path" in the below dict)
|
||||
# We *could* just use a flat dict of full prompt to constants, but this is a better visual segregation &
|
||||
# prevents unnecessary duplication.
|
||||
'prompts': {
|
||||
'edit_ownertrust': {'value': str(trust), # Pulled at time of call
|
||||
'set_ultimate': {'okay': 'yes'}}, # If confirming ultimate trust, we auto-answer yes
|
||||
'untrusted_key': {'override': 'yes'}, # We don't care if it's untrusted
|
||||
'pklist': {'user_id': {'enter': uid.uid}}, # Prompt for a user ID - can we use the full uid string? (tsign)
|
||||
'sign_uid': {'class': str(_chk_lvl), # The certification/"check" level
|
||||
'okay': 'yes'}, # Are you sure that you want to sign this key with your key..."
|
||||
'trustsig_prompt': {'trust_value': str(_loctrust), # This requires some processing; see above
|
||||
'trust_depth': str(_locdepth), # The "depth" of the trust signature.
|
||||
'trust_regexp': None}, # We can "Restrict" trust to certain domains if we wanted.
|
||||
'keyedit': {'prompt': cmd, # Initiate trust editing (or whatever)
|
||||
'save': {'okay': 'yes'}}}} # Save if prompted
|
||||
return(_map)
|
||||
|
||||
|
||||
|
||||
class GPGHandler(object):
|
||||
def __init__(self, gnupg_homedir = None, key_id = None, keyservers = None):
|
||||
self.home = gnupg_homedir
|
||||
self.key_id = key_id
|
||||
self.keyservers = keyservers
|
||||
if self.home:
|
||||
self._prep_home()
|
||||
else:
|
||||
self._check_home()
|
||||
self.ctx = self.GetContext(home_dir = self.home)
|
||||
self._orig_kl_mode = self.ctx.get_keylist_mode()
|
||||
self.mykey = None
|
||||
self.subkey = None
|
||||
if self.key_id:
|
||||
self.mykey = self.ctx.get_key(self.key_id, secret = True)
|
||||
for s in self.mykey.subkeys:
|
||||
if s.can_sign:
|
||||
self.subkey = s
|
||||
self.ctx.signers = [self.mykey]
|
||||
break
|
||||
|
||||
def _check_home(self, home = None):
|
||||
if not home:
|
||||
home = self.home
|
||||
if not home:
|
||||
self.home = os.environ.get('GNUPGHOME', '~/.gnupg')
|
||||
home = self.home
|
||||
self._prep_home(home)
|
||||
return()
|
||||
|
||||
def _prep_home(self, home = None):
|
||||
if not home:
|
||||
home = self.home
|
||||
if not home:
|
||||
self.home = os.environ.get('GNUPGHOME', '~/.gnupg')
|
||||
self.home = os.path.abspath(os.path.expanduser(self.home))
|
||||
if os.path.isdir(self.home):
|
||||
_exists = True
|
||||
else:
|
||||
_exists = False
|
||||
_uid = os.getuid()
|
||||
_gid = os.getgid()
|
||||
try:
|
||||
os.makedirs(self.home, exist_ok = True)
|
||||
os.chown(self.home, _uid, _gid)
|
||||
os.chmod(self.home, 0o700)
|
||||
except PermissionError:
|
||||
# It's alright; it's HOPEFULLY already created.
|
||||
if not _exists:
|
||||
raise PermissionError('We need a GnuPG home directory we can '
|
||||
'write to')
|
||||
# TODO: write gpg.conf, parse existing one and write changes if needed.
|
||||
# Should use SHA512 etc. See:
|
||||
# https://spin.atomicobject.com/2013/11/24/secure-gpg-keys-guide/
|
||||
# https://github.com/BetterCrypto/Applied-Crypto-Hardening/blob/master/src/configuration/GPG/GnuPG/gpg.conf
|
||||
# https://riseup.net/en/security/message-security/openpgp/best-practices
|
||||
# And explicitly set keyservers if present in params.
|
||||
return()
|
||||
|
||||
def GetContext(self, **kwargs):
|
||||
ctx = gpg.Context(**kwargs)
|
||||
return(ctx)
|
||||
|
||||
def CreateKey(self, name, algo, keysize, email = None, comment = None, passwd = None, key = None, expiry = None):
|
||||
userid = name
|
||||
userid += ' ({0})'.format(comment) if comment else ''
|
||||
userid += ' <{0}>'.format(email) if email else ''
|
||||
if not expiry:
|
||||
expires = False
|
||||
else:
|
||||
expires = True
|
||||
params = {'algorithm': _algmaps[algo].format(keysize = keysize),
|
||||
'expires': expires,
|
||||
'expires_in': (_epoch_helper(expiry) if expires else 0),
|
||||
'sign': True,
|
||||
'passphrase': passwd}
|
||||
if not key:
|
||||
self.mykey = self.ctx.get_key(self.ctx.create_key(userid, **params).fpr)
|
||||
self.subkey = self.mykey.subkeys[0]
|
||||
else:
|
||||
if not self.mykey:
|
||||
self.mykey = self.ctx.get_key(self.ctx.create_key(userid, **params).fpr)
|
||||
self.subkey = self.ctx.get_key(self.ctx.create_subkey(self.mykey, **params).fpr)
|
||||
self.ctx.signers = [self.subkey]
|
||||
return()
|
||||
|
||||
def ListSigs(self, sig_data):
|
||||
key_ids = []
|
||||
# Currently as of May 13, 2018 there's no way using the GPGME API to do
|
||||
# the equivalent of the CLI's --list-packets. https://dev.gnupg.org/T3734
|
||||
# https://lists.gnupg.org/pipermail/gnupg-users/2018-January/059708.html
|
||||
# https://lists.gnupg.org/pipermail/gnupg-users/2018-January/059715.html
|
||||
# We use the "workaround" in:
|
||||
# https://lists.gnupg.org/pipermail/gnupg-users/2018-January/059711.html
|
||||
try:
|
||||
self.ctx.verify(sig_data)
|
||||
except gpg.errors.BadSignatures as sig_except:
|
||||
for line in [i.strip() for i in str(sig_except).splitlines()]:
|
||||
l = [i.strip() for i in line.split(':')]
|
||||
key_ids.append(l[0])
|
||||
return(key_ids)
|
||||
|
||||
def GetSigs(self, data_in, sig_data = None, verify_keys = None):
|
||||
signers = []
|
||||
if verify_keys:
|
||||
# Raises gpg.errors.BadSignatures if any are invalid.
|
||||
# Unlike Verify below, this will raise an exception.
|
||||
signers = verify_keys
|
||||
if sig_data:
|
||||
# Detached sig
|
||||
sig = self.ctx.verify(data_in, signature = sig_data, verify = signers)
|
||||
else:
|
||||
# Cleartext? or "normal" signatures (embedded)
|
||||
sig = self.ctx.verify(data_in, verify = signers)
|
||||
return(sig)
|
||||
|
||||
def GetKeysigs(self, pubkey):
|
||||
sigs = {}
|
||||
fpr = (pubkey if isinstance(pubkey, str) else pubkey.fpr)
|
||||
keys = list(self.ctx.keylist(fpr, mode = (gpg.constants.keylist.mode.LOCAL | gpg.constants.keylist.mode.SIGS)))
|
||||
for idx1, k in enumerate(keys):
|
||||
sigs[k.fpr] = {}
|
||||
for idx2, u in enumerate(k.uids):
|
||||
sigs[k.fpr][u.uid] = {}
|
||||
for idx3, sig in enumerate(u.signatures):
|
||||
signer = getattr(sig, 'keyid')
|
||||
sigs[k.fpr][u.uid][signer] = {}
|
||||
for a in _keysig_attrs:
|
||||
if a == 'keyid':
|
||||
continue
|
||||
sigs[k.fpr][u.uid][signer][a] = getattr(sig, a)
|
||||
return(sigs)
|
||||
|
||||
def CheckSigs(self, sig, sigkeys = None):
|
||||
# sig should be a GetSigs result.
|
||||
is_valid = True
|
||||
# See self.CheckSigs().
|
||||
# https://www.gnupg.org/documentation/manuals/gpgme/Verify.html
|
||||
# https://github.com/micahflee/torbrowser-launcher/issues/262#issuecomment-284342876
|
||||
sig = sig[1]
|
||||
result = {}
|
||||
_keys = [s.fpr.upper() for s in sig.signatures]
|
||||
if sigkeys:
|
||||
if isinstance(sigkeys, str):
|
||||
sigkeys = [sigkeys.upper()]
|
||||
elif isinstance(sigkeys, list):
|
||||
_sigkeys = []
|
||||
for s in sigkeys[:]:
|
||||
if isinstance(s, str):
|
||||
_sigkeys.append(s.upper())
|
||||
elif isinstance(s, gpgme._gpgme_key):
|
||||
_sigkeys.append(s.fpr)
|
||||
else:
|
||||
continue
|
||||
sigkeys = _sigkeys
|
||||
elif isinstance(sigkeys, gpgme._gpgme_key):
|
||||
sigkeys = [sigkeys.fpr]
|
||||
else:
|
||||
raise ValueError('sigkeys must be a key fingerprint or a key object (or a list of those).')
|
||||
if not set(sigkeys).issubset(_keys):
|
||||
raise ValueError('All specified keys were not present in the signature.')
|
||||
for s in sig.signatures:
|
||||
fpr = getattr(s, 'fpr')
|
||||
result[fpr] = {}
|
||||
for a in _sig_attrs:
|
||||
if a == 'fpr':
|
||||
continue
|
||||
result[fpr][a] = getattr(s, a)
|
||||
# Now we do some logic to determine if the sig is "valid".
|
||||
# Note that we can get confidence level by &'ing "validity" attr against gpg.constants.validity.*
|
||||
# Or just doing a <, >, <=, etc. operation since it's a sequential list of constants levels, not bitwise.
|
||||
# For now, we just check if it's valid or not, not "how valid" it is (how much we can trust it).
|
||||
_status = s.summary
|
||||
if not _get_sig_isgood(_status):
|
||||
result[fpr]['valid'] = False
|
||||
else:
|
||||
result[fpr]['valid'] = True
|
||||
if sigkeys:
|
||||
for k in sigkeys:
|
||||
if (k not in result) or (not result[k]['valid']):
|
||||
is_valid = False
|
||||
break
|
||||
else: # is_valid is satisfied by at LEAST one valid sig.
is_valid = any(result[k]['valid'] for k in result)
|
||||
return(is_valid, result)
|
||||
|
||||
def Sign(self, data_in, ascii = True, mode = 'detached', notations = None):
|
||||
# notations is a list of dicts via notation format:
|
||||
# {<namespace>: {'value': 'some string', 'flags': BITWISE_OR_FLAGS}}
|
||||
# See RFC 4880 § 5.2.3.16 for valid user namespace format.
|
||||
if mode.startswith('d'):
|
||||
mode = gpg.constants.SIG_MODE_DETACH
|
||||
elif mode.startswith('c'):
|
||||
mode = gpg.constants.SIG_MODE_CLEAR
|
||||
elif mode.startswith('n'):
|
||||
mode = gpg.constants.SIG_MODE_NORMAL
|
||||
self.ctx.armor = ascii
|
||||
if not isinstance(data_in, bytes):
|
||||
if isinstance(data_in, str):
|
||||
data_in = data_in.encode('utf-8')
|
||||
else:
|
||||
# We COULD try serializing to JSON here, or converting to a pickle object,
|
||||
# or testing for other classes, etc. But we don't.
|
||||
# TODO?
|
||||
data_in = repr(data_in).encode('utf-8')
|
||||
data_in = gpg.Data(data_in)
|
||||
if notations:
|
||||
for n in notations:
|
||||
if not utils.valid().gpgsigNotation(n):
|
||||
raise ValueError('Malformatted notation: {0}'.format(n))
|
||||
for ns in n:
|
||||
self.ctx.sig_notation_add(ns, n[ns]['value'], n[ns]['flags'])
|
||||
# data_in *always* must be a bytes (or bytes-like?) object.
|
||||
# It will *always* return a bytes object.
|
||||
sig = self.ctx.sign(data_in, mode = mode)
|
||||
# And we need to clear the sig notations, otherwise they'll apply to the next signature this context makes.
|
||||
self.ctx.sig_notation_clear()
|
||||
return(sig)
|
||||
|
||||
def ImportPubkey(self, pubkey):
|
||||
fpr = (pubkey if isinstance(pubkey, str) else pubkey.fpr)
|
||||
try:
|
||||
self.ctx.get_key(fpr)
|
||||
return() # already imported
|
||||
except gpg.errors.KeyNotFound:
|
||||
pass
|
||||
_dflt_klm = self.ctx.get_keylist_mode()
|
||||
self.ctx.set_keylist_mode(gpg.constants.keylist.mode.EXTERN)
|
||||
if isinstance(pubkey, gpgme._gpgme_key):
|
||||
self.ctx.op_import_keys([pubkey])
|
||||
elif isinstance(pubkey, str):
|
||||
if not utils.valid().gpgkeyID(pubkey):
|
||||
raise ValueError('{0} is not a valid key or fingerprint'.format(pubkey))
|
||||
pubkey = self.ctx.get_key(fpr)
|
||||
self.ctx.op_import_keys([pubkey])
|
||||
self.ctx.set_keylist_mode(_dflt_klm)
|
||||
self.SignKey(pubkey)
|
||||
return()
|
||||
|
||||
def ImportPubkeyFromFile(self, pubkey_data):
|
||||
_fpath = os.path.abspath(os.path.expanduser(pubkey_data))
|
||||
if os.path.isfile(_fpath):
|
||||
with open(_fpath, 'rb') as f:
|
||||
k = self.ctx.key_import(f.read())
|
||||
else:
|
||||
k = self.ctx.key_import(pubkey_data)
|
||||
pubkey = self.ctx.get_key(k)
|
||||
self.SignKey(pubkey)
|
||||
return()
|
||||
|
||||
def SignKey(self, pubkey, local = False, notations = None):
|
||||
# notations is a list of dicts via notation format:
|
||||
# {<namespace>: {'value': 'some string', 'flags': BITWISE_OR_FLAGS}}
|
||||
# See RFC 4880 § 5.2.3.16 for valid user namespace format.
|
||||
if isinstance(pubkey, gpgme._gpgme_key):
|
||||
pass
|
||||
elif isinstance(pubkey, str):
|
||||
if not utils.valid().gpgkeyID(pubkey):
|
||||
raise ValueError('{0} is not a valid fingerprint'.format(pubkey))
|
||||
else:
|
||||
pubkey = self.ctx.get_key(pubkey)
|
||||
if notations:
|
||||
for n in notations:
|
||||
if not utils.valid().gpgsigNotation(n):
|
||||
raise ValueError('Malformatted notation: {0}'.format(n))
|
||||
for ns in n:
|
||||
self.ctx.sig_notation_add(ns, n[ns]['value'], n[ns]['flags'])
|
||||
self.ctx.key_sign(pubkey, local = local)
|
||||
self.TrustKey(pubkey)
|
||||
# And we need to clear the sig notations, otherwise they'll apply to the next signature this context makes.
|
||||
self.ctx.sig_notation_clear()
|
||||
return()
|
||||
|
||||
def TrustKey(self, pubkey, trust = gpg.constants.validity.FULL):
|
||||
# We use full as the default because signatures aren't considered valid otherwise.
|
||||
# TODO: we need a way of maybe reverting/rolling back any changes we do?
|
||||
output = gpg.Data()
|
||||
_map = _getEditPrompt(pubkey, trust, 'trust')
|
||||
self.ctx.interact(pubkey, _KeyEditor(_map).editKey, sink = output, fnc_value = output)
|
||||
output.seek(0, 0)
|
||||
return()
|
||||
|
||||
def ExportPubkey(self, fpr, ascii = True, sigs = False):
|
||||
orig_armor = self.ctx.armor
|
||||
self.ctx.armor = ascii
|
||||
if sigs:
|
||||
export_mode = 0
|
||||
else:
|
||||
export_mode = gpg.constants.EXPORT_MODE_MINIMAL # default is 0; minimal strips signatures
|
||||
kb = gpg.Data()
|
||||
self.ctx.op_export_keys([self.ctx.get_key(fpr)], export_mode, kb)
|
||||
kb.seek(0, 0)
|
||||
self.ctx.armor = orig_armor
|
||||
return(kb.read())
|
||||
|
||||
def DeleteKey(self, pubkey):
|
||||
if isinstance(pubkey, gpgme._gpgme_key):
|
||||
pass
|
||||
elif isinstance(pubkey, str):
|
||||
if not utils.valid().gpgkeyID(pubkey):
|
||||
raise ValueError('{0} is not a valid fingerprint'.format(pubkey))
|
||||
else:
|
||||
pubkey = self.ctx.get_key(pubkey)
|
||||
self.ctx.op_delete(pubkey, False)
|
||||
return()
|
||||
|
||||
def Verify(self, sig_data, data):
|
||||
# This is a more "flat" version of CheckSigs.
|
||||
# First we need to parse the sig(s) and import the key(s) to our keyring.
|
||||
signers = self.ListSigs(sig_data)
|
||||
for signer in signers:
|
||||
self.ImportPubkey(signer)
|
||||
try:
|
||||
self.ctx.verify(data, signature = sig_data, verify = signers)
|
||||
return(True)
|
||||
except gpg.errors.BadSignatures as err:
|
||||
return(False)
|
||||
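The GPGHandler class above wraps the gpg (GPGME) bindings for keyring preparation, key creation, signing, and verification. A minimal usage sketch, assuming it is run from inside bdisk/ so the local `import utils` in GPG.py resolves; the home directory, name, and address below are placeholders:

```python
import GPG  # the module added in this diff; run from inside bdisk/

g = GPG.GPGHandler(gnupg_homedir = '/tmp/bdisk-gnupg')   # placeholder path
g.CreateKey('BDisk Builder', 'rsa', 4096,
            email = 'builder@example.com',               # placeholder address
            comment = 'automated ISO signing')
detached_sig = g.Sign(b'contents of a checksum file', mode = 'detached')
```

Verify() then takes the detached signature plus the signed data and returns True/False, while CheckSigs() reports per-key detail from a GetSigs() result.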

bdisk/SSL.py (new file): 12 additions
@@ -0,0 +1,12 @@
import OpenSSL
# https://cryptography.io/en/latest/x509/reference/#cryptography.x509.CertificateBuilder.sign
# migrate old functions of bSSL to use cryptography
# but still waiting on their recipes.
# https://cryptography.io/en/latest/x509/tutorial/
#import OpenSSL
#k = OpenSSL.crypto.PKey()
#k.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
#x = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
#                                   k,
#                                   cipher = 'aes256',
#                                   passphrase = 'test')
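The commented-out pyOpenSSL snippet above generates a 4096-bit RSA key and dumps it encrypted. A minimal sketch of the same step with the cryptography package, the migration target named in the comments; the key size and 'test' passphrase simply mirror the snippet:

```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# Equivalent of OpenSSL.crypto.PKey() + generate_key(TYPE_RSA, 4096).
key = rsa.generate_private_key(public_exponent = 65537, key_size = 4096)

# Equivalent of dump_privatekey(FILETYPE_PEM, k, cipher = 'aes256', passphrase = 'test').
pem = key.private_bytes(encoding = serialization.Encoding.PEM,
                        format = serialization.PrivateFormat.TraditionalOpenSSL,
                        encryption_algorithm = serialization.BestAvailableEncryption(b'test'))
```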

bdisk/UEFI.py (new file): 3 additions
@@ -0,0 +1,3 @@
import os
import shutil
import subprocess

bdisk/__init__.py (new file): 30 additions
@@ -0,0 +1,30 @@
import os
import platform
import sys

"""
BDisk - An easy liveCD creator built in python.
"""

# BDisk is only supported on Python 3.4 and up.
if sys.version_info.major != 3:
    raise RuntimeError('BDisk is only supported on Python 3')
elif sys.version_info.minor <= 3:
    raise RuntimeError('BDisk is only supported on Python 3.4 and up')

# BDisk is only supported on GNU/Linux. There *might* be a way to make it work
# with certain *BSDs, but if that's possible at all it'll have to come at a
# later date. Patches welcome.
# I'd have to find out how to manipulate/create FAT filesystems and such as
# well.
# I'd be curious to see if I can get it working in Cygwin or WSL:
# https://docs.microsoft.com/en-us/windows/wsl/install-win10
# And maybe, if we're really pie-in-the-sky, macOS's Fink/Homebrew/Macports.
if platform.system() != 'Linux':
    raise RuntimeError('BDisk is currently only supported on GNU/Linux')

# CURRENTLY, we require root user because of the chroots and such. However,
# there should be creative ways to do this with cgroups as a regular user in
# the future. Patches welcome (or at least some input).
if os.geteuid() != 0:
    raise PermissionError('BDisk currently requires root privileges')

bdisk/bdisk.xsd (new file): 933 additions
@@ -0,0 +1,933 @@
<?xml version="1.0" encoding="UTF-8" ?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
           targetNamespace="http://bdisk.square-r00t.net/"
           xmlns="http://bdisk.square-r00t.net/"
           elementFormDefault="qualified">

    <!-- CUSTOM TYPES -->
    <!-- t_btag_uri: a string that will allow btags (xpath or variable only) or a URI string (but NOT a URN). -->
    <!-- We can't use xs:anyURI because it is too loose (allows things like relative paths, etc.) -->
    <!-- but ALSO too restrictive in that btags fail validation ({ and } are invalid for anyURI, -->
    <!-- ironically). -->
    <xs:simpleType name="t_btag_uri">
        <xs:restriction base="xs:string">
            <xs:pattern value="\w+:(/?/?)[^\s]+"/>
            <xs:pattern value=".*\{variable%[A-Za-z0-9_]\}.*"/>
            <xs:pattern value=".*\{xpath%["'A-Za-z0-9_/\(\)\.\*@\-\[\]=]+\}.*"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_btag_uri -->
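The btag placeholders above ({variable%...} and {xpath%...} embedded in otherwise ordinary values) recur throughout this schema. A minimal sketch of expanding the {variable%...} form against a profile's <variables> section; the expand_variables() helper is illustrative only and not part of this diff (xpath btags would instead be resolved against the parsed XML tree):

```python
import re

# Illustrative expansion of {variable%name} btags from a dict built out of
# <variable id="...">...</variable> elements.
_var_btag = re.compile(r'\{variable%(?P<name>[A-Za-z0-9_]+)\}')

def expand_variables(value, variables):
    return(_var_btag.sub(lambda m: variables[m.group('name')], value))

print(expand_variables('https://{variable%mirror}/bdisk/', {'mirror': 'mirror.example.com'}))
```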
<!-- t_filename: a POSIX fully-portable filename. -->
|
||||
<xs:simpleType name="t_filename">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="([a-z0-9._-]+){1,255}"/>
|
||||
<xs:pattern value=".*\{variable%[A-Za-z0-9_]\}.*"/>
|
||||
<xs:pattern value=".*\{xpath%["'A-Za-z0-9_/\(\)\.\*@\-\[\]=]+\}.*"/>
|
||||
<!-- We don't allow (string)(regex) or (regex)(string) or (string)(regex)(string) or multiple regexes -->
|
||||
<!-- because that's just... not feasible to manage from a parsing perspective. -->
|
||||
<xs:pattern value="\{regex%.+\}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_filename -->
|
||||
|
||||
<!-- t_gpg_keyid: a set of various patterns that match GPG key IDs. -->
|
||||
<xs:simpleType name="t_gpg_keyid">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="(none|new)"/>
|
||||
<xs:pattern value="(auto|default)"/>
|
||||
<xs:pattern value="(0x)?[0-9A-Fa-f]{40}"/>
|
||||
<xs:pattern value="(0x)?[0-9A-Fa-f]{16}"/>
|
||||
<xs:pattern value="(0x)?[0-9A-Fa-f]{8}"/>
|
||||
<xs:pattern value="([0-9A-Fa-f ]{4}){5} ?([0-9A-Fa-f ]{4}){4}[0-9A-Fa-f]{4}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_gpg_keyid -->
|
||||
|
||||
<!-- t_gpg_keyid_list: a type for a list of key IDs. -->
|
||||
<xs:simpleType name="t_gpg_keyid_list">
|
||||
<xs:list itemType="t_gpg_keyid"/>
|
||||
</xs:simpleType>
|
||||
<!-- END t_gpg_key_list -->
|
||||
|
||||
<!-- t_net_loc: a remote host. Used for PKI Subject's commonName and host for rsync. -->
|
||||
<xs:simpleType name="t_net_loc">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern
|
||||
value="(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_net_loc -->
|
||||
|
||||
<!-- t_pass_hash_algo: used for t_password. -->
|
||||
<xs:simpleType name="t_pass_hash_algo">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="des"/>
|
||||
<xs:enumeration value="md5"/>
|
||||
<xs:enumeration value="sha256"/>
|
||||
<xs:enumeration value="sha512"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_pass_hash_algo -->
|
||||
|
||||
<!-- t_pass_salt: used for t_password. -->
|
||||
<xs:simpleType name="t_pass_salt">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="($[156]($rounds=[0-9]+)?$[a-zA-Z0-9./]{1,16}$?|auto|)"/>
|
||||
<xs:pattern value="\{variable%[A-Za-z0-9_]\}"/>
|
||||
<xs:pattern value="\{xpath%["'A-Za-z0-9_\(\)\.\*\-/\[\]=]+\}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_pass_salt -->
|
||||
|
||||
<!-- t_password: used for rootpass and user/password elements. -->
|
||||
<xs:complexType name="t_password">
|
||||
<!-- The below will need some fleshing out and testing. It may not be possible strictly via XSD. -->
|
||||
<!-- TODO: restrict the value further with a union or multi-group regex that checks for a valid length? -->
|
||||
<!-- des: ????? -->
|
||||
<!-- md5: "[a-zA-Z0-9./]{22}" -->
|
||||
<!-- sha256: "[a-zA-Z0-9./]{43}" -->
|
||||
<!-- sha512: "[a-zA-Z0-9./]{86}" -->
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="xs:string">
|
||||
<xs:attribute name="hash_algo" type="t_pass_hash_algo" use="optional"/>
|
||||
<xs:attribute name="hashed" type="xs:boolean" use="required"/>
|
||||
<xs:attribute name="salt" type="t_pass_salt" use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
<!-- END t_password -->
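t_password and t_pass_salt above describe crypt(3)-style values ($1$/$5$/$6$ prefixes, an optional rounds= field, then the salt and hash). A minimal sketch of generating a conforming sha512 value with Python's crypt module (POSIX-only); the passphrase is a placeholder:

```python
import crypt

# Produces e.g. "$6$<salt>$<hash>", i.e. material for hashed="true" hash_algo="sha512".
salt = crypt.mksalt(crypt.METHOD_SHA512)
print(crypt.crypt('SomePlaceholderPassphrase', salt))
```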
|
||||
|
||||
<!-- t_path: for specifying subdirectories (either local filesystem or remote paths). -->
|
||||
<xs:simpleType name="t_path">
|
||||
<xs:restriction base="xs:string">
|
||||
<!-- We include blank to operate on default actions (or default filepaths). -->
|
||||
<xs:pattern value=""/>
|
||||
<xs:pattern value="(.+)/([^/]+)"/>
|
||||
<xs:pattern value="((.+)/([^/]+))?\{variable%[A-Za-z0-9_]\}((.+)/([^/]+))?"/>
|
||||
<xs:pattern value="((.+)/([^/]+))?\{xpath%["'A-Za-z0-9_\(\)\.\*\-/\[\]=]+\}((.+)/([^/]+))?"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_path -->
|
||||
|
||||
<!-- t_pki_cert: used for pki/ca/cert and pki/client/cert. -->
|
||||
<xs:complexType name="t_pki_cert">
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_path">
|
||||
<xs:attribute name="hash_algo" use="required">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="blake2b512"/>
|
||||
<xs:enumeration value="blake2s256"/>
|
||||
<xs:enumeration value="gost"/>
|
||||
<xs:enumeration value="md4"/>
|
||||
<xs:enumeration value="md5"/>
|
||||
<xs:enumeration value="mdc2"/>
|
||||
<xs:enumeration value="rmd160"/>
|
||||
<xs:enumeration value="sha1"/>
|
||||
<xs:enumeration value="sha224"/>
|
||||
<xs:enumeration value="sha256"/>
|
||||
<xs:enumeration value="sha384"/>
|
||||
<xs:enumeration value="sha512"/>
|
||||
<xs:enumeration value="none"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
<!-- END t_pki_cert -->
|
||||
|
||||
<!-- t_pki_key: used for pki/ca/key and pki/client/key -->
|
||||
<xs:complexType name="t_pki_key">
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_path">
|
||||
<xs:attribute name="cipher" use="required">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="aes128"/>
|
||||
<xs:enumeration value="aes192"/>
|
||||
<xs:enumeration value="bf"/>
|
||||
<xs:enumeration value="blowfish"/>
|
||||
<xs:enumeration value="camellia128"/>
|
||||
<xs:enumeration value="camellia192"/>
|
||||
<xs:enumeration value="camellia256"/>
|
||||
<xs:enumeration value="des"/>
|
||||
<xs:enumeration value="rc2"/>
|
||||
<xs:enumeration value="seed"/>
|
||||
<xs:enumeration value="none"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
<xs:attribute name="passphrase" type="xs:string"/>
|
||||
<xs:attribute name="keysize"
|
||||
type="xs:positiveInteger"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
<!-- END t_pki_key -->
|
||||
|
||||
<!-- t_pki_subject: used for pki/ca/subject and pki/client/subject -->
|
||||
<xs:complexType name="t_pki_subject">
|
||||
<xs:all>
|
||||
<!-- .../SUBJECT/COMMONNAME -->
|
||||
<xs:element name="commonName" type="t_net_loc"/>
|
||||
<!-- END .../SUBJECT/COMMONNAME -->
|
||||
<!-- .../SUBJECT/COUNTRYNAME -->
|
||||
<xs:element name="countryName">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<!-- We can't validate an actual ISO-3166 ALPHA-2 code, but we can validate the format. -->
|
||||
<!-- TODO: maybe cron the generation of an external namespace? -->
|
||||
<xs:pattern value="[A-Z]{2}"/>
|
||||
<xs:pattern value=".*\{variable%[A-Za-z0-9_]\}.*"/>
|
||||
<xs:pattern value=".*\{xpath%["'A-Za-z0-9_/\(\)\.\*@\-\[\]=]+\}.*"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END .../SUBJECT/COUNTRYNAME -->
|
||||
<!-- .../SUBJECT/LOCALITYNAME -->
|
||||
<xs:element name="localityName" type="xs:string"/>
|
||||
<!-- END .../SUBJECT/LOCALITYNAME -->
|
||||
<!-- .../SUBJECT/STATEORPROVINCENAME -->
|
||||
<xs:element name="stateOrProvinceName"
|
||||
type="xs:string"/>
|
||||
<!-- END .../SUBJECT/STATEORPROVINCENAME -->
|
||||
<!-- .../SUBJECT/ORGANIZATION -->
|
||||
<xs:element name="organization" type="xs:string"/>
|
||||
<!-- END .../SUBJECT/ORGANIZATION -->
|
||||
<!-- .../SUBJECT/ORGANIZATIONALUNITNAME -->
|
||||
<xs:element name="organizationalUnitName"
|
||||
type="xs:string"/>
|
||||
<!-- END .../SUBJECT/ORGANIZATIONALUNITNAME -->
|
||||
<!-- .../SUBJECT/EMAILADDRESS -->
|
||||
<xs:element name="emailAddress" type="xs:string"/>
|
||||
<!-- END .../SUBJECT/EMAILADDRESS -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
<!-- END t_pki_subject -->
|
||||
|
||||
<!-- t_remote_file: an element that lets us define both a file pattern for remote content and flags attribute. -->
|
||||
<xs:complexType name="t_remote_file">
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_filename">
|
||||
<xs:attribute name="flags" type="t_remote_file_flags" use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
<!-- END t_remote_file -->
|
||||
|
||||
<!-- t_remote_file_flags: a type to match a list of known flags. -->
|
||||
<xs:simpleType name="t_remote_file_flags">
|
||||
<xs:list>
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<!-- Currently we only support two flags. -->
|
||||
<xs:enumeration value="regex"/>
|
||||
<xs:enumeration value="latest"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:list>
|
||||
</xs:simpleType>
|
||||
<!-- END t_remote_file_flags -->
|
||||
|
||||
<!-- t_username: enforce a POSIX-compliant username. Used for user/username elements. -->
|
||||
<xs:simpleType name="t_username">
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}$)"/>
|
||||
<xs:pattern value="\{variable%[A-Za-z0-9_]\}"/>
|
||||
<xs:pattern value="\{xpath%["'A-Za-z0-9_\(\)\.\*\-/\[\]=]+\}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
<!-- END t_username -->
|
||||
<!-- END CUSTOM TYPES -->
|
||||
|
||||
<!-- ROOT ELEMENT ("BDISK") -->
|
||||
<xs:element name="bdisk">
|
||||
<xs:complexType>
|
||||
<!-- Should this be xs:sequence instead? -->
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE -->
|
||||
<xs:element name="profile" maxOccurs="unbounded" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/META -->
|
||||
<xs:element name="meta" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/META/NAMES -->
|
||||
<xs:element name="names" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/META/NAMES/NAME -->
|
||||
<xs:element name="name" maxOccurs="1" minOccurs="1">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="[A-Z0-9]{1,8}"/>
|
||||
<xs:pattern value="\{variable%[A-Za-z0-9_]\}"/>
|
||||
<xs:pattern value="\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/NAMES/NAME -->
|
||||
<!-- BDISK/PROFILE/META/NAMES/UXNAME -->
|
||||
<xs:element name="uxname" maxOccurs="1" minOccurs="1">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<!-- refer to the 2009 POSIX spec, "3.282 Portable Filename Character Set" -->
|
||||
<!-- http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282 -->
|
||||
<!-- (We use this string to name some files.) -->
|
||||
<xs:pattern value="([A-Za-z0-9._-]+){1,255}"/>
|
||||
<xs:pattern value="\{variable%[A-Za-z0-9_]\}"/>
|
||||
<xs:pattern value="\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/NAMES/UXNAME -->
|
||||
<!-- BDISK/PROFILE/META/NAMES/PNAME -->
|
||||
<xs:element name="pname" maxOccurs="1" minOccurs="1">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<!-- TODO: Can I use UTF-8 instead? -->
|
||||
<!-- https://stackoverflow.com/a/9805789/733214 -->
|
||||
<xs:pattern value="\p{IsBasicLatin}*"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/NAMES/PNAME -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/NAMES -->
|
||||
<!-- BDISK/PROFILE/META/DESC -->
|
||||
<xs:element name="desc" maxOccurs="1" minOccurs="1" type="xs:string"/>
|
||||
<!-- END BDISK/PROFILE/META/DESC -->
|
||||
<!-- BDISK/PROFILE/META/DEV -->
|
||||
<xs:element name="dev" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/META/DEV/AUTHOR -->
|
||||
<xs:element name="author" maxOccurs="1" minOccurs="1"
|
||||
type="xs:normalizedString"/>
|
||||
<!-- END BDISK/PROFILE/META/DEV/AUTHOR -->
|
||||
<!-- BDISK/PROFILE/META/DEV/EMAIL -->
|
||||
<!-- The following does NOT WORK. Shame, really. -->
|
||||
<!-- It seems to be an invalid pattern per my XSD validator (xmllint). -->
|
||||
<!--<xs:pattern value="([!#-'*+/-9=?A-Z^-~-]+(\.[!#-'*+/-9=?A-Z^-~-]+)*|"([]!#-[^-~ \t]|(\\[\t -~]))+")@([!#-'*+/-9=?A-Z^-~-]+(\.[!#-'*+/-9=?A-Z^-~-]+)*|\[[\t -Z^-~]*])"/>-->
|
||||
<xs:element name="email" maxOccurs="1" minOccurs="1"
|
||||
type="xs:normalizedString"/>
|
||||
<!-- END BDISK/PROFILE/META/DEV/EMAIL -->
|
||||
<!-- BDISK/PROFILE/META/DEV/WEBSITE -->
|
||||
<xs:element name="website" maxOccurs="1" minOccurs="1"
|
||||
type="t_btag_uri"/>
|
||||
<!-- END BDISK/PROFILE/META/DEV/WEBSITE -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/DEV -->
|
||||
<!-- BDISK/PROFILE/META/URI -->
|
||||
<xs:element name="uri" maxOccurs="1" minOccurs="1" type="t_btag_uri"/>
|
||||
<!-- END BDISK/PROFILE/META/URI -->
|
||||
<!-- BDISK/PROFILE/META/VER -->
|
||||
<xs:element name="ver" maxOccurs="1" minOccurs="1">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:normalizedString">
|
||||
<!-- Like ../names/uxname, this is also used to name certain files so, POSIX portable filename. -->
|
||||
<xs:pattern value="([A-Za-z0-9._-]+){1,255}"/>
|
||||
<xs:pattern value="\{variable%[A-Za-z0-9_]\}"/>
|
||||
<xs:pattern value="\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/VER -->
|
||||
<!-- BDISK/PROFILE/META/MAX_RECURSE -->
|
||||
<xs:element name="max_recurse" maxOccurs="1" minOccurs="1">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:positiveInteger">
|
||||
<xs:maxExclusive value="1000"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/MAX_RECURSE -->
|
||||
<!-- BDISK/PROFILE/META/REGEXES -->
|
||||
<xs:element name="regexes" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/META/REGEXES/PATTERN -->
|
||||
<xs:element name="pattern" maxOccurs="unbounded" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="xs:string">
|
||||
<xs:attribute name="id" type="xs:string"
|
||||
use="required"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/REGEXES/PATTERN -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/REGEXES -->
|
||||
<!-- BDISK/PROFILE/META/VARIABLES -->
|
||||
<xs:element name="variables" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/META/VARIABLES/VARIABLE -->
|
||||
<xs:element name="variable" maxOccurs="unbounded" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="xs:string">
|
||||
<xs:attribute name="id" type="xs:string"
|
||||
use="required"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/VARIABLES/VARIABLE -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META/VARIABLES -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/META -->
|
||||
<!-- BDISK/PROFILE/ACCOUNTS -->
|
||||
<xs:element name="accounts" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/ACCOUNTS/ROOTPASS -->
|
||||
<xs:element name="rootpass" maxOccurs="1" minOccurs="1" type="t_password"/>
|
||||
<!-- END BDISK/PROFILE/ACCOUNTS/ROOTPASS -->
|
||||
<!-- BDISK/PROFILE/ACCOUNTS/USER -->
|
||||
<xs:element name="user" maxOccurs="unbounded" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/ACCOUNTS/USER/USERNAME -->
|
||||
<xs:element name="username" type="t_username" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/ACCOUNTS/USER/USERNAME -->
|
||||
<!-- BDISK/PROFILE/ACCOUNTS/USER/COMMENT -->
|
||||
<!-- https://en.wikipedia.org/wiki/Gecos_field -->
|
||||
<!-- Through experimentation, this *seems* to cap at 990 chars. -->
|
||||
<xs:element name="comment" maxOccurs="1"
|
||||
minOccurs="0">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:normalizedString">
|
||||
<xs:maxLength value="990"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/ACCOUNTS/USER/COMMENT -->
|
||||
<!-- BDISK/PROFILE/ACCOUNTS/USER/PASSWORD -->
|
||||
<xs:element name="password" type="t_password" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/ACCOUNTS/USER/PASSWORD -->
|
||||
</xs:all>
|
||||
<xs:attribute name="sudo" type="xs:boolean" use="optional"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/ACCOUNTS/USER -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/ACCOUNTS -->
|
||||
<!-- BDISK/PROFILE/SOURCES -->
|
||||
<xs:element name="sources" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDisk only supports two different architectures (x86/i686 and x86_64, respectively) currently. -->
|
||||
<!-- TODO: future improvements may let us include e.g. two different x86_64 environments (e.g. CentOS and Debian on the same media), but this is like, still in development stages. -->
|
||||
<!-- BDISK/PROFILE/SOURCES/SOURCE -->
|
||||
<xs:element name="source" minOccurs="1" maxOccurs="2">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- We cheat here. TECHNICALLY it should ONLY be scheme://location (no /path...), but there isn't a data type for that. -->
|
||||
<!-- Currently we enforce only one item. Future BDisk versions may be able to make use of multiple <mirror>s and select best one based on speed. -->
|
||||
<!-- BDISK/PROFILE/SOURCES/SOURCE/MIRROR -->
|
||||
<xs:element name="mirror" type="t_btag_uri" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/SOURCES/SOURCE/MIRROR -->
|
||||
<!-- BDISK/PROFILE/SOURCES/SOURCE/ROOTPATH -->
|
||||
<xs:element name="rootpath" maxOccurs="1" minOccurs="1"
|
||||
type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/SOURCES/SOURCE/ROOTPATH -->
|
||||
<!-- BDISK/PROFILE/SOURCES/SOURCE/TARBALL -->
|
||||
<xs:element name="tarball" maxOccurs="1" minOccurs="1"
|
||||
type="t_remote_file"/>
|
||||
<!-- END BDISK/PROFILE/SOURCES/SOURCE/TARBALL -->
|
||||
<!-- BDISK/PROFILE/SOURCES/SOURCE/CHECKSUM -->
|
||||
<xs:element name="checksum" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_remote_file">
|
||||
<!-- There is NO way we can validate this, because it will vary based on the algorithms supported by the build host. -->
|
||||
<xs:attribute name="hash_algo" type="xs:string"
|
||||
use="required"/>
|
||||
<xs:attribute name="explicit" type="xs:boolean"
|
||||
use="required"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SOURCES/SOURCE/CHECKSUM -->
|
||||
<!-- BDISK/PROFILE/SOURCES/SOURCE/SIG -->
|
||||
<xs:element name="sig" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_remote_file">
|
||||
<!-- Required; otherwise there's no point using it. -->
|
||||
<xs:attribute name="keys" type="t_gpg_keyid_list"
|
||||
use="required"/>
|
||||
<xs:attribute name="keyserver" type="t_btag_uri"
|
||||
use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SOURCES/SOURCE/SIG-->
|
||||
</xs:all>
|
||||
<xs:attribute name="arch" use="required">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="(i686|x86(_64)?|32|64)"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SOURCES/SOURCE -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SOURCES -->
|
||||
<!-- BDISK/PROFILE/PACKAGES -->
|
||||
<xs:element name="packages" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/PACKAGES/PACKAGE -->
|
||||
<xs:element name="package" maxOccurs="unbounded" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="xs:string">
|
||||
<xs:attribute name="version" type="xs:string" use="optional"/>
|
||||
<xs:attribute name="repo" type="xs:string" use="optional"/>
|
||||
<!-- Default is "both" -->
|
||||
<xs:attribute name="arch" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="(i686|x86(_64)?|32|64|both)"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/PACKAGES/PACKAGE -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/PACKAGES -->
|
||||
<!-- BDISK/PROFILE/SERVICES -->
|
||||
<xs:element name="services" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/SERVICES/SERVICE -->
|
||||
<xs:element name="service" maxOccurs="unbounded" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="xs:string">
|
||||
<xs:attribute name="enabled" type="xs:boolean" use="required"/>
|
||||
<xs:attribute name="blacklisted" type="xs:boolean" use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SERVICES/SERVICE -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SERVICES -->
|
||||
<!-- BDISK/PROFILE/BUILD -->
|
||||
<xs:element name="build" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS -->
|
||||
<xs:element name="paths">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/BASE -->
|
||||
<xs:element name="base" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/BASE -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/CACHE -->
|
||||
<xs:element name="cache" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/CACHE -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/CHROOT -->
|
||||
<xs:element name="chroot" maxOccurs="1" minOccurs="1"
|
||||
type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/CHROOT -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/OVERLAY -->
|
||||
<xs:element name="overlay" maxOccurs="1" minOccurs="1"
|
||||
type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/OVERLAY -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/TEMPLATES -->
|
||||
<xs:element name="templates" maxOccurs="1" minOccurs="1"
|
||||
type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/TEMPLATES -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/MOUNT -->
|
||||
<xs:element name="mount" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/MOUNT -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/DISTROS -->
|
||||
<xs:element name="distros" maxOccurs="1" minOccurs="1"
|
||||
type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/DISTROS -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/DEST -->
|
||||
<xs:element name="dest" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/DEST -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/ISO -->
|
||||
<xs:element name="iso" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/ISO -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/HTTP -->
|
||||
<xs:element name="http" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/HTTP -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/TFTP -->
|
||||
<xs:element name="tftp" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/TFTP -->
|
||||
<!-- BDISK/PROFILE/BUILD/PATHS/PKI -->
|
||||
<xs:element name="pki" maxOccurs="1" minOccurs="1" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS/PKI -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/BUILD/PATHS -->
|
||||
<!-- BDISK/PROFILE/BUILD/BASEDISTRO -->
|
||||
<xs:element name="basedistro"/>
|
||||
<!-- END BDISK/PROFILE/BUILD/BASEDISTRO -->
|
||||
</xs:all>
|
||||
<xs:attribute name="its_full_of_stars" type="xs:boolean"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/BUILD -->
|
||||
<!-- BDISK/PROFILE/ISO -->
|
||||
<xs:element name="iso" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:attribute name="sign" type="xs:boolean"/>
|
||||
<xs:attribute name="multi_arch">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="yes"/>
|
||||
<xs:enumeration value="no"/>
|
||||
<xs:enumeration value="true"/>
|
||||
<xs:enumeration value="false"/>
|
||||
<xs:enumeration value="x86_64"/>
|
||||
<xs:enumeration value="x86"/>
|
||||
<xs:enumeration value="64"/>
|
||||
<xs:enumeration value="32"/>
|
||||
<xs:enumeration value="i686"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/ISO -->
|
||||
<!-- BDISK/PROFILE/IPXE -->
|
||||
<xs:element name="ipxe" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/IPXE/URI -->
|
||||
<xs:element name="uri" type="t_btag_uri" maxOccurs="1" minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/IPXE/URI -->
|
||||
</xs:all>
|
||||
<xs:attribute name="sign" type="xs:boolean"/>
|
||||
<xs:attribute name="iso" type="xs:boolean"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/IPXE -->
|
||||
<!-- BDISK/PROFILE/GPG -->
|
||||
<xs:element name="gpg" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/GPG/KEY -->
|
||||
<xs:element name="key" minOccurs="0" maxOccurs="unbounded">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/GPG/KEY/NAME -->
|
||||
<xs:element name="name" type="xs:normalizedString" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/GPG/KEY/NAME -->
|
||||
<!-- BDISK/PROFILE/GPG/KEY/EMAIL -->
|
||||
<xs:element name="email" type="xs:normalizedString" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/GPG/KEY/EMAIL -->
|
||||
<!-- BDISK/PROFILE/GPG/KEY/COMMENT -->
|
||||
<xs:element name="comment" type="xs:string" maxOccurs="1"
|
||||
minOccurs="0"/>
|
||||
<!-- END BDISK/PROFILE/GPG/KEY/COMMENT -->
|
||||
<!-- BDISK/PROFILE/GPG/KEY/SUBKEY -->
|
||||
<xs:element name="subkey" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<!-- See below for notes on attributes. -->
|
||||
<!-- TODO: convert into shared type for parent as well? -->
|
||||
<xs:attribute name="algo" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="rsa"/>
|
||||
<xs:enumeration value="dsa"/>
|
||||
<xs:enumeration value="ed"/>
|
||||
<xs:enumeration value="nist"/>
|
||||
<xs:enumeration value="brainpool.1"/>
|
||||
<xs:enumeration value="sec.k1"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
<xs:attribute name="keysize" type="xs:positiveInteger" use="optional"/>
|
||||
<xs:attribute name="expire" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:integer">
|
||||
<xs:pattern value="(0|[0-9]{10})"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/GPG/KEY/SUBKEY -->
|
||||
</xs:all>
|
||||
<xs:attribute name="algo" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<!-- rsa, dsa, and elgamal are "normal". Newer GnuPG supports ECC (yay!), so we have support for those in the XSD (you can get a list with gpg -with-colons -list-config curve | cut -f3 -d":" | tr ';' '\n'). -->
|
||||
<!-- We test in-code if the host supports it. -->
|
||||
<xs:enumeration value="rsa"/>
|
||||
<xs:enumeration value="dsa"/>
|
||||
<!-- The following only support encryption. The entire reason we'd be generating a key is to sign files, so we disable them. -->
|
||||
<!-- <xs:enumeration value="elg"/> -->
|
||||
<!-- <xs:enumeration value="cv"/> -->
|
||||
<xs:enumeration value="ed"/>
|
||||
<xs:enumeration value="nist"/>
|
||||
<xs:enumeration value="brainpool.1"/>
|
||||
<xs:enumeration value="sec.k1"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
<!-- We COULD constrain this further, but it's conditional upon the algo type. So we'll do that in BDisk itself. -->
|
||||
<!-- But it may be possible? https://stackoverflow.com/a/39045446/733214 -->
|
||||
<xs:attribute name="keysize" type="xs:positiveInteger" use="optional"/>
|
||||
<!-- XSD doesn't have a datatype for Epoch vs. 0 (for no expire). -->
|
||||
<xs:attribute name="expire" use="optional">
|
||||
<xs:simpleType>
|
||||
<!-- This is xs:integer instead of xs:positiveInteger because 0 would otherwise fail validation. -->
|
||||
<xs:restriction base="xs:integer">
|
||||
<xs:pattern value="(0|[0-9]{10})"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/GPG/KEY -->
|
||||
</xs:sequence>
|
||||
<xs:attribute name="keyid" type="t_gpg_keyid" use="required"/>
|
||||
<xs:attribute name="publish" type="xs:boolean" use="optional"/>
|
||||
<xs:attribute name="prompt_passphrase" type="xs:boolean" use="required"/>
|
||||
<xs:attribute name="passphrase" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern
|
||||
value="[!"#$%&\\'\(\)\*\+,\-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}~ ]+"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
<xs:attribute name="gnupghome" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern value="(.+)/([^/]+)"/>
|
||||
<xs:pattern
|
||||
value="((.+)/([^/]+))?\{variable%[A-Za-z0-9_]\}((.+)/([^/]+))?"/>
|
||||
<xs:pattern
|
||||
value="((.+)/([^/]+))?\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}((.+)/([^/]+))?"/>
|
||||
<xs:pattern value="(none|)"/>
|
||||
<xs:pattern value="(auto|default)"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/GPG -->
|
||||
<!-- BDISK/PROFILE/PKI -->
|
||||
<xs:element name="pki" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/PKI/CA -->
|
||||
<xs:element name="ca" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/PKI/CA/CERT -->
|
||||
<xs:element name="cert" maxOccurs="1" minOccurs="1"
|
||||
type="t_pki_cert"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CA/CERT -->
|
||||
<!-- BDISK/PROFILE/PKI/CA/CSR -->
|
||||
<xs:element name="csr" maxOccurs="1" minOccurs="0" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CA/CSR -->
|
||||
<!-- BDISK/PROFILE/PKI/CA/INDEX -->
|
||||
<xs:element name="index" maxOccurs="1" minOccurs="0" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CA/INDEX -->
|
||||
<!-- BDISK/PROFILE/PKI/CA/SERIAL -->
|
||||
<xs:element name="serial" maxOccurs="1" minOccurs="0"
|
||||
type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CA/SERIAL -->
|
||||
<!-- BDISK/PROFILE/PKI/CA/KEY -->
|
||||
<xs:element name="key" minOccurs="1" maxOccurs="1"
|
||||
type="t_pki_key"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CA/KEY -->
|
||||
<!-- BDISK/PROFILE/PKI/CA/SUBJECT -->
|
||||
<xs:element name="subject" maxOccurs="1" minOccurs="0"
|
||||
type="t_pki_subject"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CA/SUBJECT -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/PKI/CA -->
|
||||
<!-- BDISK/PROFILE/PKI/CLIENT -->
|
||||
<xs:element name="client" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/PKI/CLIENT/CERT -->
|
||||
<xs:element name="cert" maxOccurs="1" minOccurs="1"
|
||||
type="t_pki_cert"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CLIENT/CERT -->
|
||||
<!-- BDISK/PROFILE/PKI/CLIENT/CSR -->
|
||||
<xs:element name="csr" maxOccurs="1" minOccurs="0" type="t_path"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CLIENT/CSR -->
|
||||
<!-- BDISK/PROFILE/PKI/CLIENT/KEY -->
|
||||
<xs:element name="key" minOccurs="1" maxOccurs="1"
|
||||
type="t_pki_key"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CLIENT/KEY -->
|
||||
<!-- BDISK/PROFILE/PKI/CLIENT/SUBJECT -->
|
||||
<xs:element name="subject" maxOccurs="1" minOccurs="0"
|
||||
type="t_pki_subject"/>
|
||||
<!-- END BDISK/PROFILE/PKI/CLIENT/SUBJECT -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/PKI/CLIENT -->
|
||||
</xs:sequence>
|
||||
<xs:attribute name="overwrite" type="xs:boolean" use="required"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/PKI -->
|
||||
<!-- BDISK/PROFILE/SYNC -->
|
||||
<xs:element name="sync" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:all>
|
||||
<!-- BDISK/PROFILE/SYNC/IPXE -->
|
||||
<xs:element name="ipxe" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_path">
|
||||
<xs:attribute name="enabled" type="xs:boolean" use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC/IPXE -->
|
||||
<!-- BDISK/PROFILE/SYNC/TFTP -->
|
||||
<xs:element name="tftp" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_path">
|
||||
<xs:attribute name="enabled" type="xs:boolean" use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC/TFTP -->
|
||||
<!-- BDISK/PROFILE/SYNC/ISO -->
|
||||
<xs:element name="iso" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_path">
|
||||
<xs:attribute name="enabled" type="xs:boolean" use="optional"/>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC/ISO -->
|
||||
<!-- BDISK/PROFILE/SYNC/GPG -->
|
||||
<xs:element name="gpg" maxOccurs="1" minOccurs="0">
|
||||
<xs:complexType>
|
||||
<xs:simpleContent>
|
||||
<xs:extension base="t_path">
|
||||
<xs:attribute name="enabled" type="xs:boolean" use="optional"/>
|
||||
<xs:attribute name="format" use="required">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:enumeration value="asc"/>
|
||||
<xs:enumeration value="bin"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:extension>
|
||||
</xs:simpleContent>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC/GPG -->
|
||||
<!-- BDISK/PROFILE/SYNC/RSYNC -->
|
||||
<xs:element name="rsync" maxOccurs="1" minOccurs="1">
|
||||
<xs:complexType>
|
||||
<xs:sequence>
|
||||
<!-- BDISK/PROFILE/SYNC/RSYNC/USER -->
|
||||
<xs:element name="user" type="t_username" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/SYNC/RSYNC/USER -->
|
||||
<!-- BDISK/PROFILE/SYNC/RSYNC/HOST -->
|
||||
<xs:element name="host" type="t_net_loc" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/SYNC/RSYNC/HOST -->
|
||||
<!-- BDISK/PROFILE/SYNC/RSYNC/PORT -->
|
||||
<xs:element name="port" maxOccurs="1" minOccurs="0">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:positiveInteger">
|
||||
<xs:minInclusive value="1"/>
|
||||
<xs:maxInclusive value="65535"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC/RSYNC/PORT -->
|
||||
<xs:choice>
|
||||
<!-- BDISK/PROFILE/SYNC/RSYNC/PUBKEY -->
|
||||
<xs:element name="pubkey" type="t_path" maxOccurs="1"
|
||||
minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/SYNC/RSYNC/PUBKEY -->
|
||||
<!-- BDISK/PROFILE/SYNC/RSYNC/PASSWORD -->
|
||||
<xs:element name="password" maxOccurs="1" minOccurs="1"/>
|
||||
<!-- END BDISK/PROFILE/SYNC/RSYNC/PASSWORD -->
|
||||
</xs:choice>
|
||||
</xs:sequence>
|
||||
<xs:attribute name="enabled" type="xs:boolean" use="required"/>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC/RSYNC -->
|
||||
</xs:all>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE/SYNC -->
|
||||
</xs:all>
|
||||
<xs:attribute name="id" type="xs:positiveInteger" use="optional"/>
|
||||
<xs:attribute name="name" type="xs:string" use="optional"/>
|
||||
<xs:attribute name="uuid" use="optional">
|
||||
<xs:simpleType>
|
||||
<xs:restriction base="xs:string">
|
||||
<xs:pattern
|
||||
value="[0-9a-f]{8}\-[0-9a-f]{4}\-4[0-9a-f]{3}\-[89ab][0-9a-f]{3}\-[0-9a-f]{12}"/>
|
||||
</xs:restriction>
|
||||
</xs:simpleType>
|
||||
</xs:attribute>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK/PROFILE -->
|
||||
</xs:sequence>
|
||||
</xs:complexType>
|
||||
</xs:element>
|
||||
<!-- END BDISK -->
|
||||
</xs:schema>
|
||||
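(For a quick sanity check of a configuration document against the schema above, outside of BDisk itself, a short lxml sketch is enough; the file names here are hypothetical.)

import lxml.etree
# Compile the schema, then validate a config document against it.
schema = lxml.etree.XMLSchema(lxml.etree.parse('bdisk.xsd'))
schema.assertValid(lxml.etree.parse('dist.xml'))  # raises lxml.etree.DocumentInvalid with details on failure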
3
bdisk/chroot.py
Normal file
@@ -0,0 +1,3 @@
|
||||
import os
|
||||
import psutil
|
||||
import subprocess
|
||||
1030
bdisk/confgen.py
Executable file
File diff suppressed because it is too large
393
bdisk/confparse.py
Normal file
@@ -0,0 +1,393 @@
|
||||
import copy
|
||||
import os
|
||||
import pprint
|
||||
import re
|
||||
import lxml.etree
|
||||
from urllib.parse import urlparse
|
||||
import utils # LOCAL
|
||||
|
||||
|
||||
etree = lxml.etree
|
||||
detect = utils.detect()
|
||||
generate = utils.generate()
|
||||
transform = utils.transform()
|
||||
valid = utils.valid()
|
||||
|
||||
class Conf(object):
|
||||
def __init__(self, cfg, profile = None, validate_cfg = False,
|
||||
xsd_file = None):
|
||||
"""
|
||||
A configuration object.
|
||||
|
||||
Read a configuration file, parse it, and make it available to the rest
|
||||
of BDisk.
|
||||
|
||||
Args:
|
||||
|
||||
cfg The configuration. Can be a filesystem path, a string,
|
||||
bytes, or a stream. If bytes or a bytestream, it must be
|
||||
in UTF-8 format.
|
||||
|
||||
profile (optional) A sub-profile in the configuration. If None
|
||||
is provided, we'll first look for the first profile
|
||||
named 'default' (case-insensitive). If one isn't found,
|
||||
then the first profile found will be used. Can be a
|
||||
string (in which we'll automatically search for the
|
||||
given value in the "name" attribute) or a dict for more
|
||||
fine-grained profile identification, such as:
|
||||
|
||||
{'name': 'PROFILE_NAME',
|
||||
'id': 1,
|
||||
'uuid': '00000000-0000-0000-0000-000000000000'}
|
||||
|
||||
You can provide any combination of these
|
||||
(e.g. "profile={'id': 2, 'name' = 'some_profile'}").
|
||||
Matching is conjunctive (ALL attributes specified
must match).
|
||||
"""
|
||||
if validate_cfg == 'pre':
|
||||
# Validate before attempting any other operations
|
||||
self.validate()
|
||||
self.xml_suppl = utils.xml_supplicant(cfg, profile = profile)
|
||||
self.xml = self.xml_suppl.xml
|
||||
for e in self.xml_suppl.xml.iter():
|
||||
self.xml_suppl.substitute(e)
|
||||
self.xml_suppl.get_profile(profile = self.xml_suppl.orig_profile)
|
||||
# Debugging aid: dump the fully-substituted XML so it can be inspected.
with open('/tmp/parsed.xml', 'wb') as f:
|
||||
f.write(lxml.etree.tostring(self.xml_suppl.xml))
|
||||
self.profile = self.xml_suppl.profile
|
||||
self.xsd = xsd_file
|
||||
self.cfg = {}
|
||||
if validate_cfg:
|
||||
# Validation post-substitution
|
||||
self.validate(parsed = False)
|
||||
# TODO: populate checksum{} with hash_algo if explicit
|
||||
|
||||
def get_pki_obj(self, pki, pki_type):
|
||||
elem = {}
|
||||
if pki_type not in ('ca', 'client'):
|
||||
raise ValueError('pki_type must be "ca" or "client"')
|
||||
if pki_type == 'ca':
|
||||
elem['index'] = None
|
||||
elem['serial'] = None
|
||||
for e in pki.xpath('./*'):
|
||||
# These have attribs or children.
|
||||
if e.tag in ('cert', 'key', 'subject'):
|
||||
elem[e.tag] = {}
|
||||
if e.tag == 'subject':
|
||||
for sub in e.xpath('./*'):
|
||||
elem[e.tag][sub.tag] = transform.xml2py(sub.text,
|
||||
attrib = False)
|
||||
else:
|
||||
for a in e.xpath('./@*'):
|
||||
elem[e.tag][a.attrname] = transform.xml2py(a)
|
||||
elem[e.tag]['path'] = e.text
|
||||
else:
|
||||
elem[e.tag] = e.text
|
||||
return(elem)
|
||||
|
||||
def get_source(self, source, item, _source):
|
||||
_source_item = {'flags': [], 'fname': None}
|
||||
elem = source.xpath('./{0}'.format(item))[0]
|
||||
if item == 'checksum':
|
||||
if elem.get('explicit', False):
|
||||
_explicit = transform.xml2py(
|
||||
elem.attrib['explicit'])
|
||||
_source_item['explicit'] = _explicit
|
||||
if _explicit:
|
||||
del(_source_item['fname'])
|
||||
_source_item['value'] = elem.text
|
||||
return(_source_item)
|
||||
else:
|
||||
_source_item['explicit'] = False
|
||||
if elem.get('hash_algo', False):
|
||||
_source_item['hash_algo'] = elem.attrib['hash_algo']
|
||||
else:
|
||||
_source_item['hash_algo'] = None
|
||||
if item == 'sig':
|
||||
if elem.get('keys', False):
|
||||
_keys = [i.strip() for i in elem.attrib['keys'].split()]
|
||||
_source_item['keys'] = _keys
|
||||
else:
|
||||
_source_item['keys'] = []
|
||||
if elem.get('keyserver', False):
|
||||
_source_item['keyserver'] = elem.attrib['keyserver']
|
||||
else:
|
||||
_source_item['keyserver'] = None
|
||||
_item = elem.text
|
||||
_flags = elem.get('flags', '')
|
||||
if _flags:
|
||||
for f in _flags.split():
|
||||
if f.strip().lower() == 'none':
|
||||
continue
|
||||
_source_item['flags'].append(f.strip().lower())
|
||||
if _source_item['flags']:
|
||||
if 'regex' in _source_item['flags']:
|
||||
ptrn = _item.format(**self.xml_suppl.btags['regex'])
|
||||
else:
|
||||
ptrn = None
|
||||
# TODO: remove all of this; we are switching to just a mirror URL.
|
||||
_source_item['fname'] = detect.remote_files(
|
||||
'/'.join((_source['mirror'],
|
||||
_source['rootpath'])),
|
||||
ptrn = ptrn,
|
||||
flags = _source_item['flags'])
|
||||
else:
|
||||
_source_item['fname'] = _item
|
||||
return(_source_item)
|
||||
|
||||
def get_xsd(self):
|
||||
if isinstance(self.xsd, lxml.etree.XMLSchema):
|
||||
return(self.xsd)
|
||||
if not self.xsd:
|
||||
path = os.path.join(os.path.dirname(__file__), 'bdisk.xsd')
|
||||
else:
|
||||
path = os.path.abspath(os.path.expanduser(self.xsd))
|
||||
with open(path, 'rb') as f:
|
||||
xsd = lxml.etree.parse(f)
|
||||
return(xsd)
|
||||
|
||||
def parse_accounts(self):
|
||||
## PROFILE/ACCOUNTS
|
||||
self.cfg['users'] = []
|
||||
# First we handle the root user, since it's a "special" case.
|
||||
_root = self.profile.xpath('./accounts/rootpass')
|
||||
self.cfg['root'] = transform.user(_root)
|
||||
for user in self.profile.xpath('./accounts/user'):
|
||||
_user = {'username': user.xpath('./username/text()')[0],
|
||||
'sudo': transform.xml2py(user.attrib['sudo']),
|
||||
'comment': None}
|
||||
_comment = user.xpath('./comment/text()')
|
||||
if len(_comment):
|
||||
_user['comment'] = _comment[0]
|
||||
_password = user.xpath('./password')
|
||||
_user.update(transform.user(_password))
|
||||
self.cfg['users'].append(_user)
|
||||
return()
|
||||
|
||||
def parse_all(self):
|
||||
self.parse_profile()
|
||||
self.parse_meta()
|
||||
self.parse_accounts()
|
||||
self.parse_sources()
|
||||
self.parse_buildpaths()
|
||||
self.parse_pki()
|
||||
self.parse_gpg()
|
||||
self.parse_sync()
|
||||
return()
|
||||
|
||||
def parse_buildpaths(self):
|
||||
## PROFILE/BUILD(/PATHS)
|
||||
self.cfg['build'] = {'paths': {}}
|
||||
build = self.profile.xpath('./build')[0]
|
||||
_optimize = build.get('its_full_of_stars', 'false')
|
||||
self.cfg['build']['optimize'] = transform.xml2py(_optimize)
|
||||
for path in build.xpath('./paths/*'):
|
||||
self.cfg['build']['paths'][path.tag] = path.text
|
||||
self.cfg['build']['guests'] = build.get('guests', 'archlinux')
|
||||
# iso and ipxe are their own basic profile elements, but we group them
|
||||
# in here because 1.) they're related, and 2.) they're simple to
|
||||
# import. This may change in the future if they become more complex.
|
||||
## PROFILE/ISO
|
||||
self.cfg['iso'] = {'sign': None,
|
||||
'multi_arch': None}
|
||||
self.cfg['ipxe'] = {'sign': None,
|
||||
'iso': None}
|
||||
for x in ('iso', 'ipxe'):
|
||||
# We enable all features by default.
|
||||
elem = self.profile.xpath('./{0}'.format(x))[0]
|
||||
for a in self.cfg[x]:
|
||||
self.cfg[x][a] = transform.xml2py(elem.get(a, 'true'))
|
||||
if x == 'ipxe':
|
||||
self.cfg[x]['uri'] = elem.xpath('./uri/text()')[0]
|
||||
return()
|
||||
|
||||
def parse_gpg(self):
|
||||
## PROFILE/GPG
|
||||
self.cfg['gpg'] = {'keyid': None,
|
||||
'gnupghome': None,
|
||||
'publish': None,
|
||||
'prompt_passphrase': None,
|
||||
'keys': []}
|
||||
elem = self.profile.xpath('./gpg')[0]
|
||||
for attr in elem.xpath('./@*'):
|
||||
self.cfg['gpg'][attr.attrname] = transform.xml2py(attr)
|
||||
for key in elem.xpath('./key'):
|
||||
_keytpl = {'algo': 'rsa',
|
||||
'keysize': '4096'}
|
||||
_key = copy.deepcopy(_keytpl)
|
||||
_key['name'] = None
|
||||
_key['email'] = None
|
||||
_key['comment'] = None
|
||||
for attr in key.xpath('./@*'):
|
||||
_key[attr.attrname] = transform.xml2py(attr)
|
||||
for param in key.xpath('./*'):
|
||||
if param.tag == 'subkey':
|
||||
# We only support one subkey (for key generation).
|
||||
if 'subkey' not in _key:
|
||||
_key['subkey'] = copy.deepcopy(_keytpl)
|
||||
for attr in param.xpath('./@*'):
|
||||
_key['subkey'][attr.attrname] = transform.xml2py(attr)
|
||||
# print(_key)  # (debugging leftover)
|
||||
else:
|
||||
_key[param.tag] = transform.xml2py(param.text, attrib = False)
|
||||
self.cfg['gpg']['keys'].append(_key)
|
||||
return()
|
||||
|
||||
def parse_meta(self):
|
||||
## PROFILE/META
|
||||
# Get the various meta strings. We skip regexes (we handle those
|
||||
# separately since they're unique'd per id attrib) and variables (they
|
||||
# are already substituted by self.xml_suppl.substitute(x)).
|
||||
_meta_iters = ('dev', 'names')
|
||||
for t in _meta_iters:
|
||||
self.cfg[t] = {}
|
||||
_xpath = './meta/{0}'.format(t)
|
||||
for e in self.profile.xpath(_xpath):
|
||||
for se in e:
|
||||
if not isinstance(se, lxml.etree._Comment):
|
||||
self.cfg[t][se.tag] = transform.xml2py(se.text,
|
||||
attrib = False)
|
||||
for e in ('desc', 'uri', 'ver', 'max_recurse'):
|
||||
_xpath = './meta/{0}/text()'.format(e)
|
||||
self.cfg[e] = transform.xml2py(self.profile.xpath(_xpath)[0],
|
||||
attrib = False)
|
||||
# HERE is where we would handle regex patterns.
|
||||
# But we don't, because they're in self.xml_suppl.btags['regex'].
|
||||
#self.cfg['regexes'] = {}
|
||||
#_regexes = self.profile.xpath('./meta/regexes/pattern')
|
||||
#if len(_regexes):
|
||||
# for ptrn in _regexes:
|
||||
# self.cfg['regexes'][ptrn.attrib['id']] = re.compile(ptrn.text)
|
||||
return()
|
||||
|
||||
def parse_pki(self):
|
||||
## PROFILE/PKI
|
||||
self.cfg['pki'] = {'clients': []}
|
||||
elem = self.profile.xpath('./pki')[0]
|
||||
self.cfg['pki']['overwrite'] = transform.xml2py(
|
||||
elem.get('overwrite', 'false'))
|
||||
ca = elem.xpath('./ca')[0]
|
||||
clients = elem.xpath('./client')
|
||||
self.cfg['pki']['ca'] = self.get_pki_obj(ca, 'ca')
|
||||
for client in clients:
|
||||
self.cfg['pki']['clients'].append(self.get_pki_obj(client,
|
||||
'client'))
|
||||
return()
|
||||
|
||||
def parse_profile(self):
|
||||
## PROFILE
|
||||
# The following are attributes of profiles that serve as identifiers.
|
||||
self.cfg['profile'] = {'id': None,
|
||||
'name': None,
|
||||
'uuid': None}
|
||||
for a in self.cfg['profile']:
|
||||
if a in self.profile.attrib:
|
||||
self.cfg['profile'][a] = transform.xml2py(
|
||||
self.profile.attrib[a],
|
||||
attrib = True)
|
||||
# Small bug in transform.xml2py that we unfortunately can't fix there, so we fix it up manually here.
|
||||
if 'id' in self.cfg['profile'] and isinstance(self.cfg['profile']['id'], bool):
|
||||
self.cfg['profile']['id'] = int(self.cfg['profile']['id'])
|
||||
return()
|
||||
|
||||
def parse_sources(self):
|
||||
## PROFILE/SOURCES
|
||||
self.cfg['sources'] = []
|
||||
for source in self.profile.xpath('./sources/source'):
|
||||
_source = {}
|
||||
_source['arch'] = source.attrib['arch']
|
||||
_source['mirror'] = source.xpath('./mirror/text()')[0]
|
||||
_source['rootpath'] = source.xpath('./rootpath/text()')[0]
|
||||
# The tarball, checksum, and sig components require some...
|
||||
# special care.
|
||||
for e in ('tarball', 'checksum', 'sig'):
|
||||
_source[e] = self.get_source(source, e, _source)
|
||||
self.cfg['sources'].append(_source)
|
||||
return()
|
||||
|
||||
def parse_sync(self):
|
||||
## PROFILE/SYNC
|
||||
self.cfg['sync'] = {}
|
||||
elem = self.profile.xpath('./sync')[0]
|
||||
# We populate defaults in case they weren't specified.
|
||||
for e in ('gpg', 'ipxe', 'iso', 'tftp'):
|
||||
self.cfg['sync'][e] = {'enabled': False,
|
||||
'path': None}
|
||||
_matches = elem.xpath('./{0}'.format(e))
if not _matches:
# This element is optional; keep the defaults populated above.
continue
sub = _matches[0]
|
||||
for a in sub.xpath('./@*'):
|
||||
self.cfg['sync'][e][a.attrname] = transform.xml2py(a)
|
||||
self.cfg['sync'][e]['path'] = sub.text
|
||||
rsync = elem.xpath('./rsync')[0]
|
||||
self.cfg['sync']['rsync'] = {'enabled': False}
|
||||
for a in rsync.xpath('./@*'):
|
||||
self.cfg['sync']['rsync'][a.attrname] = transform.xml2py(a)
|
||||
for sub in rsync.xpath('./*'):
|
||||
self.cfg['sync']['rsync'][sub.tag] = transform.xml2py(
|
||||
sub.text,
|
||||
attrib = False)
|
||||
return()
|
||||
|
||||
def validate(self, parsed = False):
|
||||
xsd = self.get_xsd()
|
||||
if not isinstance(xsd, lxml.etree.XMLSchema):
|
||||
self.xsd = etree.XMLSchema(xsd)
|
||||
else:
|
||||
pass
|
||||
# This would return a bool if it validates or not.
|
||||
#self.xsd.validate(self.xml)
|
||||
# We want to get a more detailed exception.
|
||||
xml = etree.fromstring(self.xml_suppl.return_full())
|
||||
self.xsd.assertValid(xml)
|
||||
if parsed:
|
||||
# We wait until after it's parsed to evaluate because otherwise we
|
||||
# can't use utils.valid().
|
||||
# We only bother with stuff that would hinder building, though -
|
||||
# e.g. we don't check that profile's UUID is a valid UUID4.
|
||||
# The XSD can catch a lot of stuff, but it's not so hot with things like URI validation,
|
||||
# email validation, etc.
|
||||
# URLs
|
||||
for url in (self.cfg['uri'], self.cfg['dev']['website']):
|
||||
if not valid.url(url):
|
||||
raise ValueError('{0} is not a valid URL.'.format(url))
|
||||
# Emails
|
||||
for k in self.cfg['gpg']['keys']:
|
||||
if not valid.email(k['email']):
|
||||
raise ValueError('GPG key {0}: {1} is not a valid email address'.format(k['name'], k['email']))
|
||||
if not valid.email(self.cfg['dev']['email']):
|
||||
raise ValueError('{0} is not a valid email address'.format(self.cfg['dev']['email']))
|
||||
if self.cfg['pki']:
|
||||
if 'subject' in self.cfg['pki']['ca']:
|
||||
if not valid.email(self.cfg['pki']['ca']['subject']['emailAddress']):
|
||||
raise ValueError('{0} is not a valid email address'.format(
|
||||
self.cfg['pki']['ca']['subject']['emailAddress']))
|
||||
for cert in self.cfg['pki']['clients']:
|
||||
if not cert['subject']:
|
||||
continue
|
||||
if not valid.email(cert['subject']['emailAddress']):
|
||||
raise ValueError('{0} is not a valid email address'.format(cert['subject']['emailAddress']))
|
||||
# Salts/hashes
|
||||
if self.cfg['root']['salt']:
|
||||
if not valid.salt_hash(self.cfg['root']['salt']):
|
||||
raise ValueError('{0} is not a valid salt'.format(self.cfg['root']['salt']))
|
||||
if self.cfg['root']['hashed']:
|
||||
if not valid.salt_hash_full(self.cfg['root']['salt_hash'], self.cfg['root']['hash_algo']):
|
||||
raise ValueError('{0} is not a valid hash of type {1}'.format(self.cfg['root']['salt_hash'],
|
||||
self.cfg['root']['hash_algo']))
|
||||
for u in self.cfg['users']:
|
||||
if u['salt']:
|
||||
if not valid.salt_hash(u['salt']):
|
||||
raise ValueError('{0} is not a valid salt'.format(u['salt']))
|
||||
if u['hashed']:
|
||||
if not valid.salt_hash_full(u['salt_hash'], u['hash_algo']):
|
||||
raise ValueError('{0} is not a valid hash of type {1}'.format(u['salt_hash'], u['hash_algo']))
|
||||
# GPG Key IDs
|
||||
if self.cfg['gpg']['keyid']:
|
||||
if not valid.gpgkeyID(self.cfg['gpg']['keyid']):
|
||||
raise ValueError('{0} is not a valid GPG Key ID/fingerprint'.format(self.cfg['gpg']['keyid']))
|
||||
for s in self.cfg['sources']:
|
||||
if 'sig' in s:
|
||||
for k in s['sig']['keys']:
|
||||
if not valid.gpgkeyID(k):
|
||||
raise ValueError('{0} is not a valid GPG Key ID/fingerprint'.format(k))
|
||||
return()
|
||||
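(A minimal, hypothetical invocation of the Conf class above, for reference; the config path and profile name are assumptions.)

import confparse
cfg = confparse.Conf('/path/to/dist.xml', profile = {'name': 'default'}, validate_cfg = True)
cfg.parse_all()
print(cfg.cfg['profile'], cfg.cfg['build']['paths'])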
48
bdisk/download.py
Normal file
@@ -0,0 +1,48 @@
|
||||
import requests
|
||||
|
||||
|
||||
class Download(object):
|
||||
def __init__(self, url, progress = True, offset = None, chunksize = 1024):
|
||||
self.cnt_len = None
|
||||
self.head = requests.head(url, allow_redirects = True).headers
|
||||
self.req_headers = {}
|
||||
self.range = False
|
||||
self.url = url
|
||||
self.offset = offset
|
||||
self.chunksize = chunksize
|
||||
self.progress = progress
|
||||
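# If the server advertises byte-range support (and we know the content length), the
# checks below build a "Range: bytes=<offset>-" request header so fetch() can resume
# from self.offset instead of re-downloading the whole file.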
if 'accept-ranges' in self.head:
|
||||
if self.head['accept-ranges'].lower() != 'none':
|
||||
self.range = True
|
||||
if 'content-length' in self.head:
|
||||
try:
|
||||
self.cnt_len = int(self.head['content-length'])
|
||||
except TypeError:
|
||||
pass
|
||||
if self.cnt_len and self.offset and self.range:
|
||||
if not self.offset <= self.cnt_len:
|
||||
raise ValueError(('The offset requested ({0}) is greater than '
|
||||
'the content-length value ({1})').format(self.offset, self.cnt_len))
|
||||
self.req_headers['range'] = 'bytes={0}-'.format(self.offset)
|
||||
|
||||
def fetch(self):
|
||||
if not self.progress:
|
||||
self.req = requests.get(self.url, allow_redirects = True, headers = self.req_headers)
|
||||
self.bytes_obj = self.req.content
|
||||
else:
|
||||
self.req = requests.get(self.url, allow_redirects = True, stream = True, headers = self.req_headers)
|
||||
self.bytes_obj = bytes()
|
||||
_bytelen = 0
|
||||
# TODO: better handling for logging instead of print()s?
|
||||
for chunk in self.req.iter_content(chunk_size = self.chunksize):
|
||||
self.bytes_obj += chunk
|
||||
if self.cnt_len:
|
||||
print('\033[F')
|
||||
print('{0:.2f}'.format((_bytelen / float(self.cnt_len)) * 100),
|
||||
end = '%',
|
||||
flush = True)
|
||||
_bytelen += self.chunksize
|
||||
else:
|
||||
print('.', end = '')
|
||||
print()
|
||||
return(self.bytes_obj)
|
||||
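(A short usage sketch of the Download class above; the URL and offset are hypothetical.)

import download
d = download.Download('https://example.com/archives/bootstrap.tar.gz', progress = False, offset = 1024)
data = d.fetch()  # picks up at byte 1024 when the server supports ranges, otherwise fetches from the start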
76
bdisk/env_prep.py
Normal file
@@ -0,0 +1,76 @@
|
||||
import hashlib
|
||||
import importlib # needed for the guest-os-specific stuff...
|
||||
import os
|
||||
import download # LOCAL
import utils # LOCAL
|
||||
from urllib.parse import urljoin
|
||||
|
||||
|
||||
def hashsum_downloader(url, filename = None):
|
||||
# TODO: support "latest" and "regex" flags? or remove from specs (since the tarball can be specified by these)?
|
||||
# move that to the download.Download() class?
|
||||
d = download.Download(url, progress = False)
|
||||
hashes = {os.path.basename(k):v for (v, k) in [line.split() for line in d.fetch().decode('utf-8').splitlines()]}
|
||||
if filename:
|
||||
if filename in hashes:
|
||||
return(hashes[filename])
|
||||
else:
|
||||
raise KeyError('Filename {0} not in the list of hashes'.format(filename))
|
||||
return(hashes)
|
||||
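# For reference, each line of a checksum file looks like (digest shortened/hypothetical):
#   0a1b2c3d...  archlinux-bootstrap-2018.01.01-x86_64.tar.gz
# so hashsum_downloader(url) returns {'archlinux-bootstrap-2018.01.01-x86_64.tar.gz': '0a1b2c3d...'},
# and hashsum_downloader(url, filename = '<tarball name>') returns just that one digest string.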
|
||||
|
||||
class Prepper(object):
|
||||
# Prepare sources, destinations, etc.
|
||||
def __init__(self, cfg):
|
||||
self.cfg = cfg
|
||||
self.CreateDirs(self.cfg['build']['paths'])
|
||||
if 'handler' not in self.cfg['gpg'] or not self.cfg['gpg']['handler']:
|
||||
if self.cfg['gpg']['gnupghome']:
|
||||
os.environ['GNUPGHOME'] = self.cfg['gpg']['gnupghome']
|
||||
from . import GPG
|
||||
self.cfg['gpg']['handler'] = GPG.GPGHandler(gnupg_homedir = self.cfg['gpg']['gnupghome'],
|
||||
key_id = self.cfg['gpg']['keyid'])
|
||||
self.gpg = self.cfg['gpg']['handler']
|
||||
for idx, s in enumerate(self.cfg['sources']):
|
||||
self._download(idx)
|
||||
|
||||
def CreateDirs(self, dirs):
|
||||
# "dirs" is the build-paths dict, so iterate over its values (the actual paths).
for d in dirs.values():
|
||||
os.makedirs(d, exist_ok = True)
|
||||
os.chmod(d, 0o700)
|
||||
return()
|
||||
|
||||
def _download(self, source_idx):
|
||||
# (Renamed so the flag does not shadow the imported "download" module.)
need_fetch = True
|
||||
_source = self.cfg['sources'][source_idx]
|
||||
_dest_dir = os.path.join(self.cfg['build']['paths']['cache'], str(source_idx))
|
||||
_tarball = os.path.join(_dest_dir, _source['tarball']['fname'])
|
||||
_remote_dir = urljoin(_source['mirror'], _source['rootpath'])
|
||||
_remote_tarball = urljoin(_remote_dir + '/', _source['tarball']['fname'])
|
||||
def _hash_verify(): # TODO: move to utils.valid()?
|
||||
# Get a checksum.
|
||||
if 'checksum' in _source:
|
||||
if not _source['checksum']['explicit']:
|
||||
# Pull the digest for our tarball out of the remote checksum file.
_source['checksum']['value'] = hashsum_downloader(
urljoin(_remote_dir + '/', _source['checksum']['fname']),
filename = _source['tarball']['fname'])
|
||||
if not _source['checksum']['hash_algo']:
|
||||
_source['checksum']['hash_algo'] = utils.detect().any_hash(_source['checksum']['value'],
|
||||
normalize = True)[0]
|
||||
_hash = hashlib.new(_source['checksum']['hash_algo'])
|
||||
with open(_tarball, 'rb') as f:
|
||||
# It's potentially a large file, so we chunk it 64kb at a time.
|
||||
_hashbuf = f.read(64000)
|
||||
while len(_hashbuf) > 0:
|
||||
_hash.update(_hashbuf)
|
||||
_hashbuf = f.read(64000)
|
||||
if _hash.hexdigest().lower() != _source['checksum']['value'].lower():
|
||||
return(False)
|
||||
return(True)
|
||||
def _sig_verify(): # TODO: move to utils.valid()?
|
||||
if 'sig' in _source:
|
||||
pass
|
||||
return(True)
|
||||
if os.path.isfile(_tarball):
# Only skip the fetch if both the checksum and the signature verify.
need_fetch = not (_hash_verify() and _sig_verify())
if need_fetch:
d = download.Download(_remote_tarball)
|
||||
1
bdisk/guests/antergos.py
Symbolic link
@@ -0,0 +1 @@
|
||||
archlinux.py
|
||||
1
bdisk/guests/arch.py
Symbolic link
@@ -0,0 +1 @@
|
||||
archlinux.py
|
||||
128
bdisk/guests/archlinux.py
Normal file
@@ -0,0 +1,128 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
from .. import utils # LOCAL # do i need to escalate two levels up?
|
||||
|
||||
class Manifest(object):
|
||||
def __init__(self, cfg):
|
||||
self.cfg = cfg
|
||||
self.name = 'archlinux'
|
||||
self.version = None # rolling release
|
||||
self.release = None # rolling release
|
||||
# https://www.archlinux.org/master-keys/
|
||||
# Pierre Schmitz. https://www.archlinux.org/people/developers/#pierre
|
||||
self.gpg_authorities = ['4AA4767BBC9C4B1D18AE28B77F2D434B9741E8AC']
|
||||
self.tarball = None
|
||||
self.sig = None
|
||||
self.mirror = None
|
||||
self.checksum = {'sha1': None,
|
||||
'md5': None}
|
||||
self.verified = False
|
||||
self.arches = ('x86_64', )
|
||||
self.bootsupport = ('uefi', 'bios', 'pxe', 'ipxe', 'iso')
|
||||
self.kernel = '/boot/vmlinuz-linux'
|
||||
self.initrd = '/boot/initramfs-linux.img'
|
||||
# TODO: can this be trimmed down?
|
||||
self.prereqs = ['arch-install-scripts', 'archiso', 'bzip2', 'coreutils', 'customizepkg-scripting', 'cronie',
|
||||
'dhclient', 'dhcp', 'dhcpcd', 'dosfstools', 'dropbear', 'efibootmgr', 'efitools', 'efivar',
|
||||
'file', 'findutils', 'iproute2', 'iputils', 'libisoburn', 'localepurge', 'lz4', 'lzo',
|
||||
'lzop', 'mkinitcpio-nbd', 'mkinitcpio-nfs-utils', 'mkinitcpio-utils', 'nbd', 'ms-sys',
|
||||
'mtools', 'net-tools', 'netctl', 'networkmanager', 'pv', 'python', 'python-pyroute2',
|
||||
'rsync', 'sed', 'shorewall', 'squashfs-tools', 'sudo', 'sysfsutils', 'syslinux',
|
||||
'traceroute', 'vi']
|
||||
self._get_filenames()
|
||||
|
||||
def _get_filenames(self):
|
||||
# TODO: cache this info
|
||||
webroot = 'iso/latest'
|
||||
for m in self.cfg['mirrors']:
|
||||
uri = os.path.join(m, webroot)
|
||||
try:
|
||||
self.tarball = utils.detect().remote_files(uri, regex = ('archlinux-'
|
||||
'bootstrap-'
|
||||
'[0-9]{4}\.'
|
||||
'[0-9]{2}\.'
|
||||
'[0-9]{2}-'
|
||||
'x86_64\.tar\.gz$'))[0]
|
||||
self.sig = '{0}.sig'.format(self.tarball)
|
||||
for h in self.checksum:
|
||||
self.checksum[h] = os.path.join(uri, '{0}sums.txt'.format(h))
|
||||
self.mirror = m
|
||||
break
|
||||
except Exception as e:
|
||||
pass
|
||||
if not self.tarball:
|
||||
raise ValueError('Could not find the tarball URI. Check your network connection.')
|
||||
return()
|
||||
|
||||
|
||||
def extern_prep(cfg, cur_arch = 'x86_64'):
|
||||
import os
|
||||
import re
|
||||
mirrorlist = os.path.join(cfg['build']['paths']['chroot'],
|
||||
cur_arch,
|
||||
'etc/pacman.d/mirrorlist')
|
||||
with open(mirrorlist, 'r') as f:
|
||||
mirrors = []
|
||||
for i in f.readlines():
|
||||
m = re.sub('^\s*#.*$', '', i.strip())
|
||||
if m != '':
|
||||
mirrors.append(m)
|
||||
if not mirrors:
|
||||
# We do this as a fail-safe.
|
||||
mirror = ('\n\n# Added by BDisk\n'
|
||||
'Server = https://arch.mirror.square-r00t.net/'
|
||||
'$repo/os/$arch\n')
|
||||
with open(mirrorlist, 'a') as f:
|
||||
f.write(mirror)
|
||||
return()
|
||||
|
||||
# This will be run before the regular packages are installed. It can be
|
||||
# whatever script you like, as long as it has the proper shebang and doesn't
|
||||
# need additional packages installed.
|
||||
# In Arch's case, we use it for initializing the keyring and installing an AUR
|
||||
# helper.
|
||||
pkg_mgr_prep = """#!/bin/bash
|
||||
|
||||
pacman -Syy
|
||||
pacman-key --init
|
||||
pacman-key --populate archlinux
|
||||
pacman -S --noconfirm --needed base
|
||||
pacman -S --noconfirm --needed base-devel multilib-devel git linux-headers \
|
||||
mercurial subversion vala xorg-server-devel
|
||||
cd /tmp
|
||||
sqrt="https://git.square-r00t.net/BDisk/plain/external"
|
||||
# Temporary until there's another AUR helper that allows dropping privs AND
|
||||
# automatically importing GPG keys.
|
||||
pkg="${sqrt}/apacman-current.pkg.tar.xz?h=4.x_rewrite"
|
||||
curl -sL -o apacman-current.pkg.tar.xz "${pkg}"
|
||||
pacman -U --noconfirm apacman-current.pkg.tar.xz
|
||||
rm apacman*
|
||||
"""
|
||||
|
||||
# Special values:
|
||||
# {PACKAGE} = the package name
|
||||
# {VERSION} = the version specified in the <package version= ...> attribute
|
||||
# {REPO} = the repository specified in the <package repo= ...> attribute
|
||||
# If check_cmds are needed to run before installing, set pre_check to True.
|
||||
# Return code 0 means the package is installed already, anything else means we
|
||||
# should try to install it.
|
||||
#### AUR SUPPORT ####
|
||||
packager = {'pre_check': False,
|
||||
'sys_update': ['/usr/bin/apacman', '-S', '-u'],
|
||||
'sync_cmd': ['/usr/bin/apacman', '-S', '-y', '-y'],
|
||||
'check_cmds': {'versioned': ['/usr/bin/pacman',
|
||||
'-Q', '-s',
|
||||
'{PACKAGE}'],
|
||||
'unversioned': ['/usr/bin/pacman',
|
||||
'-Q', '-s',
|
||||
'{PACKAGE}']
|
||||
},
|
||||
'update_cmds': {'versioned': ['/usr/bin/pacman',
|
||||
'-S', '-u',
|
||||
'{PACKAGE}'],
|
||||
'unversioned': ['/usr/bin/pacman',
|
||||
'-S', '-u',
|
||||
'{PACKAGE}']
|
||||
},
|
||||
}
|
||||
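# A sketch of how the {PACKAGE}/{VERSION}/{REPO} placeholders above are meant to be expanded
# (the package name is hypothetical; the code that does this lives elsewhere in BDisk):
#   cmd = [c.format(PACKAGE = 'openssh', VERSION = '', REPO = '')
#          for c in packager['check_cmds']['versioned']]
#   # A return code of 0 from running cmd means the package is already installed.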
1
bdisk/guests/manjaro.py
Symbolic link
@@ -0,0 +1 @@
|
||||
archlinux.py
|
||||
1
bdisk/iPXE.py
Normal file
@@ -0,0 +1 @@
|
||||
import GIT # LOCAL
|
||||
1
bdisk/logger.py
Normal file
@@ -0,0 +1 @@
|
||||
import logging
|
||||
32
bdisk/main.py
Normal file
@@ -0,0 +1,32 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import confparse # LOCAL
|
||||
|
||||
"""The primary user interface for BDisk. If we are running interactively,
|
||||
parse arguments first, then initiate a BDisk session."""
|
||||
|
||||
def parseArgs():
|
||||
args = argparse.ArgumentParser(description = ('An easy liveCD creator '
|
||||
'built in python. Supports '
|
||||
'hybrid ISOs/USB, iPXE, and '
|
||||
'UEFI.'),
|
||||
epilog = ('https://git.square-r00t.net'))
|
||||
return(args)
|
||||
|
||||
def run(cfg):
|
||||
cfg = confparse.Conf(cfg, validate_cfg = True)
|
||||
cfg.parse_all()
|
||||
|
||||
|
||||
def run_interactive():
|
||||
args = vars(parseArgs().parse_args())
|
||||
args['profile'] = {}
|
||||
for i in ('name', 'id', 'uuid'):
|
||||
args['profile'][i] = args[i]
|
||||
del(args[i])
|
||||
run(args)
|
||||
return()
|
||||
|
||||
if __name__ == '__main__':
|
||||
run_interactive()
|
||||
396
bdisk/mtree.py
Executable file
@@ -0,0 +1,396 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import datetime
|
||||
import grp
|
||||
import hashlib
|
||||
import os
|
||||
import pathlib
|
||||
import platform
|
||||
import pwd
|
||||
import re
|
||||
import stat
|
||||
from collections import OrderedDict
|
||||
try:
|
||||
import pycksum
|
||||
has_cksum = True
|
||||
except ImportError:
|
||||
has_cksum = False
|
||||
|
||||
# Parse BSD mtree spec files.
|
||||
# On arch, BSD mtree is ported in the AUR as nmtree.
|
||||
# TODO: add a generator class as well? (in process)
|
||||
# TODO: add a checking function as well?
|
||||
|
||||
# The format used for headers
|
||||
_header_strptime_fmt = '%a %b %d %H:%M:%S %Y'
|
||||
|
||||
# Supported hash types (for generation). These are globally available always.
|
||||
_hashtypes = ['md5', 'sha1', 'sha256', 'sha384', 'sha512']
|
||||
# If RIPEMD-160 is supported, we add it (after MD5).
|
||||
if 'ripemd160' in hashlib.algorithms_available:
|
||||
_hashtypes.insert(1, 'rmd160')
|
||||
|
||||
# Iterative to determine which type an item is.
|
||||
_stype_map = {'block': stat.S_ISBLK,
|
||||
'char': stat.S_ISCHR,
|
||||
'dir': stat.S_ISDIR,
|
||||
'fifo': stat.S_ISFIFO,
|
||||
'file': stat.S_ISREG,
|
||||
'link': stat.S_ISLNK,
|
||||
'socket': stat.S_ISSOCK}
|
||||
|
||||
# Regex pattern for cleaning up an octal perm mode into a string representation.
|
||||
_octre = re.compile('^0o')
|
||||
|
||||
class MTreeGen(object):
|
||||
def __init__(self, path):
|
||||
self.path = pathlib.PosixPath(os.path.abspath(os.path.expanduser(path)))
|
||||
# These are used to keep a cached copy of the info.
|
||||
self._sysinfo = {'uids': {}, 'gids': {}}
|
||||
self._build_header()
|
||||
# We use this to keep track of where we are exactly in the tree so we can generate a full absolute path at
|
||||
# any moment relative to the tree.
|
||||
self._path_pointer = copy.deepcopy(self.path)
|
||||
|
||||
|
||||
def paths_iterator(self):
|
||||
for root, dirs, files in os.walk(self.path):
|
||||
for f in files:
|
||||
_fname = pathlib.PosixPath(root).joinpath(f)
|
||||
_stats = self._get_stats(_fname)
|
||||
if not _stats:
|
||||
print(('WARNING: {0} either disappeared while we were trying to parse it or '
|
||||
'it is a broken symlink.').format(_fname))
|
||||
continue
|
||||
# TODO: get /set line here?
|
||||
item = ' {0} \\\n'.format(f)
|
||||
_type = 'file' # TODO: stat this more accurately
|
||||
_cksum = self._gen_cksum(_fname)
|
||||
item += ' {0} {1} {2}\\\n'.format(_stats['size'],
|
||||
_stats['time'],
|
||||
('{0} '.format(_cksum) if _cksum else ''))
|
||||
# TODO: here's where the hashes would get added
|
||||
# TODO: here's where we parse dirs. maybe do that before files?
|
||||
# remember: mtree specs use ..'s to traverse upwards when done with a dir
|
||||
for d in dirs:
|
||||
_dname = pathlib.PosixPath(root).joinpath(d)
|
||||
_stats = self._get_stats(_dname)
|
||||
if not _stats:
|
||||
print(('WARNING: {0} either disappeared while we were trying to parse it or '
|
||||
'it is a broken symlink.').format(_dname))
|
||||
continue
|
||||
# TODO: get /set line here?
|
||||
return()
|
||||
|
||||
|
||||
def _gen_cksum(self, fpath):
|
||||
if not has_cksum:
|
||||
return(None)
|
||||
if not os.path.isfile(fpath):
|
||||
return(None)
|
||||
# TODO: waiting on https://github.com/sobotklp/pycksum/issues/2 for byte iteration (because large files maybe?)
|
||||
c = pycksum.Cksum()
|
||||
with open(fpath, 'rb') as f:
|
||||
c.add(f)
|
||||
return(c.get_cksum())
|
||||
|
||||
|
||||
def _get_stats(self, path):
|
||||
stats = {}
|
||||
try:
|
||||
_st = os.stat(path, follow_symlinks = False)
|
||||
except FileNotFoundError:
|
||||
# Broken symlink? Shouldn't occur since follow_symlinks is False anyways, BUT...
|
||||
return(None)
|
||||
# Ownership
|
||||
stats['uid'] = _st.st_uid
|
||||
stats['gid'] = _st.st_gid
|
||||
if _st.st_uid in self._sysinfo['uids']:
|
||||
stats['uname'] = self._sysinfo['uids'][_st.st_uid]
|
||||
else:
|
||||
_pw = pwd.getpwuid(_st.st_uid).pw_name
|
||||
stats['uname'] = _pw
|
||||
self._sysinfo['uids'][_st.st_uid] = _pw
|
||||
if _st.st_gid in self._sysinfo['gids']:
|
||||
stats['gname'] = self._sysinfo['gids'][_st.st_gid]
|
||||
else:
|
||||
_grp = grp.getgrgid(_st.st_gid).gr_name
|
||||
stats['gname'] = _grp
|
||||
self._sysinfo['gids'][_st.st_gid] = _grp
|
||||
# Type and Mode
|
||||
for t in _stype_map:
|
||||
if _stype_map[t](_st.st_mode):
|
||||
stats['type'] = t
|
||||
# TODO: need a reliable way of parsing this.
|
||||
# for instance, for /dev/autofs, _st.st_dev = 6 (os.makedev(6) confirms major is 0, minor is 6)
|
||||
# but netBSD mtree (ported) says it's "0xaeb" (2795? or, as str, "®b" apparently).
|
||||
# I'm guessing the kernel determines this, but where is it pulling it from/how?
|
||||
# We can probably do 'format,major,minor' (or, for above, 'linux,0,6').
|
||||
# if t in ('block', 'char'):
|
||||
# stats['device'] = None
|
||||
# Handle symlinks.
|
||||
if t == 'link':
|
||||
_target = path
|
||||
while os.path.islink(_target):
|
||||
_target = os.path.realpath(_target)
|
||||
stats['link'] = _target
|
||||
break
|
||||
stats['mode'] = '{0:0>4}'.format(_octre.sub('', str(oct(stat.S_IMODE(_st.st_mode)))))
|
||||
stats['size'] = _st.st_size
|
||||
stats['time'] = str(float(_st.st_mtime))
|
||||
stats['nlink'] = _st.st_nlink
|
||||
# TODO: "flags" keyword? is that meaningful on linux?
|
||||
stats['flags'] = 'none'
|
||||
return(stats)
|
||||
|
||||
|
||||
|
||||
def _gen_hashes(self, fpath):
|
||||
hashes = OrderedDict({})
|
||||
if not os.path.isfile(fpath):
|
||||
return(hashes)
|
||||
_hashnums = len(_hashtypes)
|
||||
for idx, h in enumerate(_hashtypes):
|
||||
# Stupid naming inconsistencies.
|
||||
_hashname = (h if h != 'rmd160' else 'ripemd160')
|
||||
_hasher = hashlib.new(_hashname)
|
||||
with open(fpath, 'rb') as f:
|
||||
# Hash 64kb at a time in case it's a huge file. TODO: is this the most ideal chunk size?
|
||||
_hashbuf = f.read(64000)
|
||||
while len(_hashbuf) > 0:
|
||||
_hasher.update(_hashbuf)
|
||||
_hashbuf = f.read(64000)
|
||||
hashes[h] = _hasher.hexdigest()
|
||||
return(hashes)
|
||||
# if idx + 1 < _hashnums:
|
||||
# hashes += ' {0}={1} \\\n'.format(h, _hasher.hexdigest())
|
||||
# else:
|
||||
# hashes += ' {0}={1}\n'.format(h, _hasher.hexdigest())
|
||||
# return(hashes)
|
||||
|
||||
|
||||
def _build_header(self):
|
||||
self.spec = ''
|
||||
_header = OrderedDict({})
|
||||
_header['user'] = pwd.getpwuid(os.geteuid()).pw_name
|
||||
_header['machine'] = platform.node()
|
||||
_header['tree'] = str(self.path)
|
||||
_header['date'] = datetime.datetime.utcnow().strftime(_header_strptime_fmt)
|
||||
for h in _header:
|
||||
self.spec += '#\t{0:>7}: {1}\n'.format(h, _header[h])
|
||||
self.spec += '\n'
|
||||
return()
|
||||
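# For reference, _build_header() above emits a header of this shape ('#', a tab, the key
# right-aligned to a width of 7, then ': ' and the value; values here are hypothetical):
# #       user: root
# #    machine: buildhost
# #       tree: /srv/bdisk/chroot
# #       date: Mon Jan  1 00:00:00 2018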
|
||||
|
||||
|
||||
class MTreeParse(object):
|
||||
def __init__(self, spec):
|
||||
if not isinstance(spec, (str, bytes)):
|
||||
raise ValueError('spec must be a raw string of the spec or a bytes object of the string')
|
||||
if isinstance(spec, bytes):
|
||||
try:
|
||||
spec = spec.decode('utf-8')
|
||||
except UnicodeDecodeError:
|
||||
raise ValueError('spec must be a utf-8 encoded set of bytes if using byte mode')
|
||||
self.orig_spec = copy.deepcopy(spec) # For referencing in case someone wanted to write it out.
|
||||
# We NOW need to handle the escaped linebreaking it does.
|
||||
self._specdata = re.sub('\\\\\s+', '', spec).splitlines()
|
||||
self._get_header()
|
||||
self.spec = {'header': self.header,
|
||||
'paths': {}}
|
||||
# Template for an item.
|
||||
# Default keywords are:
|
||||
# flags, gid, link, mode, nlink, size, time, type, uid
|
||||
self._tplitem = {
|
||||
'type': None, # ('block', 'char', 'dir', 'fifo', 'file', 'link', 'socket')
|
||||
# checksum of file (if it's a file) (int)
|
||||
# On all *nix platforms, the cksum(1) utility (which is what the mtree spec uses) follows
|
||||
# the POSIX standard CRC (which is NOT CRC-1/CRC-16 nor CRC32!):
|
||||
# http://pubs.opengroup.org/onlinepubs/009695299/utilities/cksum.html
|
||||
# For a python implementation,
|
||||
# https://stackoverflow.com/questions/6835381/python-equivalent-of-unix-cksum-function
|
||||
# See also crcmod (in PyPi).
|
||||
'cksum': None,
|
||||
# "The device number to use for block or char file types." Should be converted to a tuple of one
|
||||
# of the following:
|
||||
# - (format(str), major(int), minor(int))
|
||||
# - (format(str), major(int), unit(str?), subunit(str?)) (only used on bsdos formats)
|
||||
# - (number(int?), ) ("opaque" number)
|
||||
# Valid formats are, per man page of mtree:
|
||||
# native, 386bsd, 4bsd, bsdos, freebsd, hpux, isc, linux, netbsd, osf1, sco, solaris, sunos,
|
||||
# svr3, svr4, ultrix
|
||||
'device': None,
|
||||
# File flags as symbolic name. BSD-specific thing? TODO: testing on BSD system
|
||||
'flags': [],
|
||||
'ignore': False, # An mtree-internal flag to ignore hierarchy under this item
|
||||
'gid': None, # The group ID (int)
|
||||
'gname': None, # The group name (str)
|
||||
'link': None, # The link target/source, if a link.
|
||||
# The MD5 checksum digest (str? hex?). "md5digest" is a synonym for this, so it's consolidated in
|
||||
# as the same keyword.
|
||||
'md5': None,
|
||||
# The mode (in octal) (we convert it to a python-native int for os.chmod/stat, etc.)
|
||||
# May also be a symbolic value; TODO: map symbolic to octal/int.
|
||||
'mode': None,
|
||||
'nlink': None, # Number of hard links for this item.
|
||||
'optional': False, # This item may or may not be present in the compared directory for checking.
|
||||
'rmd160': None, # The RMD-160 checksum of the file. "rmd160digest" is a synonym.
|
||||
'sha1': None, # The SHA-1 sum. "sha1digest" is a synonym.
|
||||
'sha256': None, # SHA-2 256-bit checksum; "sha256digest" is a synonym.
|
||||
'sha384': None, # SHA-2 384-bit checksum; "sha384digest" is a synonym.
|
||||
'sha512': None, # SHA-2 512-bit checksum; "sha512digest" is a synonym.
|
||||
'size': None, # Size of the file in bytes (int).
|
||||
'tags': [], # mtree-internal tags (comma-separated in the mtree spec).
|
||||
'time': None, # Time the file was last modified (in Epoch fmt as float).
|
||||
'uid': None, # File owner UID (int)
|
||||
'uname': None # File owner username (str)
|
||||
# And lastly, "children" is where the children files/directories go. We don't include it in the template;
|
||||
# it's added programmatically.
|
||||
# 'children': {}
|
||||
}
|
||||
# Global aspects are handled by "/set" directives.
|
||||
# They are restored by an "/unset". Since they're global and stateful, they're handled as a class attribute.
|
||||
self.settings = copy.deepcopy(self._tplitem)
|
||||
self._parse_items()
|
||||
del(self.settings, self._tplitem)
|
||||
|
||||
|
||||
def _get_header(self):
|
||||
self.header = {}
|
||||
_headre = re.compile('^#\s+(user|machine|tree|date):\s')
|
||||
_cmtre = re.compile('^\s*#\s*')
|
||||
_blklnre = re.compile('^\s*$')
|
||||
for idx, line in enumerate(self._specdata):
|
||||
if _headre.search(line): # We found a header item.
|
||||
l = [i.lstrip() for i in _cmtre.sub('', line).split(':', 1)]
|
||||
header = l[0]
|
||||
val = (l[1] if l[1] != '(null)' else None)
|
||||
if header == 'date':
|
||||
val = datetime.datetime.strptime(val, _header_strptime_fmt)
|
||||
elif header == 'tree':
|
||||
val = pathlib.PosixPath(val)
|
||||
self.header[header] = val
|
||||
elif _blklnre.search(line):
|
||||
break # We've reached the end of the header. Otherwise...
|
||||
else: # We definitely shouldn't be here, but this means the spec doesn't even have a header.
|
||||
break
|
||||
return()
|
||||
|
||||
|
||||
def _parse_items(self):
|
||||
# A pattern (compiled for performance) to match commands.
|
||||
_stngsre = re.compile('^/(un)?set\s')
|
||||
# Per the man page:
|
||||
# "Empty lines and lines whose first non-whitespace character is a hash mark (‘#’) are ignored."
|
||||
_ignre = re.compile('^(\s*(#.*)?)?$')
|
||||
# The following regex is used to quickly and efficiently check for a synonymized hash name.
|
||||
_hashre = re.compile('^(md5|rmd160|sha1|sha256|sha384|sha512)(digest)?$')
|
||||
# The following regex is to test if we need to traverse upwards in the path.
|
||||
_parentre = re.compile('^\.{,2}/?$')
|
||||
# _curpath = self.header['tree']
|
||||
_curpath = pathlib.PosixPath('/')
|
||||
_types = ('block', 'char', 'dir', 'fifo', 'file', 'link', 'socket')
|
||||
# This parses keywords. Used by both item specs and /set.
|
||||
def _kwparse(kwline):
|
||||
out = {}
|
||||
for i in kwline:
|
||||
l = i.split('=', 1)
|
||||
if len(l) < 2:
|
||||
l.append(None)
|
||||
k, v = l
|
||||
if v == 'none':
|
||||
v = None
|
||||
# These are represented as octals.
|
||||
if k in ('mode', ):
|
||||
# TODO: handle symbolic references too (e.g. rwxrwxrwx)
|
||||
if v.isdigit():
|
||||
v = int(v, 8) # Convert from the octal. This can then be used directly with os.chmod etc.
|
||||
# These are represented as ints
|
||||
elif k in ('uid', 'gid', 'cksum', 'nlink'):
|
||||
if v.isdigit():
|
||||
v = int(v)
|
||||
# These are booleans (represented as True by their presence).
|
||||
elif k in ('ignore', 'optional'):
|
||||
v = True
|
||||
# These are lists (comma-separated).
|
||||
elif k in ('flags', 'tags'):
|
||||
if v:
|
||||
v = [i.strip() for i in v.split(',')]
|
||||
# The following are synonyms.
|
||||
elif _hashre.search(k):
|
||||
k = _hashre.sub('\g<1>', k)
|
||||
elif k == 'time':
|
||||
v = datetime.datetime.fromtimestamp(float(v))
|
||||
elif k == 'type':
|
||||
if v not in _types:
|
||||
raise ValueError('{0} not one of: {1}'.format(v, ', '.join(_types)))
|
||||
out[k] = v
|
||||
return(out)
|
||||
def _unset_parse(unsetline):
|
||||
out = {}
|
||||
if unsetline[1] == 'all':
|
||||
return(copy.deepcopy(self._tplitem))
|
||||
for i in unsetline:
|
||||
out[i] = self._tplitem[i]
|
||||
return(out)
|
||||
# The Business-End (TM)
|
||||
for idx, line in enumerate(self._specdata):
|
||||
_fname = copy.deepcopy(_curpath)
|
||||
# Skip these lines
|
||||
if _ignre.search(line):
|
||||
continue
|
||||
l = line.split()
|
||||
if _parentre.search(line):
|
||||
_curpath = _curpath.parent
|
||||
elif not _stngsre.search(line):
|
||||
# So it's an item, not a command.
|
||||
_itemsettings = copy.deepcopy(self.settings)
|
||||
_itemsettings.update(_kwparse(l[1:]))
|
||||
if _itemsettings['type'] == 'dir':
|
||||
# SOMEONE PLEASE let me know if there's a cleaner way to do this.
|
||||
_curpath = pathlib.PosixPath(os.path.normpath(_curpath.joinpath(l[0])))
|
||||
_fname = _curpath
|
||||
else:
|
||||
_fname = pathlib.PosixPath(os.path.normpath(_curpath.joinpath(l[0])))
|
||||
self.spec['paths'][_fname] = _itemsettings
|
||||
else:
|
||||
# It's a command. We can safely split on whitespace since the man page specifies the
|
||||
# values are not to contain whitespace.
|
||||
# /set
|
||||
if l[0] == '/set':
|
||||
del(l[0])
|
||||
self.settings.update(_kwparse(l))
|
||||
# /unset
|
||||
else:
|
||||
self.settings.update(_unset_parse(l))
|
||||
continue
|
||||
return()
|
||||
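# For reference, a tiny spec of the kind _parse_items() consumes (contents hypothetical):
#   /set type=file uid=0 gid=0 mode=0644
#   . type=dir
#       etc type=dir
#           hostname size=9 time=1515151515.0
#       ..
#   ..
#   /unset all
# "/set" establishes stateful defaults, item lines override them per entry, ".." climbs
# back up one directory, and "/unset all" restores the template defaults.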
|
||||
|
||||
def parseArgs():
|
||||
args = argparse.ArgumentParser(description = 'An mtree parser')
|
||||
# TODO: support stdin piping
|
||||
args.add_argument('specfile',
|
||||
help = 'The path to the spec file to parse')
|
||||
return(args)
|
||||
|
||||
|
||||
# Allow to be run as a CLI utility as well.
|
||||
def main():
|
||||
args = vars(parseArgs().parse_args())
|
||||
import os
|
||||
with open(os.path.abspath(os.path.expanduser(args['specfile']))) as f:
|
||||
mt = MTreeParse(f.read())
|
||||
with open('/tmp/newspec', 'w') as f:
|
||||
f.write('\n'.join(mt._specdata))
|
||||
import pprint
|
||||
import inspect
|
||||
del(mt.orig_spec)
|
||||
del(mt._specdata)
|
||||
import shutil
|
||||
pprint.pprint(inspect.getmembers(mt), width = shutil.get_terminal_size()[0])
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
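For readers skimming the diff: the nested _kwparse() above is the heart of the spec parsing. It turns a list of key=value keywords into a dict, coercing octal modes, integer IDs, presence-only booleans, and timestamps. A minimal standalone sketch of that idea (the function name and example values here are illustrative, not part of the module):

import datetime

def parse_keywords(kwline):
    # kwline is a list like ['type=file', 'mode=0644', 'uid=0', 'optional']
    out = {}
    for kw in kwline:
        k, _, v = kw.partition('=')
        v = None if v in ('', 'none') else v
        if k == 'mode' and v and v.isdigit():
            v = int(v, 8)  # octal -> int; usable directly with os.chmod()
        elif k in ('uid', 'gid', 'cksum', 'nlink') and v and v.isdigit():
            v = int(v)
        elif k in ('ignore', 'optional'):
            v = True  # presence-only booleans
        elif k == 'time' and v:
            v = datetime.datetime.fromtimestamp(float(v))
        out[k] = v
    return out

# parse_keywords('type=file mode=0644 uid=0 optional'.split())
# -> {'type': 'file', 'mode': 420, 'uid': 0, 'optional': True}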
129
bdisk/prompt_strings.py
Normal file
@@ -0,0 +1,129 @@
|
||||
# These are *key* ciphers, for encrypting exported keys.
|
||||
openssl_ciphers = ['aes128', 'aes192', 'aes256', 'bf', 'blowfish',
|
||||
'camellia128', 'camellia192', 'camellia256', 'cast', 'des',
|
||||
'des3', 'idea', 'rc2', 'seed']
|
||||
# These are *hash algorithms* for cert digests.
|
||||
openssl_digests = ['blake2b512', 'blake2s256', 'gost', 'md4', 'md5', 'mdc2',
|
||||
'rmd160', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
|
||||
|
||||
class PromptStrings(object):
|
||||
gpg = {
|
||||
'attribs': {
|
||||
'algo': {
|
||||
'text': 'the subkey\'s encryption type/algorithm',
|
||||
# The following can ONLY be used for encryption, not signing: elg, cv
|
||||
#'choices': ['rsa', 'dsa', 'elg', 'ed', 'cv', 'nistp', 'brainpool.1', 'secp.k1'],
|
||||
'choices': ['rsa', 'dsa', 'ed', 'nistp', 'brainpool.1', 'sec.k1'],
|
||||
#'default': 'rsa'
|
||||
'default': 'ed'
|
||||
},
|
||||
'keysize': {
|
||||
'text': 'the subkey\'s key size (in bits)',
|
||||
'choices': {
|
||||
'rsa': ['1024', '2048', '4096'],
|
||||
'dsa': ['768', '2048', '3072'],
|
||||
#'elg': ['1024', '2048', '4096'], # Invalid for signing, etc.
|
||||
'ed': ['25519'],
|
||||
#'cv': ['25519'],
|
||||
'nistp': ['256', '384', '521'],
|
||||
'brainpool.1': ['256', '384', '512'],
|
||||
'sec.k1': ['256']
|
||||
},
|
||||
'default': {
|
||||
'rsa': '4096',
|
||||
'dsa': '3072',
|
||||
'ed': '25519',
|
||||
'nistp': '521',
|
||||
'brainpool.1': '512',
|
||||
'sec.k1': '256'
|
||||
}
|
||||
}
|
||||
},
|
||||
'params': ['name', 'email', 'comment']
|
||||
}
|
||||
ssl = {
|
||||
'attribs': {
|
||||
'cert': {
|
||||
'hash_algo': {
|
||||
'text': ('What hashing algorithm do you want to use? '
|
||||
'(Default is sha512.)'),
|
||||
'prompt': 'Hashing algorithm: ',
|
||||
'options': openssl_digests,
|
||||
'default': 'sha512'
|
||||
}
|
||||
},
|
||||
'key': {
|
||||
'cipher': {
|
||||
'text': ('What encryption algorithm/cipher do you want to '
|
||||
'use? (Default is aes256.) Use "none" to specify '
|
||||
'a key without a passphrase.'),
|
||||
'prompt': 'Cipher: ',
|
||||
'options': openssl_ciphers + ['none'],
|
||||
'default': 'aes256'
|
||||
},
|
||||
'keysize': {
|
||||
'text': ('What keysize/length (in bits) do you want the '
|
||||
'key to be? (Default is 4096; much higher values '
|
||||
'are possible but are untested and thus not '
|
||||
'supported by this tool; feel free to edit the '
|
||||
'generated configuration by hand.) (If the key '
|
||||
'cipher is "none", this is ignored.)'),
|
||||
'prompt': 'Keysize: ',
|
||||
# TODO: do all openssl_ciphers support these sizes?
|
||||
'options': ['1024', '2048', '4096'],
|
||||
'default': '4096'
|
||||
},
|
||||
'passphrase': {
|
||||
'text': ('What passphrase do you want to use for the key? '
|
||||
'If you specified the cipher as "none", this is '
|
||||
'ignored (you can just hit enter).'),
|
||||
'prompt': 'Passphrase (will not echo back): ',
|
||||
'options': None,
|
||||
'default': ''
|
||||
}
|
||||
}
|
||||
},
|
||||
'paths': {
|
||||
'cert': '(or read from) the certificate',
|
||||
'key': '(or read from) the key',
|
||||
'csr': ('(or read from) the certificate signing request (if '
|
||||
'blank, we won\'t write to disk - the operation will '
|
||||
'occur entirely in memory assuming we need to generate/'
|
||||
'sign)')
|
||||
},
|
||||
'paths_ca': {
|
||||
'index': ('(or read from) the CA (Certificate Authority) Database '
|
||||
'index file (if left blank, one will not be used)'),
|
||||
'serial': ('(or read from) the CA (Certificate Authority) '
|
||||
'Database serial file (if left blank, one will not be '
|
||||
'used)'),
|
||||
},
|
||||
'subject': {
|
||||
'countryName': {
|
||||
'text': ('the 2-letter country abbreviation (must conform to '
|
||||
'ISO3166 ALPHA-2)?\n'
|
||||
'Country code: ')
|
||||
},
|
||||
'localityName': {
|
||||
'text': ('the city/town/borough/locality name?\n'
|
||||
'Locality: ')
|
||||
},
|
||||
'stateOrProvinceName': {
|
||||
'text': ('the state/region name (full string)?\n'
|
||||
'Region: ')
|
||||
},
|
||||
'organization': {
|
||||
'text': ('your organization\'s name?\n'
|
||||
'Organization: ')
|
||||
},
|
||||
'organizationalUnitName': {
|
||||
'text': ('your department/role/team/department name?\n'
|
||||
'Organizational Unit: ')
|
||||
},
|
||||
'emailAddress': {
|
||||
'text': ('the email address to be associated with this '
|
||||
'certificate/PKI object?\n'
|
||||
'Email: ')
|
||||
}
|
||||
}
|
||||
}
|
||||
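The nested dicts in PromptStrings are presumably consumed by the interactive configuration generator elsewhere in this branch. A hedged sketch of how such a structure could be walked to build a prompt; prompt_for() is illustrative rather than an existing BDisk function, and the import path simply mirrors the new file above:

from bdisk.prompt_strings import PromptStrings

def prompt_for(attrib, algo='ed'):
    # Look up the prompt text, valid choices, and default for a GPG subkey attribute.
    spec = PromptStrings.gpg['attribs'][attrib]
    choices = spec['choices']
    default = spec['default']
    # keysize choices/defaults are keyed per algorithm; algo's are a flat list/string.
    if isinstance(choices, dict):
        choices = choices.get(algo, [])
        default = default.get(algo)
    answer = input('Enter {0} ({1}) [default: {2}]: '.format(
        spec['text'], ', '.join(choices), default))
    return answer or default

# e.g. prompt_for('keysize', algo='rsa') offers 1024/2048/4096 and falls back to 4096.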
2
bdisk/sync.py
Normal file
@@ -0,0 +1,2 @@
|
||||
import shutil
|
||||
import subprocess
|
||||
1156
bdisk/utils.py
Normal file
File diff suppressed because it is too large
1
bdisk/version.py
Normal file
@@ -0,0 +1 @@
|
||||
BDISK_VERSION = '4.0.0a1'
|
||||
5
bin/bdisk.py
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env python3.6
|
||||
|
||||
# PLACEHOLDER - this will be a thin wrapper installed to /usr/bin/bdisk.
|
||||
import argparse
|
||||
import bdisk
|
||||
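Since bin/bdisk.py is explicitly a placeholder, here is a hedged sketch of what the thin /usr/bin/bdisk wrapper might eventually look like, mirroring the parseArgs()/main() style used in the mtree module. bdisk.build() and the default config path are hypothetical; only argparse and the bdisk package import come from the placeholder itself:

#!/usr/bin/env python3.6

import argparse
import bdisk

def parseArgs():
    args = argparse.ArgumentParser(description = 'BDisk - a live distribution builder')
    args.add_argument('cfgfile',
                      nargs = '?',
                      default = '/etc/bdisk/bdisk.xml',  # hypothetical default location
                      help = 'The path to the XML profile/configuration to build from')
    return args

def main():
    args = vars(parseArgs().parse_args())
    bdisk.build(args['cfgfile'])  # hypothetical entry point; not yet written

if __name__ == '__main__':
    main()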
4
bin/bdiskcfg.py
Normal file
@@ -0,0 +1,4 @@
|
||||
#!/usr/bin/env python3.6
|
||||
|
||||
import argparse
|
||||
import bdisk.confgen as confgen
|
||||
208
bin/build.sh
@@ -1,208 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# A lot of snippets, inspiration, and some config directives are from https://projects.archlinux.org/archiso.git/ / the ArchLinux ISO layout.
|
||||
# Many thanks and praise are deserved.
|
||||
|
||||
|
||||
#DEBUG
|
||||
#set -x
|
||||
|
||||
echo "Starting at $(date)..."
|
||||
|
||||
## Import settings
|
||||
if [ -f "build.conf" ];
|
||||
then
|
||||
echo "Now importing settings/variables."
|
||||
set -e
|
||||
source extra/build.conf.sample
|
||||
source build.conf
|
||||
set +e
|
||||
else
|
||||
echo "You have not configured a build.conf OR you are not running from the project's root directory (the git repository's working directory).
|
||||
If you are indeed in the correct directory, you may copy the sample at extra/build.conf.sample,
|
||||
edit it for appropriate values, and copy to <PROJECT ROOT>/build.conf"
|
||||
echo 'For now, though, I am using the defaults. If the build fails complaining about a'
|
||||
echo 'missing http user, you need to specify a custom/distro-pertinent one.'
|
||||
cp extra/build.conf.sample build.conf
|
||||
set -e
|
||||
source extra/build.conf.sample
|
||||
set +e
|
||||
fi
|
||||
|
||||
|
||||
## PREPARATION ##
|
||||
|
||||
# safemode browsing enabled. lolz
|
||||
set -e
|
||||
|
||||
# do some basic error checking
|
||||
ARCH=$(uname -m)
|
||||
|
||||
if [[ ${EUID} -ne 0 ]];
|
||||
then
|
||||
#echo "This script must be run as root" 1>&2
|
||||
echo "This script must be run as root."
|
||||
exit 1
|
||||
elif [ -f ${LOCKFILE} ];
|
||||
then
|
||||
echo "Script already running, stale lockfile present, or an error occurred during last run."
|
||||
echo "Please clear ${LOCKFILE} by hand before attempting another build."
|
||||
echo -n "Timestamp of lockfile is: "
|
||||
ls -l ${LOCKFILE} | awk '{print $6" "$7" "$8}'
|
||||
exit 1
|
||||
elif [[ "$(uname -s)" != "Linux" ]];
|
||||
then
|
||||
echo "ERROR: This script is only supported on GNU/Linux."
|
||||
exit 1
|
||||
elif [[ "${ARCH}" != 'x86_64' ]];
|
||||
then
|
||||
echo "Your hardware architecture, ${ARCH}, is not supported. Only x86_64 is supported."
|
||||
echo "Dying now."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Checking directory structure and creating lockfile at ${LOCKFILE}..."
|
||||
touch ${LOCKFILE}
|
||||
|
||||
# make sure the paths exist and then check for an existing chroot session
|
||||
for i in ${BASEDIR} ${CHROOTDIR32} ${CHROOTDIR64} ${BUILDDIR}32 ${BUILDDIR}64 ${ISODIR} ${MOUNTPT} ${TEMPDIR}/{${UXNAME},${DISTNAME}} ${ARCHBOOT} ${SRCDIR} ${TFTPDIR} ${HTTPDIR}/${DISTNAME} ${BASEDIR}/logs;
|
||||
do
|
||||
if [ ! -d ${i} ];
|
||||
then
|
||||
#echo "${i} does not exist - creating."
|
||||
mkdir -p ${i}
|
||||
fi
|
||||
done
|
||||
|
||||
source ${BASEDIR}/lib/00-depcheck.func.sh
|
||||
|
||||
if [ ! -f "./BUILDNO" ];
|
||||
then
|
||||
echo '0' > ./BUILDNO
|
||||
fi
|
||||
|
||||
CHROOTDIR_GLOB="${CHROOTDIR}"
|
||||
BUILDDIR_GLOB="${BUILDDIR}"
|
||||
|
||||
# Set the version.
|
||||
BUILDVERSION="$(git describe --abbrev=0 --tags)-$(git rev-parse --short --verify HEAD)"
|
||||
BUILD="$(cat BUILDNO)"
|
||||
BUILD="$(expr ${BUILD} + 1)"
|
||||
echo ${BUILD} > ./BUILDNO
|
||||
BUILDTIME="$(date)"
|
||||
BUILD_MACHINE="$(hostname -f) (${HOST_DIST})"
|
||||
#BUILD_USERNAME="${SUDO_USER}"
|
||||
#BUILD_USERNAME="$(who am i | awk '{print $1}')"
|
||||
set +e ; logname > /dev/null 2>&1
|
||||
if [[ "${?}" == "0" ]];
|
||||
then
|
||||
BUILD_USERNAME="$(logname)"
|
||||
else
|
||||
BUILD_USERNAME="$(whoami)"
|
||||
fi
|
||||
set -e
|
||||
USERNAME_REAL="$(grep ${BUILD_USERNAME} /etc/passwd | cut -f5 -d':')"
|
||||
|
||||
cat > ${BASEDIR}/VERSION_INFO.txt << EOF
|
||||
Version: ${BUILDVERSION}
|
||||
Build: ${BUILD}
|
||||
Time: ${BUILDTIME}
|
||||
Machine: ${BUILD_MACHINE}
|
||||
User: ${BUILD_USERNAME} (${USERNAME_REAL})
|
||||
EOF
|
||||
|
||||
## FUNCTIONS ##
|
||||
|
||||
#source ${BASEDIR}/lib/00-depcheck.func.sh ## this should be called like, VERYYYY first thing, right after sanity/safety checks and such.
|
||||
source ${BASEDIR}/lib/01-mk.chroot.func.sh ## this is called automatically and only if no chroot exists
|
||||
source ${BASEDIR}/lib/02-holla_atcha_boi.func.sh
|
||||
source ${BASEDIR}/lib/03-release_me.func.sh
|
||||
source ${BASEDIR}/lib/04-facehugger.func.sh
|
||||
source ${BASEDIR}/lib/05-chroot_wrapper.func.sh
|
||||
source ${BASEDIR}/lib/06-jenny_craig.func.sh
|
||||
source ${BASEDIR}/lib/07-centos_is_stupid.func.sh
|
||||
source ${BASEDIR}/lib/08-will_it_blend.func.sh
|
||||
source ${BASEDIR}/lib/09-stuffy.func.sh
|
||||
source ${BASEDIR}/lib/10-yo_dj.func.sh
|
||||
source ${BASEDIR}/lib/11-mentos.func.sh
|
||||
|
||||
## The Business-End(TM) ##
|
||||
|
||||
CHROOTDIR="${CHROOTDIR_GLOB}"
|
||||
BUILDDIR="${BUILDDIR_GLOB}"
|
||||
holla_atcha_boi
|
||||
|
||||
rm -rf ${TEMPDIR}/*
|
||||
release_me 64 > /dev/null 2>&1
|
||||
release_me 32 > /dev/null 2>&1
|
||||
|
||||
# do we need to perform any updates?
|
||||
if [[ ${1} == "update" ]];
|
||||
then
|
||||
mentos
|
||||
centos_is_stupid
|
||||
will_it_blend 32
|
||||
will_it_blend 64
|
||||
yo_dj
|
||||
fi
|
||||
|
||||
# or do we want to just chroot in?
|
||||
if [[ ${1} == "chroot" ]];
|
||||
then
|
||||
chroot_wrapper 64
|
||||
chroot_wrapper 32
|
||||
rm -f ${LOCKFILE}
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# implement for future, needs tweaking- JUST rebuild the ISO.
|
||||
#if [[ ${1} == "respin" ]];
|
||||
#then
|
||||
# if [[ "${MULTIARCH}" == "y" ]];
|
||||
# then
|
||||
# centos_is_stupid
|
||||
# yo_dj any
|
||||
# else
|
||||
# centos_is_stupid
|
||||
# yo_dj 64
|
||||
# centos_is_stupid
|
||||
# yo_dj 32
|
||||
# fi
|
||||
#fi
|
||||
#
|
||||
|
||||
# or are we just building?
|
||||
if [[ ${1} == "build" || -z ${1} || ${1} == "all" ]];
|
||||
then
|
||||
if [[ "${MULTIARCH}" == "y" ]];
|
||||
then
|
||||
centos_is_stupid
|
||||
will_it_blend 64
|
||||
will_it_blend 32
|
||||
yo_dj any
|
||||
else
|
||||
centos_is_stupid
|
||||
will_it_blend 64
|
||||
yo_dj 64
|
||||
centos_is_stupid
|
||||
will_it_blend 32
|
||||
yo_dj 32
|
||||
fi
|
||||
fi
|
||||
|
||||
# clean up, clean up, everybody, everywhere
|
||||
echo "Cleaning up some stuff leftover from the build..."
|
||||
#rm -rf ${TEMPDIR}/*
|
||||
#rm -rf ${SRCDIR}/*
|
||||
cd ${BASEDIR}
|
||||
|
||||
if [[ "${GIT}" == "yes" ]];
|
||||
then
|
||||
echo "Committing changes to git..."
|
||||
git add --all .
|
||||
git commit -m "post-build at $(date)"
|
||||
fi
|
||||
|
||||
# yay! we're done!
|
||||
rm -f ${LOCKFILE}
|
||||
echo "Finished successfully at $(date)!"
|
||||
88
bin/clean.sh
@@ -1,88 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "Started at $(date)..."
|
||||
|
||||
## Import settings
|
||||
if [ -f "build.conf" ];
|
||||
then
|
||||
echo "Now importing settings/variables."
|
||||
set -e
|
||||
source extra/build.conf.sample
|
||||
source build.conf
|
||||
set +e
|
||||
else
|
||||
echo "You have not configured a build.conf OR you are not running from the project's root directory (the git repository's working directory).
|
||||
If you are indeed in the correct directory, you may copy the sample at ../extra/build.conf.sample,
|
||||
edit it for appropriate values, and copy to <PROJECT ROOT>/build.conf"
|
||||
echo
|
||||
echo 'This error is fatal. Dying.'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ${EUID} -ne 0 ]];
|
||||
then
|
||||
#echo "This script must be run as root" 1>&2
|
||||
echo "This script must be run as root."
|
||||
echo
|
||||
exit 1
|
||||
elif [ -f ${LOCKFILE} ];
|
||||
then
|
||||
echo "Script already running, stale lockfile present, or an error occurred during last run."
|
||||
echo "Please clear ${LOCKFILE} by hand before attempting another build."
|
||||
echo -n "Timestamp of lockfile is: "
|
||||
ls -l ${LOCKFILE} | awk '{print $6" "$7" "$8}'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Creating lockfile at ${LOCKFILE}..."
|
||||
touch ${LOCKFILE}
|
||||
|
||||
if [[ "${1}" == "all" ]];
|
||||
then
|
||||
DIRS="${CHROOTDIR}root.i686 ${CHROOTDIR}root.x86_64 ${BUILDDIR}32 ${BUILDDIR}64 ${ISODIR} ${TEMPDIR} ${ARCHBOOT} ${SRCDIR} ${TFTPDIR} ${HTTPDIR} ${BASEDIR}/logs"
|
||||
FILES="latest.32.tar.gz latest.64.tar.gz"
|
||||
elif [[ "${1}" == "chroot" ]];
|
||||
then
|
||||
DIRS="${CHROOTDIR}root.i686 ${CHROOTDIR}root.x86_64 ${BUILDDIR}32 ${BUILDDIR}64 ${ISODIR} ${TEMPDIR} ${ARCHBOOT} ${SRCDIR} ${TFTPDIR} ${HTTPDIR}"
|
||||
FILES=""
|
||||
elif [[ "${1}" == "squash" ]];
|
||||
then
|
||||
DIRS="${BUILDDIR}32 ${BUILDDIR}64 ${ISODIR} ${TEMPDIR} ${ARCHBOOT} ${SRCDIR} ${TFTPDIR} ${HTTPDIR}"
|
||||
FILES=""
|
||||
else
|
||||
DIRS="${ISODIR} ${TEMPDIR} ${ARCHBOOT} ${SRCDIR} ${TFTPDIR} ${HTTPDIR}"
|
||||
FILES=""
|
||||
fi
|
||||
|
||||
echo "I will be deleting the contents of: ${DIRS}"
|
||||
echo "I will be deleting the files: ${FILES}"
|
||||
read -p 'Do you wish to continue? [Y/n] ' CONFIRM
|
||||
|
||||
if [ -z "${CONFIRM}" ];
|
||||
then
|
||||
CONFIRM="y"
|
||||
fi
|
||||
|
||||
CONFIRM=${CONFIRM:0:1}
|
||||
CONFIRM=$(echo ${CONFIRM} | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
if [[ "${CONFIRM}" != "y" ]];
|
||||
then
|
||||
echo 'Exiting.'
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
for i in ${DIRS};
|
||||
do
|
||||
rm -rf ${i}/*
|
||||
done
|
||||
|
||||
for i in ${FILES};
|
||||
do
|
||||
rm -f ${i}
|
||||
done
|
||||
|
||||
rm -f ${LOCKFILE}
|
||||
|
||||
echo "Finished successfully at $(date)!"
|
||||
6
bin/mirror.lst.sh
@@ -1,6 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
curl -s -o /tmp/mirrorlist.tmp "https://www.archlinux.org/mirrorlist/?country=US&protocol=http&protocol=https&ip_version=4&use_mirror_status=on"
|
||||
sed -i -e 's/^#Server/Server/' /tmp/mirrorlist.tmp
|
||||
rankmirrors -n 6 /tmp/mirrorlist.tmp > extra/mirrorlist
|
||||
sed -i -e '/^##/d' extra/mirrorlist
|
||||
3
bin/xmllint.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
xmllint -schema /opt/dev/bdisk/bdisk/bdisk.xsd /opt/dev/bdisk/docs/examples/multi_profile.xml --noout
|
||||
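The same check the xmllint wrapper above performs can also be done in-process with lxml (which this branch already uses in docs/examples/regen_multi.py); a small sketch, reusing the paths hard-coded in the script:

from lxml import etree

schema = etree.XMLSchema(etree.parse('/opt/dev/bdisk/bdisk/bdisk.xsd'))
doc = etree.parse('/opt/dev/bdisk/docs/examples/multi_profile.xml')
if not schema.validate(doc):
    # error_log lists each violation with line numbers, much like xmllint's output.
    print(schema.error_log)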
33
docs/FAQ
@@ -1,33 +0,0 @@
|
||||
BDisk Frequently Asked(/Unasked) Questions
|
||||
|
||||
|
||||
|
||||
0.) Why does it take so long to build?
|
||||
1.) Why is the generated ISO file so big?
|
||||
2.) How do I find the version/release/etc. number of an ISO?
|
||||
|
||||
|
||||
=========================================================
|
||||
|
||||
|
||||
|
||||
0.) WHY DOES IT TAKE SO LONG TO BUILD?
|
||||
A: This typically occurs when you're building from within a LiveCD/LiveUSB situation, in a VM/container/etc., or on a headless server.
|
||||
If this is the case, you may run into what appears to be "stalling", especially while keys are being generated for the chroots.
|
||||
Thankfully, there is an easy fix. You can install the "haveged"(http://www.issihosts.com/haveged/) software and run it. This will
|
||||
show an immediate and non-negligible improvement for the above contexts. If you have extra power to throw at it (or are using a dedicated build box)
|
||||
as well, I recommend enabling I_AM_A_RACECAR in your build.conf. BDisk will then be more aggressive with its resource consumption.
|
||||
|
||||
|
||||
1.) WHY IS THE GENERATED ISO FILE SO BIG?
|
||||
A: You may have enabled a LOT of packages in extra/packages.(32|64|both). Or you're using the default set of packages, which tries to include a LOT
|
||||
of different (and in some cases, redundant) packages for widespread utilization and usage. In addition, keep in mind that BDisk builds a single ISO
|
||||
that can be used on both i686 architectures AND full x86_64 architectures ("AMD64" as you may sometimes see it referenced). Because it doesn't cheat
|
||||
and just use a 64-bit kernel with a 32-bit userland, it needs two different squash images on each ISO- one for 32-bit userland and one for 64-bit
|
||||
userland.
|
||||
|
||||
2.) HOW DO I FIND THE VERSION/RELEASE/ETC. NUMBER OF AN ISO?
|
||||
A: This can be found in a multitude of places. The full-size ISO file (iso/<distname>-<git tag>-<git rev number>-(32|64|any).iso) should have the
|
||||
version right in the file name. If you want more detailed information (or perhaps you renamed the file), you can mount the ISO as loopback in GNU/Linux,
|
||||
*BSD, or Mac OS X and check /path/to/mounted/iso/VERSION_INFO.txt. Lastly, within the runtime itself (especially handy if booting via iPXE), you can
|
||||
check /root/VERSION_INFO.txt within the running live environment.
|
||||
170
docs/README
@@ -1,170 +0,0 @@
|
||||
######################################################################################################################################
|
||||
##### BDisk #####
|
||||
##### #####
|
||||
##### Written by Brent Saner #####
|
||||
##### <bts@square-r00t.net> #####
|
||||
##### Built upon my (Brent's) 'BDisk' ISO-building framework (http://bdisk.square-r00t.net) #####
|
||||
######################################################################################################################################
|
||||
|
||||
!!!!!! WARNING !!!!!!!
|
||||
I do NOT recommend running this on a machine that is using wireless, as it will download a LOT of data.
|
||||
If you ignore this warning, be prepared to explain to anyone you share your wifi with why Facebook takes 20 seconds to load,
|
||||
because they WILL ask.
|
||||
|
||||
It should be fine over ethernet, since hardware switches are much faster and more efficient than a single duplex wireless radio.
|
||||
|
||||
Future versions, once this project has a dumping ground, will fetch highly compressed snapshotted chroot filesystems instead
|
||||
of dynamically building the entire install chroots (both x86_64 and i686) (with the option of building fresh locally,
|
||||
disabled by default).
|
||||
Till then, sorry for the inconvenience.
|
||||
!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
|
||||
## Why Arch? ##
|
||||
Because it's a largely easy-to-use, well-documented (https://wiki.archlinux.org/) distro. It's no-frills and incredibly
|
||||
flexible/customizable, and can be made rather slim. It's also very friendly to run as a chroot inside any other distro.
|
||||
|
||||
All commands below should be issued in the root working directory of this git repository. (For me, this is
|
||||
/opt/dev/work/BDisk but this may be entirely different for you, depending on where you cloned the repository to.)
|
||||
|
||||
## Features ##
|
||||
-Builds a hybrid ISO
|
||||
A hybrid ISO allows one to simply dd if=/path/to/file.iso of=/dev/<USB STICK> instead of using e.g. UNetBootin while also
|
||||
working with traditional optical media.
|
||||
-Builds a dual-architecture ISO
|
||||
BDisk allows you to create both a 32-bit and 64-bit ISO, either separately or part of the same ISO, and customize each
|
||||
based on architecture.
|
||||
-Supports both BIOS and UEFI booting in one ISO
|
||||
It can be difficult finding a live distribution that offers full UEFI support, which is frustrating if you're trying to
|
||||
install e.g. UEFI-enabled Gentoo, for instance. Not only does this ISO framework build support for both in the same ISO,
|
||||
but it also includes some UEFI shells as well.
|
||||
-Arch install scripts
|
||||
This live distro, assuming default packages are kept, should be 100% compatible with the Arch install guide
|
||||
(https://wiki.archlinux.org/index.php/installation_guide).
|
||||
-Allows for non-interactive runs, is git-friendly, logs all output while keeping the runtime output relatively sane, and allows
|
||||
the inclusion of arbitrary files in the finished filesystem.
|
||||
-TFTP/HTTP/PXE support
|
||||
It will automatically copy over some files to tftpboot/ and http/, greatly saving some time in PXE booting. It also
|
||||
supports, by default, booting PXE with HTTP fetching for the squashed filesystems. Say goodbye to NFS. You can even
|
||||
automatically rsync over after the build, if you wish, to your webserver (see sub-point)
|
||||
-Additionally, it supports iPXE if enabled in the configuration file. Please see examples/HTTP for an example of how to
|
||||
lay this out and what sort of directory hierarchy is needed.
|
||||
-Automatic versioning based on git tags
|
||||
|
||||
## Prerequisites ##
|
||||
-At least ~20Gb disk space free (remember, you need room for not only four chroots (one developing and one staging for i686/x86_64),
|
||||
but also disk space for working, the finished ISO(s), etc.)
|
||||
|
||||
-The following packages installed on the build host:
|
||||
(NOTE: future versions will have dependencies checked automatically and installed if necessary,
|
||||
depending on which distro your host build machine is running. The OS detection and package installation component of that isn't
|
||||
done yet though. 2014.10.31)
|
||||
(NOTE2: Make sure all the packages you specify are valid package names. A mistyped package name will cause the chroot creation to
|
||||
break and everything else will break as a result.)
|
||||
|
||||
curl
|
||||
dosfstools
|
||||
libisoburn ("libisofs" on CentOS)
|
||||
lynx
|
||||
rsync
|
||||
sed
|
||||
squashfs-tools
|
||||
xorriso (in RPMForge repo for CentOS 7)
|
||||
xz
|
||||
|
||||
and *probably* a few others. The scripts run with set -e for the most part, so if something's missed, you'll know.
|
||||
Oh, you will know.
|
||||
|
||||
|
||||
## Configuration ##
|
||||
See extra/build.conf.sample. Copy to <PROJECT ROOT>/build.conf if you wish to modify any of the values, otherwise the defaults
|
||||
will be used. I recommend enabling I_AM_A_RACECAR if you have the hardware for it, as it can speed things up greatly.
|
||||
The file should be well-commented.
|
||||
|
||||
Also note the following files/paths:
|
||||
|
||||
-bin:
|
||||
The building/cleaning scripts.
|
||||
--/build.sh:
|
||||
Supports several modes:
|
||||
bin/build.sh update Updates existing chroots (does an apacman -Syyu --devel, copies over fresh files from
|
||||
overlays, etc.)
|
||||
bin/build.sh chroot Chroots you into the runtime for any interactive tasks you may wish to run.
|
||||
bin/build.sh build (DEFAULT) Builds the chroots, ISO, etc.
|
||||
bin/build.sh all Same as bin/build.sh build
|
||||
bin/build.sh Same as bin/build.sh build
|
||||
--/clean.sh:
|
||||
Supports several modes:
|
||||
bin/clean.sh all Clears everything out to a near-pristine working state. For convenience, it DOES NOT clear
|
||||
out build.conf.
|
||||
bin/clean.sh chroot Clears out only the working spaces, old ISOs, and the chroot directories. Handy if you want to "start
|
||||
fresh" but still want to keep logs from earlier runs.
|
||||
bin/clean.sh squash Clears out only the working spaces and old ISOs. Useful if you're on a slow connection and would
|
||||
rather update the chroots instead of downloading filesystem snapshots again.
|
||||
bin/clean.sh Only clears workspaces and old ISOs.
|
||||
--/mirror.lst.sh:
|
||||
Builds a fresh mirror list. Note that it is US based.
|
||||
|
||||
-examples:
|
||||
Included recommendation for how to lay things out, etc.
|
||||
--/HTTP:
|
||||
Recommended layout for webserver layout if using iPXE.
|
||||
-extra:
|
||||
Supporting files for the base building system (mirrorlist, etc.).
|
||||
--/${UXNAME}.png:
|
||||
A 640x480 8-bit RGBA colour PNG which will be used as the background for the bootsplash (if booting via BIOS and not UEFI)
|
||||
--/bootstrap/apacman-*.tar.xz:
|
||||
An AUR-enabled package manager. Necessary for AUR support.
|
||||
--/build.conf.sample:
|
||||
Sample/default config file. If you wish to override any settings, copy to <PROJECT ROOT>/build.conf and make your changes there.
|
||||
First run of the script will do this for you automatically.
|
||||
--/mirrorlist:
|
||||
A set of default mirrorlists to be used for the chroots during building and runtime. Feel free to replace with your own
|
||||
mirrorlist (current list is speed-optimized for east coast US).
|
||||
--/packages.32:
|
||||
A list of packages to install in the base system (32-bit runtime only).
|
||||
--/packages.64:
|
||||
A list of packages to install in the base system (64-bit runtime only).
|
||||
--/packages.both:
|
||||
A list of packages to install in the base system (these should be installed and are supported on both 64- AND 32-bit)
|
||||
--/pre-build.d:
|
||||
Contains files injected into the system. Both 64-bit and 32-bit environments. Note: be sure to place them in hierarchical order
|
||||
(e.g. if you wish to have a file at /usr/foo/bar, you will need to place it in <PROJECT ROOT>/extra/pre-build.d/usr/foo/bar)
|
||||
--/pre-build.d/32:
|
||||
Same as above, but only for 32-bit environments.
|
||||
--/pre-build.d/64:
|
||||
You get the picture.
|
||||
-http:
|
||||
Files to be hosted for PXE booting the environment go here. Set this as your root/DocumentRoot in nginx/Apache (or, ideally,
|
||||
copy over to a separate webserver). This directory is wiped out during any bin/clean.sh operation.
|
||||
-lib:
|
||||
The "guts" of BDisk.
|
||||
-logs:
|
||||
Here you can find full output of the runs. They are prefixed with the run's PID number, and named after the function they occur in.
|
||||
-overlay:
|
||||
These files are applied AFTER the initial setup of the chroots. Same hierarchy rules as extra/pre-build.d.
|
||||
-README:
|
||||
This file.
|
||||
-src:
|
||||
Supporting source code/source code from other projects.
|
||||
--ipxe/:
|
||||
For iPXE support.
|
||||
--ipxe_local/:
|
||||
Various patches and supporting configs to tweak the iPXE build.
|
||||
-tftpboot:
|
||||
Files to be served via TFTP for PXE booting. This directory is wiped out during any bin/clean.sh operation.
|
||||
-TODO:
|
||||
This is just what I'm using to track stuff I want to add.
|
||||
|
||||
|
||||
You may notice other files come and go; they're mostly there for extra goodies/used to determine other things.
|
||||
|
||||
## (Re)Building ##
|
||||
Building must be done as root, and on an Arch x86_64 system (future versions will allow for non-Arch distros).
|
||||
|
||||
# bin/build.sh
|
||||
|
||||
Yeah. It's that easy. The finished product is in iso/.
|
||||
|
||||
|
||||
If you want more verbosity, check out the logs/ directory.
|
||||
68
docs/TODO
@@ -1,68 +0,0 @@
|
||||
## General ##
|
||||
|
||||
-include benchmarking
|
||||
-- http://sourceforge.net/projects/unixbench/
|
||||
-- https://code.google.com/p/byte-unixbench/
|
||||
-- https://github.com/akopytov/sysbench
|
||||
-- (http://blog.due.io/2014/linode-digitalocean-and-vultr-comparison/ etc.)
|
||||
-package in AUR
|
||||
-base rewrite in python. pyalpm may come in handy here.
|
||||
|
||||
|
||||
## NETWORKING ##
|
||||
|
||||
-shorewall/some other firewall?
|
||||
-WISH: locked-down VPN?
|
||||
-autodetection/configuration of network. DHCP is currently running by default, but does it need to support IPv6? if so, how would the user configure their network?
|
||||
-SECURE SSH: https://stribika.github.io/2015/01/04/secure-secure-shell.html
|
||||
-DISABLE NETWORKMANAGER AND "fi.w1.wpa_supplicant1"??? keeps spawning wpa_supplicant (and thusly killing networking proper)
|
||||
-for netboot, custom user agent (should be defined by build.conf)
|
||||
--iPXE's curl
|
||||
--initrd's curl
|
||||
|
||||
|
||||
## Building ##
|
||||
|
||||
-GUMMIBOOT IS GONE FROM THE REPOS. I could repackage it, but better to just see what the hell archiso's doing.
|
||||
-WISH: Better logging[0]
|
||||
-WISH: signing for secureboot releases (PreLoader and gummiboot handle this okay, but require manual intervention)
|
||||
-use manual chrooting functions ONLY if distro not detected as arch. if /usr/bin/systemd-nspawn exists, use that instead
|
||||
--does arch-chroot work across all distros? see https://wiki.archlinux.org/index.php/Install_bundled_32-bit_system_in_Arch64 and https://wiki.archlinux.org/index.php/Chroot
|
||||
--i think this might be unnecessary. testing across other major distros is necessary, but i think i can just use the chroot'd arch-chroot
|
||||
-tweak build.conf (and build.conf.sample) to source the pwd and set as BASEDIR ***if*** the project resources are present in pwd, otherwise throw warning
|
||||
--this is half-done; PWD is currently used by default.
|
||||
-does gummiboot? loader? wtfever it's called support splash backgrounds? can i implement that differently somehow?
|
||||
--yes, see e.g. https://www.reddit.com/r/archlinux/comments/3bwgf0/where_put_the_splasharchbmp_to_splash_screen_boot/
|
||||
-strip out/remove unnecessary and orphan packages (e.g. gcc, make, automake, etc.)
|
||||
-incorporate iPXE tweaks:
|
||||
--http://ipxe.org/crypto
|
||||
--http://ipxe.org/cmd/imgtrust
|
||||
--http://ipxe.org/cmd/imgverify
|
||||
--enable use of custom CA/self-signed certs for HTTPS etc. DONE, partially. need to incorporate codesign certs/keys. routines, conf variables
|
||||
-enable mirror= kernel commandline.
|
||||
--if mirror_(NAME) is present, use that as repo name.
|
||||
--if it starts with /, treat as mirrorlist (Include); otherwise use Server =
|
||||
--if it has mirror_SIG-X, set signature options e.g. _SIG-N would be "SigLevel = Never"
|
||||
-iPXE background support. sed -rf "${BASEDIR}/src/ipxe_local/script.sed" ${SRCDIR}/ipxe/src/config/general.h ; sed -rf "${BASEDIR}/src/ipxe_local/script2.sed" ${SRCDIR}/ipxe/src/config/console.h
|
||||
--note that iPXE VESAFB console is not (yet) supported in EFI, so this is on hold.
|
||||
|
||||
## Split into Separate Tools CD ##
|
||||
|
||||
-include WinMTR, build Mac OS X MTR for dist/tools on CD
|
||||
-include pre-compiled LibreCrypt for opening LUKS parts on Windows (https://github.com/t-d-k/LibreCrypt)
|
||||
--curl -s https://raw.githubusercontent.com/t-d-k/LibreCrypt/master/README.md | egrep 'InstallLibreCrypt_v[A-Za-z0-9\.]*.exe' | cut -f2 -d'"'
|
||||
|
||||
|
||||
__________________________________________________________
|
||||
FOOTNOTES:
|
||||
|
||||
|
||||
[0] I'd really like to implement the following in build.conf; like:
|
||||
http://forums.fedoraforum.org/showthread.php?t=275743
|
||||
# The following is the setting for "verbosity". A more accurate way of saying it is how output should be handled.
|
||||
# Note that for it to be properly parsed, it MUST be in the form of a linear array (e.g. VAR=(1 2 3) ).
|
||||
# '| tee -a ${BASEDIR}/logs/${FUNCNAME}.$(date +%s)' means "display output for STDOUT and STDERR, and also log STDOUT to logs/<function name>.EPOCH_TIME"
|
||||
# '> /dev/null 2>&1' means "hide STDOUT and STDERR, no logging"
|
||||
# '>> ${BASEDIR}/logs/${FUNCNAME}.$(date +%s) 2>&1' means "log both STDOUT and STDERR to logs/<function name>.EPOCH_TIME, no output"
|
||||
# '>> ${BASEDIR}/logs/${FUNCNAME}.$(date +%s)' means "log STDOUT to logs/<function name>.EPOCH_TIME, display (but don't log) STDERR"
|
||||
# '' means "no logging; display both STDOUT and STDERR"
|
||||
288
docs/examples/multi_profile.xml
Normal file
@@ -0,0 +1,288 @@
|
||||
<?xml version='1.0' encoding='UTF-8'?>
|
||||
<bdisk xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://bdisk.square-r00t.net/" xsi:schemaLocation="http://bdisk.square-r00t.net bdisk.xsd">
|
||||
<profile name="default" id="1" uuid="8cdd6bcb-c147-4a63-9779-b5433c510dbc">
|
||||
<meta>
|
||||
<names>
|
||||
<name>BDISK</name>
|
||||
<!--<name>{xpath%../uxname/text()}</name>-->
|
||||
<uxname>bdisk</uxname>
|
||||
<!-- Just like with previous versions of BDisk, you can reference other values...
|
||||
but now with the neat benefits of XPath! Everything you could do in build.ini's and more.
|
||||
See https://www.w3schools.com/xml/xpath_syntax.asp
|
||||
If you need a literal curly brace, double it (e.g. for "{foo}", use "{{foo}}"),
|
||||
UNLESS it's in a <regexes><pattern> as part of the expression. Those are taken as literal strings. -->
|
||||
<pname>{xpath%../name/text()}</pname>
|
||||
</names>
|
||||
<desc>A rescue/restore live environment.</desc>
|
||||
<dev>
|
||||
<author>A. Dev Eloper</author>
|
||||
<email>dev@domain.tld</email>
|
||||
<website>https://domain.tld/~dev</website>
|
||||
</dev>
|
||||
<uri>https://domain.tld/projname</uri>
|
||||
<ver>1.0.0</ver>
|
||||
<!-- This is the VERY FIRST value parsed, and is required. It controls how many levels of {xpath%...} to recurse. -->
|
||||
<!-- If the maximum level is reached, the substitution will evaluate as blank. -->
|
||||
<max_recurse>5</max_recurse>
|
||||
<!-- You need to store regex patterns here and reference them in a special way later, and it's only valid for certain
|
||||
items. See the manual for more information. NO btags within the patterns are allowed. -->
|
||||
<regexes>
|
||||
<pattern id="tarball_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz$</pattern>
|
||||
<pattern id="sig_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz\.sig$</pattern>
|
||||
<pattern id="tarball_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz$</pattern>
|
||||
<pattern id="sig_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz\.sig$</pattern>
|
||||
</regexes>
|
||||
<!-- You can also define variables. NO xpath or regex btags, and they can't be used within other btags! -->
|
||||
<variables>
|
||||
<variable id="bdisk_root">/var/tmp/BDisk</variable>
|
||||
</variables>
|
||||
</meta>
|
||||
<accounts>
|
||||
<!-- Salted/hashed password is "test" -->
|
||||
<rootpass hashed="true">$6$7KfIdtHTcXwVrZAC$LZGNeMNz7v5o/cYuA48FAxtZynpIwO5B1CPGXnOW5kCTVpXVt4SypRqfM.AoKkFt/O7MZZ8ySXJmxpELKmdlF1</rootpass>
|
||||
<user sudo="true">
|
||||
<username>{xpath%//meta/names/uxname/text()}</username>
|
||||
<!-- You can also use substitution from different profiles in this same configuration: -->
|
||||
<!-- <username>{xpath%//profile[@name='another_profile']/meta/names/uxname"}</username> -->
|
||||
<comment>{xpath%//meta/dev/author/text()}</comment>
|
||||
<password hashed="false" hash_algo="sha512" salt="auto">testpassword</password>
|
||||
</user>
|
||||
<user sudo="false">
|
||||
<username>testuser</username>
|
||||
<comment>Test User</comment>
|
||||
<password hashed="false" hash_algo="sha512" salt="auto">anothertestpassword</password>
|
||||
</user>
|
||||
</accounts>
|
||||
<sources>
|
||||
<source arch="x86_64">
|
||||
<mirror>http://archlinux.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball flags="regex latest">{regex%tarball_x86_64}</tarball>
|
||||
<checksum hash_algo="sha1" explicit="false" flags="latest">sha1sums.txt</checksum>
|
||||
<sig keys="7F2D434B9741E8AC" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_x86_64}</sig>
|
||||
</source>
|
||||
<source arch="i686">
|
||||
<mirror>http://archlinux32.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball flags="regex latest">{regex%tarball_i686}</tarball>
|
||||
<checksum hash_algo="sha512" explicit="true">cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e</checksum>
|
||||
<sig keys="248BF41F9BDD61D41D060AE774EDA3C6B06D0506" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_i686}</sig>
|
||||
</source>
|
||||
</sources>
|
||||
<build its_full_of_stars="true">
|
||||
<paths>
|
||||
<base>{variable%bdisk_root}/base</base>
|
||||
<cache>{variable%bdisk_root}/cache</cache>
|
||||
<chroot>{variable%bdisk_root}/chroots</chroot>
|
||||
<overlay>{variable%bdisk_root}/overlay</overlay>
|
||||
<templates>{variable%bdisk_root}/templates</templates>
|
||||
<mount>/mnt/{xpath%//meta/names/uxname/text()}</mount>
|
||||
<distros>{variable%bdisk_root}/distros</distros>
|
||||
<dest>{variable%bdisk_root}/results</dest>
|
||||
<iso>{variable%bdisk_root}/iso_overlay</iso>
|
||||
<http>{variable%bdisk_root}/http</http>
|
||||
<tftp>{variable%bdisk_root}/tftp</tftp>
|
||||
<pki>{variable%bdisk_root}/pki</pki>
|
||||
</paths>
|
||||
<basedistro>archlinux</basedistro>
|
||||
</build>
|
||||
<iso sign="true" multi_arch="true"/>
|
||||
<ipxe sign="true" iso="true">
|
||||
<uri>{xpath%//meta/dev/website/text()}/ipxe</uri>
|
||||
</ipxe>
|
||||
<pki overwrite="false">
|
||||
<!-- http://ipxe.org/crypto -->
|
||||
<ca>
|
||||
<cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/ca.crt</cert>
|
||||
<!-- If csr is self-enclosed (<csr />), we'll just generate and use a CSR in-memory.
|
||||
Assuming we need to generate a certificate, anyways.
|
||||
If you want to write it out to disk (for debugging, etc.) OR use one already generated,
|
||||
then provide a path.
|
||||
e.g.:
|
||||
<csr>{xpath%build/paths/ssl/text()}/ca.csr</csr> -->
|
||||
<csr/>
|
||||
<!-- If you use an index file (or want to) to serialize client certificates, specify it here. -->
|
||||
<!-- It must conform to CADB spec (https://pki-tutorial.readthedocs.io/en/latest/cadb.html). -->
|
||||
<!-- You should probably also specify a serial file if so. -->
|
||||
<!-- Both of these are entirely optional if you aren't using an existing PKI. -->
|
||||
<index>{xpath%../../../build/paths/pki/text()}/index.txt</index>
|
||||
<serial>{xpath%../../../build/paths/pki/text()}/serial</serial>
|
||||
<!-- If you specify a cipher, the key will be encrypted to the passphrase provided by the passphrase attribute.
|
||||
If the key is encrypted (either a pre-existing or a created one) but passphrase is not provided, you will
|
||||
be (securely) prompted for the passphrase to unlock it/add a passphrase to it. -->
|
||||
<key cipher="none" passphrase="none" keysize="4096">{xpath%../../../build/paths/pki/text()}/ca.key</key>
|
||||
<subject>
|
||||
<commonName>domain.tld</commonName>
|
||||
<countryName>XX</countryName>
|
||||
<localityName>Some City</localityName>
|
||||
<stateOrProvinceName>Some State</stateOrProvinceName>
|
||||
<organization>Some Org, Inc.</organization>
|
||||
<organizationalUnitName>Department Name</organizationalUnitName>
|
||||
<emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
|
||||
</subject>
|
||||
</ca>
|
||||
<client>
|
||||
<cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.crt</cert>
|
||||
<csr/>
|
||||
<key cipher="none" passphrase="none" keysize="4096">{xpath%//build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.key</key>
|
||||
<subject>
|
||||
<commonName>website.tld</commonName>
|
||||
<countryName>XX</countryName>
|
||||
<localityName>Some City</localityName>
|
||||
<stateOrProvinceName>Some State</stateOrProvinceName>
|
||||
<organization>Some Org, Inc.</organization>
|
||||
<organizationalUnitName>Department Name</organizationalUnitName>
|
||||
<emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
|
||||
</subject>
|
||||
</client>
|
||||
</pki>
|
||||
<!-- If prompt_passphrase is false and passphrase attribute is not given for a gpg element, we will try to use a
|
||||
blank passphrase for all operations. -->
|
||||
<gpg keyid="none" gnupghome="none" publish="false" prompt_passphrase="false">
|
||||
<!-- The below is only used if we are generating a key (i.e. keyid="none"). -->
|
||||
<key algo="rsa" keysize="4096" expire="0">
|
||||
<name>{xpath%../../../meta/dev/author/text()}</name>
|
||||
<email>{xpath%../../../meta/dev/email/text()}</email>
|
||||
<!-- If present, the subkey element will create a secondary key used *only* for signing. This is good security practice. Obviously, this is only used if we are creating a new (master) key. -->
|
||||
<subkey algo="ed" keysize="25519" expire="0"/>
|
||||
<comment>for {xpath%../../../meta/names/pname/text()} [autogenerated] | {xpath%../../../meta/uri/text()} | {xpath%../../../meta/desc/text()}</comment>
|
||||
</key>
|
||||
</gpg>
|
||||
<sync>
|
||||
<!-- ipxe includes the http directory. or should, anyways. -->
|
||||
<ipxe enabled="true">/srv/http/{xpath%../../meta/names/uxname/text()}</ipxe>
|
||||
<tftp enabled="true">/tftproot/{xpath%../../meta/names/uxname/text()}</tftp>
|
||||
<iso enabled="true">/srv/http/isos/{xpath%../../meta/names/uxname/text()}</iso>
|
||||
<gpg enabled="true" format="asc">/srv/http/{xpath%../../meta/names/uxname/text()}/pubkey.asc</gpg>
|
||||
<rsync enabled="true">
|
||||
<user>root</user>
|
||||
<host>mirror.domain.tld</host>
|
||||
<port>22</port>
|
||||
<pubkey>~/.ssh/id_ed25519</pubkey>
|
||||
</rsync>
|
||||
</sync>
|
||||
</profile>
|
||||
<profile name="alternate" id="2" uuid="2ed07c19-2071-4d66-8569-da40475ba716">
|
||||
<meta>
|
||||
<names>
|
||||
<name>ALTCD</name>
|
||||
<uxname>bdisk_alt</uxname>
|
||||
<pname>{xpath%../name/text()}</pname>
|
||||
</names>
|
||||
<desc>Another rescue/restore live environment.</desc>
|
||||
<dev>
|
||||
<author>Another Dev Eloper</author><!-- You can reference other profiles within the same configuration. -->
|
||||
<email>{xpath%//profile[@name="default"]/meta/dev/email/text()}</email>
|
||||
<website>{xpath%//profile[@name="default"]/meta/dev/website/text()}</website>
|
||||
</dev>
|
||||
<uri>https://domain.tld/projname</uri>
|
||||
<ver>0.0.1</ver>
|
||||
<max_recurse>5</max_recurse>
|
||||
<regexes>
|
||||
<pattern id="tarball_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz$</pattern>
|
||||
<pattern id="sig_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz\.sig$</pattern>
|
||||
<pattern id="tarball_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz$</pattern>
|
||||
<pattern id="sig_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz\.sig$</pattern>
|
||||
</regexes>
|
||||
<variables>
|
||||
<variable id="bdisk_root">/var/tmp/BDisk</variable>
|
||||
</variables>
|
||||
</meta>
|
||||
<accounts>
|
||||
<rootpass hashed="false">atotallyinsecurepassword</rootpass>
|
||||
<user sudo="false">
|
||||
<username>testuser</username>
|
||||
<comment>Test User</comment>
|
||||
<password hashed="false" hash_algo="sha512" salt="auto">atestpassword</password>
|
||||
</user>
|
||||
</accounts>
|
||||
<sources>
|
||||
<source arch="x86_64">
|
||||
<mirror>http://archlinux.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball flags="regex latest">{regex%tarball_x86_64}</tarball>
|
||||
<checksum hash_algo="sha1" explicit="false" flags="latest">sha1sums.txt</checksum>
|
||||
<sig keys="7F2D434B9741E8AC" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_x86_64}</sig>
|
||||
</source>
|
||||
<source arch="i686">
|
||||
<mirror>http://archlinux32.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball flags="regex latest">{regex%tarball_i686}</tarball>
|
||||
<checksum hash_algo="sha512" explicit="true">cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e</checksum>
|
||||
<sig keys="248BF41F9BDD61D41D060AE774EDA3C6B06D0506" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_i686}</sig>
|
||||
</source>
|
||||
</sources>
|
||||
<build its_full_of_stars="true">
|
||||
<paths>
|
||||
<base>{variable%bdisk_root}/base</base>
|
||||
<cache>{variable%bdisk_root}/cache</cache>
|
||||
<chroot>{variable%bdisk_root}/chroots</chroot>
|
||||
<overlay>{variable%bdisk_root}/overlay</overlay>
|
||||
<templates>{variable%bdisk_root}/templates</templates>
|
||||
<mount>/mnt/{xpath%//meta/names/uxname/text()}</mount>
|
||||
<distros>{variable%bdisk_root}/distros</distros>
|
||||
<dest>{variable%bdisk_root}/results</dest>
|
||||
<iso>{variable%bdisk_root}/iso_overlay</iso>
|
||||
<http>{variable%bdisk_root}/http</http>
|
||||
<tftp>{variable%bdisk_root}/tftp</tftp>
|
||||
<pki>{variable%bdisk_root}/pki</pki>
|
||||
</paths>
|
||||
<basedistro>archlinux</basedistro>
|
||||
</build>
|
||||
<iso sign="true" multi_arch="true"/>
|
||||
<ipxe sign="true" iso="true">
|
||||
<uri>{xpath%//meta/dev/website/text()}/ipxe</uri>
|
||||
</ipxe>
|
||||
<pki overwrite="false">
|
||||
<ca>
|
||||
<cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/ca.crt</cert>
|
||||
<csr/>
|
||||
<index>{xpath%../../../build/paths/pki/text()}/index.txt</index>
|
||||
<serial>{xpath%../../../build/paths/pki/text()}/serial</serial>
|
||||
<key cipher="none" passphrase="none" keysize="4096">{xpath%../../../build/paths/pki/text()}/ca.key</key>
|
||||
<subject>
|
||||
<commonName>domain.tld</commonName>
|
||||
<countryName>XX</countryName>
|
||||
<localityName>Some City</localityName>
|
||||
<stateOrProvinceName>Some State</stateOrProvinceName>
|
||||
<organization>Some Org, Inc.</organization>
|
||||
<organizationalUnitName>Department Name</organizationalUnitName>
|
||||
<emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
|
||||
</subject>
|
||||
</ca>
|
||||
<client>
|
||||
<cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.crt</cert>
|
||||
<csr/>
|
||||
<key cipher="none" passphrase="none" keysize="4096">{xpath%//build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.key</key>
|
||||
<subject>
|
||||
<commonName>website.tld</commonName>
|
||||
<countryName>XX</countryName>
|
||||
<localityName>Some City</localityName>
|
||||
<stateOrProvinceName>Some State</stateOrProvinceName>
|
||||
<organization>Some Org, Inc.</organization>
|
||||
<organizationalUnitName>Department Name</organizationalUnitName>
|
||||
<emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
|
||||
</subject>
|
||||
</client>
|
||||
</pki>
|
||||
<gpg keyid="none" gnupghome="none" publish="false" prompt_passphrase="false">
|
||||
<key algo="rsa" keysize="4096" expire="0">
|
||||
<name>{xpath%../../../meta/dev/author/text()}</name>
|
||||
<email>{xpath%../../../meta/dev/email/text()}</email>
|
||||
<comment>for {xpath%../../../meta/names/pname/text()} [autogenerated] | {xpath%../../../meta/uri/text()} | {xpath%../../../meta/desc/text()}</comment>
|
||||
</key>
|
||||
</gpg>
|
||||
<sync>
|
||||
<ipxe enabled="true">/srv/http/{xpath%../../meta/names/uxname/text()}</ipxe>
|
||||
<tftp enabled="true">/tftproot/{xpath%../../meta/names/uxname/text()}</tftp>
|
||||
<iso enabled="true">/srv/http/isos/{xpath%../../meta/names/uxname/text()}</iso>
|
||||
<gpg enabled="true" format="asc">/srv/http/{xpath%../../meta/names/uxname/text()}/pubkey.asc</gpg>
|
||||
<rsync enabled="true">
|
||||
<user>root</user>
|
||||
<host>mirror.domain.tld</host>
|
||||
<port>22</port>
|
||||
<pubkey>~/.ssh/id_ed25519</pubkey>
|
||||
</rsync>
|
||||
</sync>
|
||||
</profile>
|
||||
</bdisk>
|
||||
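The {xpath%...}, {regex%...}, and {variable%...} substitutions ("btags") described in the comments above are handled by the new config parser. As a rough illustration of only the simplest case, here is a hedged sketch of resolving {variable%...} btags against the <variables> block; it is not BDisk's actual parser, and it assumes the default namespace has already been stripped (regen_multi.py below shows one way to do that):

import re

BTAG_RE = re.compile(r'\{variable%([^}]+)\}')

def resolve_variables(profile):
    # profile is an lxml/ElementTree <profile> element. Map <variable id="...">
    # elements to their text, then substitute {variable%id} occurrences in every
    # element's text. The real parser also handles {xpath%...} and {regex%...}
    # btags and recurses up to <max_recurse> times.
    varmap = {v.get('id'): (v.text or '')
              for v in profile.findall('.//variables/variable')}
    for elem in profile.iter():
        if isinstance(elem.tag, str) and elem.text:
            elem.text = BTAG_RE.sub(lambda m: varmap.get(m.group(1), ''), elem.text)
    return profile

# e.g. '{variable%bdisk_root}/base' becomes '/var/tmp/BDisk/base'.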
84
docs/examples/regen_multi.py
Executable file
@@ -0,0 +1,84 @@
|
||||
#!/usr/bin/env python3.6
|
||||
|
||||
import copy
|
||||
from lxml import etree, objectify
|
||||
|
||||
#parser = etree.XMLParser(remove_blank_text = True)
|
||||
parser = etree.XMLParser(remove_blank_text = False)
|
||||
|
||||
# We need to append to a new root because you can't edit nsmap, and you can't
|
||||
# xpath on an element with a naked namespace (e.g. 'xmlns="..."').
|
||||
ns = {None: 'http://bdisk.square-r00t.net/',
|
||||
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
|
||||
xsi = {'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
|
||||
'http://bdisk.square-r00t.net bdisk.xsd'}
|
||||
new_cfg = etree.Element('bdisk', nsmap = ns, attrib = xsi)
|
||||
new_cfg.text = '\n '
|
||||
|
||||
with open('single_profile.xml', 'rb') as f:
|
||||
xml = etree.fromstring(f.read(), parser)
|
||||
|
||||
|
||||
roottree = xml.getroottree()
|
||||
for elem in roottree.getiterator():
|
||||
if not hasattr(elem.tag, 'find'):
|
||||
continue
|
||||
i = elem.tag.find('}')
|
||||
if i >= 0:
|
||||
elem.tag = elem.tag[i + 1:]
|
||||
objectify.deannotate(roottree, cleanup_namespaces = True)
|
||||
|
||||
|
||||
single_profile = xml.xpath('/bdisk/profile[1]')[0]
|
||||
alt_profile = copy.deepcopy(single_profile)
|
||||
for c in alt_profile.xpath('//comment()'):
|
||||
p = c.getparent()
|
||||
p.remove(c)
|
||||
|
||||
# Change the profile identifiers
|
||||
alt_profile.attrib['name'] = 'alternate'
|
||||
alt_profile.attrib['id'] = '2'
|
||||
alt_profile.attrib['uuid'] = '2ed07c19-2071-4d66-8569-da40475ba716'
|
||||
|
||||
meta_tags = {'name': 'ALTCD',
|
||||
'uxname': 'bdisk_alt',
|
||||
'pname': '{xpath%../name/text()}',
|
||||
'desc': 'Another rescue/restore live environment.',
|
||||
'author': 'Another Dev Eloper',
|
||||
'email': '{xpath%//profile[@name="default"]/meta/dev/email/text()}',
|
||||
'website': '{xpath%//profile[@name="default"]/meta/dev/website/text()}',
|
||||
'ver': '0.0.1'}
|
||||
# Change the names
|
||||
meta = alt_profile.xpath('/profile/meta')[0]
|
||||
for e in meta.iter():
|
||||
if e.tag in meta_tags:
|
||||
e.text = meta_tags[e.tag]
|
||||
|
||||
accounts_tags = {'rootpass': 'atotallyinsecurepassword',
|
||||
'username': 'testuser',
|
||||
'comment': 'Test User',
|
||||
'password': 'atestpassword'}
|
||||
accounts = alt_profile.xpath('/profile/accounts')[0]
|
||||
for e in accounts.iter():
|
||||
if e.tag in accounts_tags:
|
||||
e.text = accounts_tags[e.tag]
|
||||
if e.tag == 'rootpass':
|
||||
e.attrib['hashed'] = 'false'
|
||||
elif e.tag == 'user':
|
||||
e.attrib['sudo'] = 'false'
|
||||
# Delete the second user
|
||||
accounts.remove(accounts[2])
|
||||
author = alt_profile.xpath('/profile/meta/dev/author')[0]
|
||||
author.addnext(etree.Comment(
|
||||
' You can reference other profiles within the same configuration. '))
|
||||
#xml.append(alt_profile)
|
||||
|
||||
for child in xml.xpath('/bdisk/profile'):
|
||||
new_cfg.append(copy.deepcopy(child))
|
||||
new_cfg.append(alt_profile)
|
||||
|
||||
with open('multi_profile.xml', 'wb') as f:
|
||||
f.write(etree.tostring(new_cfg,
|
||||
pretty_print = True,
|
||||
encoding = 'UTF-8',
|
||||
xml_declaration = True))
|
||||
55
docs/examples/simple_profile.xml
Normal file
@@ -0,0 +1,55 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<bdisk>
|
||||
<profile name="simple" id="1" uuid="7b9128d2-0ba5-4302-9b3c-9951401853e5">
|
||||
<meta>
|
||||
<names>
|
||||
<name>BDISK</name>
|
||||
<uxname>BDisk</uxname>
|
||||
<pname>BDisk</pname>
|
||||
</names>
|
||||
<desc>A rescue/restore live environment.</desc>
|
||||
<dev>
|
||||
<author>A. Dev Eloper</author>
|
||||
<email>dev@domain.tld</email>
|
||||
<website>https://domain.tld/~dev</website>
|
||||
</dev>
|
||||
<uri>https://domain.tld/projname</uri>
|
||||
<ver>1.0.0</ver>
|
||||
<max_recurse>5</max_recurse>
|
||||
<regexes/>
|
||||
<variables/>
|
||||
</meta>
|
||||
<accounts>
|
||||
<rootpass hashed="no" hash_algo="sha512" salt="auto">testpassword</rootpass>
|
||||
</accounts>
|
||||
<sources>
|
||||
<source arch="x86_64">
|
||||
<mirror>http://archlinux.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball>archlinux-bootstrap-2018.05.01-x86_64.tar.gz</tarball>
|
||||
<checksum/>
|
||||
<sig/>
|
||||
</source>
|
||||
</sources>
|
||||
<build its_full_of_stars="no">
|
||||
<paths>
|
||||
<base>/tmp/bdisk/base</base>
|
||||
<cache>/tmp/bdisk/cache</cache>
|
||||
<chroot>/tmp/bdisk/chroots</chroot>
|
||||
<overlay>/tmp/bdisk/overlay</overlay>
|
||||
<templates>/tmp/bdisk/templates</templates>
|
||||
<mount>/mnt/bdisk</mount>
|
||||
<distros>/tmp/bdisk/distros</distros>
|
||||
<dest>/tmp/bdisk/results</dest>
|
||||
<iso>/tmp/bdisk/iso_overlay</iso>
|
||||
<http>/tmp/bdisk/http</http>
|
||||
<tftp>/tmp/bdisk/tftp</tftp>
|
||||
<pki>/tmp/bdisk/pki</pki>
|
||||
</paths>
|
||||
<basedistro>archlinux</basedistro>
|
||||
</build>
|
||||
<iso sign="no" multi_arch="no" />
|
||||
<gpg/>
|
||||
<sync/>
|
||||
</profile>
|
||||
</bdisk>
|
||||
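The hashed rootpass values in the profile examples are standard crypt(3) SHA-512 strings (the $6$... form). For anyone who would rather pre-hash a password than rely on hashed="false"/salt="auto", a sketch of generating such a value with the standard library (Unix-only; the password literal is just an example):

import crypt

# Produce a salted SHA-512 crypt hash suitable for e.g. <rootpass hashed="true">...</rootpass>
hashed = crypt.crypt('testpassword', crypt.mksalt(crypt.METHOD_SHA512))
print(hashed)  # something like '$6$<salt>$<hash>'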
188
docs/examples/single_profile.xml
Normal file
@@ -0,0 +1,188 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<bdisk xmlns="http://bdisk.square-r00t.net/"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://bdisk.square-r00t.net bdisk.xsd">
|
||||
<profile name="default" id="1" uuid="8cdd6bcb-c147-4a63-9779-b5433c510dbc">
|
||||
<meta>
|
||||
<names>
|
||||
<name>BDISK</name>
|
||||
<!--<name>{xpath%../uxname/text()}</name>-->
|
||||
<uxname>bdisk</uxname>
|
||||
<!-- Just like with previous versions of BDisk, you can reference other values...
|
||||
but now with the neat benefits of XPath! Everything you could do in build.ini's and more.
|
||||
See https://www.w3schools.com/xml/xpath_syntax.asp
|
||||
If you need a literal curly brace, double it (e.g. for "{foo}", use "{{foo}}"),
|
||||
UNLESS it's in a <regexes><pattern> as part of the expression. Those are taken as literal strings. -->
|
||||
<pname>{xpath%../name/text()}</pname>
|
||||
</names>
|
||||
<desc>A rescue/restore live environment.</desc>
|
||||
<dev>
|
||||
<author>A. Dev Eloper</author>
|
||||
<email>dev@domain.tld</email>
|
||||
<website>https://domain.tld/~dev</website>
|
||||
</dev>
|
||||
<uri>https://domain.tld/projname</uri>
|
||||
<ver>1.0.0</ver>
|
||||
<!-- This is the VERY FIRST value parsed, and is required. It controls how many levels of {xpath%...} to recurse. -->
|
||||
<!-- If the maximum level is reached, the substitution will evaluate as blank. -->
|
||||
<max_recurse>5</max_recurse>
|
||||
<!-- You need to store regex patterns here and reference them in a special way later, and it's only valid for certain
|
||||
items. See the manual for more information. NO btags within the patterns are allowed. -->
|
||||
<regexes>
|
||||
<pattern id="tarball_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz$</pattern>
|
||||
<pattern id="sig_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz\.sig$</pattern>
|
||||
<pattern id="tarball_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz$</pattern>
|
||||
<pattern id="sig_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz\.sig$</pattern>
|
||||
</regexes>
|
||||
<!-- You can also define variables. NO xpath or regex btags, and they can't be used within other btags! -->
|
||||
<variables>
|
||||
<variable id="bdisk_root">/var/tmp/BDisk</variable>
|
||||
</variables>
|
||||
</meta>
|
||||
<accounts>
|
||||
<!-- Salted/hashed password is "test" -->
|
||||
<rootpass hashed="true">$6$7KfIdtHTcXwVrZAC$LZGNeMNz7v5o/cYuA48FAxtZynpIwO5B1CPGXnOW5kCTVpXVt4SypRqfM.AoKkFt/O7MZZ8ySXJmxpELKmdlF1</rootpass>
|
||||
<user sudo="true">
|
||||
<username>{xpath%../../../meta/names/uxname/text()}</username>
|
||||
<!-- You can also use substitution from different profiles in this same configuration: -->
|
||||
<!-- <username>{xpath%//profile[@name='another_profile']/meta/names/uxname"}</username> -->
|
||||
<comment>{xpath%../../../meta/dev/author/text()}</comment>
|
||||
<password hashed="false"
|
||||
hash_algo="sha512"
|
||||
salt="auto">testpassword</password>
|
||||
</user>
|
||||
<user sudo="false">
|
||||
<username>testuser</username>
|
||||
<comment>Test User</comment>
|
||||
<password hashed="false"
|
||||
hash_algo="sha512"
|
||||
salt="auto">anothertestpassword</password>
|
||||
</user>
|
||||
</accounts>
|
||||
<sources>
|
||||
<source arch="x86_64">
|
||||
<mirror>http://archlinux.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball flags="regex latest">{regex%tarball_x86_64}</tarball>
|
||||
<checksum hash_algo="sha1"
|
||||
explicit="false"
|
||||
flags="latest">sha1sums.txt</checksum>
|
||||
<sig keys="7F2D434B9741E8AC"
|
||||
keyserver="hkp://pool.sks-keyservers.net"
|
||||
flags="regex latest">{regex%sig_x86_64}</sig>
|
||||
</source>
|
||||
<source arch="i686">
|
||||
<mirror>http://archlinux32.mirror.domain.tld</mirror>
|
||||
<rootpath>/iso/latest</rootpath>
|
||||
<tarball flags="regex latest">{regex%tarball_i686}</tarball>
|
||||
<checksum hash_algo="sha512"
|
||||
explicit="true">cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e </checksum>
|
||||
<sig keys="248BF41F9BDD61D41D060AE774EDA3C6B06D0506"
|
||||
keyserver="hkp://pool.sks-keyservers.net"
|
||||
flags="regex latest">{regex%sig_i686}</sig>
|
||||
</source>
|
||||
</sources>
|
||||
<packages>
|
||||
<package repo="core">openssh</package>
|
||||
</packages>
|
||||
<build its_full_of_stars="true">
|
||||
<paths>
|
||||
<base>{variable%bdisk_root}/base</base>
|
||||
<cache>{variable%bdisk_root}/cache</cache>
|
||||
<chroot>{variable%bdisk_root}/chroots</chroot>
|
||||
<overlay>{variable%bdisk_root}/overlay</overlay>
|
||||
<templates>{variable%bdisk_root}/templates</templates>
|
||||
<mount>/mnt/{xpath%//meta/names/uxname/text()}</mount>
|
||||
<distros>{variable%bdisk_root}/distros</distros>
|
||||
<dest>{variable%bdisk_root}/results</dest>
|
||||
<iso>{variable%bdisk_root}/iso_overlay</iso>
|
||||
<http>{variable%bdisk_root}/http</http>
|
||||
<tftp>{variable%bdisk_root}/tftp</tftp>
|
||||
<pki>{variable%bdisk_root}/pki</pki>
|
||||
</paths>
|
||||
<basedistro>archlinux</basedistro>
|
||||
</build>
|
||||
<iso sign="true" multi_arch="true"/>
|
||||
<ipxe sign="true" iso="true">
|
||||
<uri>{xpath%//meta/dev/website/text()}/ipxe</uri>
|
||||
</ipxe>
|
||||
<pki overwrite="false">
|
||||
<!-- http://ipxe.org/crypto -->
|
||||
<ca>
|
||||
<cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/ca.crt</cert>
|
||||
<!-- If csr is self-enclosed (<csr />), we'll just generate and use a CSR in-memory.
|
||||
Assuming we need to generate a certificate, anyways.
|
||||
If you want to write it out to disk (for debugging, etc.) OR use one already generated,
|
||||
then provide a path.
|
||||
e.g.:
|
||||
<csr>{xpath%build/paths/ssl/text()}/ca.csr</csr> -->
|
||||
<csr/>
|
||||
<!-- If you use an index file (or want to) to serialize client certificates, specify it here. -->
|
||||
<!-- It must conform to CADB spec (https://pki-tutorial.readthedocs.io/en/latest/cadb.html). -->
|
||||
<!-- You should probably also specify a serial file if so. -->
|
||||
<!-- Both of these are entirely optional if you aren't using an existing PKI. -->
|
||||
<index>{xpath%../../../build/paths/pki/text()}/index.txt</index>
|
||||
<serial>{xpath%../../../build/paths/pki/text()}/serial</serial>
|
||||
<!-- If you specify a cipher, the key will be encrypted to the passphrase provided by the passphrase attribute.
|
||||
If the key is encrypted (either a pre-existing or a created one) but passphrase is not provided, you will
|
||||
be (securely) prompted for the passphrase to unlock it/add a passphrase to it. -->
|
||||
<key cipher="none"
|
||||
passphrase="none"
|
||||
keysize="4096">{xpath%../../../build/paths/pki/text()}/ca.key</key>
|
||||
<subject>
|
||||
<commonName>domain.tld</commonName>
|
||||
<countryName>XX</countryName>
|
||||
<localityName>Some City</localityName>
|
||||
<stateOrProvinceName>Some State</stateOrProvinceName>
|
||||
<organization>Some Org, Inc.</organization>
|
||||
<organizationalUnitName>Department Name</organizationalUnitName>
|
||||
<emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
|
||||
</subject>
|
||||
</ca>
|
||||
<client>
|
||||
<cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.crt</cert>
|
||||
<csr/>
|
||||
<key cipher="none"
|
||||
passphrase="none"
|
||||
keysize="4096">{xpath%//build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.key</key>
|
||||
<subject>
|
||||
<commonName>website.tld</commonName>
|
||||
<countryName>XX</countryName>
|
||||
<localityName>Some City</localityName>
|
||||
<stateOrProvinceName>Some State</stateOrProvinceName>
|
||||
<organization>Some Org, Inc.</organization>
|
||||
<organizationalUnitName>Department Name</organizationalUnitName>
|
||||
<emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
|
||||
</subject>
|
||||
</client>
|
||||
</pki>
|
||||
<!-- If prompt_passphrase is "false" and passphrase attribute is not given for a gpg element, we will try to use a
|
||||
blank passphrase for all operations. -->
|
||||
<gpg keyid="none"
|
||||
gnupghome="none"
|
||||
publish="false"
|
||||
prompt_passphrase="false">
|
||||
<!-- The below is only used if we are generating a key (i.e. keyid="none"). -->
|
||||
<key algo="rsa" keysize="4096" expire="0">
|
||||
<name>{xpath%../../../meta/dev/author/text()}</name>
|
||||
<email>{xpath%../../../meta/dev/email/text()}</email>
|
||||
<comment>for {xpath%../../../meta/names/pname/text()} [autogenerated] | {xpath%../../../meta/uri/text()} | {xpath%../../../meta/desc/text()}</comment>
|
||||
</key>
|
||||
</gpg>
|
||||
<sync>
|
||||
<!-- ipxe includes the http directory. or should, anyways. -->
|
||||
<ipxe enabled="true">/srv/http/{xpath%../../meta/names/uxname/text()}</ipxe>
|
||||
<tftp enabled="true">/tftproot/{xpath%../../meta/names/uxname/text()}</tftp>
|
||||
<iso enabled="true">/srv/http/isos/{xpath%../../meta/names/uxname/text()}</iso>
|
||||
<gpg enabled="true"
|
||||
format="asc">/srv/http/{xpath%../../meta/names/uxname/text()}/pubkey.asc</gpg>
|
||||
<rsync enabled="true">
|
||||
<user>root</user>
|
||||
<host>mirror.domain.tld</host>
|
||||
<port>22</port>
|
||||
<pubkey>~/.ssh/id_ed25519</pubkey>
|
||||
</rsync>
|
||||
</sync>
|
||||
</profile>
|
||||
</bdisk>
|
||||
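The `{xpath%...}` and `{variable%...}` btags used throughout the example above are resolved recursively, up to `<max_recurse>` levels, after which unresolved btags evaluate as blank. As a rough illustration of the idea only (not BDisk's actual parser; XML namespaces and the doubled-brace escape are ignored here), a resolver might look like this:

```
# Minimal sketch of {xpath%...} btag resolution, assuming a namespace-free
# document. Illustration only, not BDisk's real implementation.
import re
from lxml import etree

BTAG = re.compile(r'\{xpath%(?P<expr>.+?)\}')

def resolve(elem, text, max_recurse=5):
    """Expand {xpath%...} btags in `text`, relative to `elem`."""
    for _ in range(max_recurse):
        match = BTAG.search(text)
        if not match:
            return text
        result = elem.xpath(match.group('expr'))
        value = str(result[0]) if result else ''
        text = text[:match.start()] + value + text[match.end():]
    return BTAG.sub('', text)  # recursion limit hit: unresolved btags go blank

doc = etree.fromstring(
    '<names><name>BDISK</name><uxname>bdisk</uxname>'
    '<pname>{xpath%../name/text()}</pname></names>'
)
pname = doc.find('pname')
print(resolve(pname, pname.text))  # -> BDISK
```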
6
docs/manual/BODY.adoc
Normal file
@@ -0,0 +1,6 @@
|
||||
include::USER.adoc[]
|
||||
include::DEV.adoc[]
|
||||
include::BOOT.adoc[]
|
||||
include::FURTHER.adoc[]
|
||||
include::FAQ.adoc[]
|
||||
//include::FOOT.adoc[]
|
||||
5
docs/manual/BOOT.adoc
Normal file
@@ -0,0 +1,5 @@
|
||||
= Netboot
|
||||
|
||||
It's possible to netboot my personal build of BDisk (or any environment built with BDisk, but this serves as an example for your own setup). I mostly keep this up for emergencies in case I need it, but it's good to show you that yes, you can boot a 2GB+ squashed and compressed filesystem from a <50MB image file.
|
||||
|
||||
include::netboot/HOWTO.adoc[]
|
||||
5
docs/manual/DEV.adoc
Normal file
@@ -0,0 +1,5 @@
|
||||
= Developer Manual
|
||||
|
||||
BDisk can be sourced for other projects, as it's written in a modular manner. Version 4.x aims to be installable as a standard Python module. This developer manual intends to provide information you may need to change parts of BDisk (or change how it behaves) -- it *is* opensource, after all!
|
||||
|
||||
include::dev/FUNCTIONS.adoc[]
|
||||
5
docs/manual/FAQ.adoc
Normal file
@@ -0,0 +1,5 @@
|
||||
= FAQ
|
||||
|
||||
Here you will find some answers to Frequently Asked Questions I've received about this project. Please be sure to check this list before <<FURTHER.adoc#_bug_reports_feature_requests, opening a bug report>> or sending a patch!
|
||||
|
||||
include::faq/INDEX.adoc[]
|
||||
8
docs/manual/FOOT.adoc
Normal file
@@ -0,0 +1,8 @@
|
||||
//[appendix]
|
||||
//= User Manual
|
||||
//[appendix]
|
||||
//= Developer Manual
|
||||
//[appendix]
|
||||
//= Netboot
|
||||
//[appendix]
|
||||
//= Bug Reports/Feature Requests
|
||||
8
docs/manual/FURTHER.adoc
Normal file
@@ -0,0 +1,8 @@
|
||||
= Further Reading/Resources
|
||||
|
||||
Here you will find further info, other resources, and such relating to BDisk.
|
||||
|
||||
include::further/PASSWORDS.adoc[]
|
||||
include::further/BUGS.adoc[]
|
||||
include::further/CONTACT.adoc[]
|
||||
|
||||
51
docs/manual/HEAD.adoc
Normal file
@@ -0,0 +1,51 @@
|
||||
= BDisk User and Developer Manual
|
||||
Brent Saner <bts@square-r00t.net>
|
||||
v2.0, 2018-05
|
||||
:doctype: book
|
||||
:data-uri:
|
||||
:imagesdir: images
|
||||
:sectlinks:
|
||||
:toc: preamble
|
||||
:toc2: left
|
||||
:idprefix:
|
||||
:sectnums:
|
||||
:toclevels: 2
|
||||
// So there's currently a "bug" in that the TOC will display with continued numbering across parts.
|
||||
// I essentially want the opposite of https://github.com/asciidoctor/asciidoctor/issues/979 TODO
|
||||
|
||||
[dedication]
|
||||
= Thanks
|
||||
See CREDITS in the project source for a list of thanks.
|
||||
|
||||
|
||||
[preface]
|
||||
= Preface
|
||||
=== About the Author
|
||||
I am a GNU/Linux Systems/Network Administrator/Engineer- I wear a lot of hats. I have a lot of side projects to keep me busy when I’m not working at _${dayjob}_, mostly to assist in other side projects and become more efficient and proficient at those tasks. “Shaving the yak,” footnote:[See http://catb.org/jargon/html/Y/yak-shaving.html] indeed.
|
||||
|
||||
A lot of research went into how low-level boot operations take place when writing BDisk, both in BIOS and UEFI footnote:[*Unified Extensible Firmware Interface.* UEFI is not BIOS, and BIOS is not UEFI.] (and corresponding concepts such as Secureboot, etc.) which is no easy task to understand and very commonly misunderstood. (For instance, a common misconception is that UEFI necessarily implies Secureboot. This is quite far from the truth and UEFI by itself is quite a useful replacement for BIOS). I invite you to do research into the specifications yourself; it's rather fascinating.
|
||||
|
||||
|
||||
=== What is BDisk?
|
||||
BDisk refers to both a live distribution I use in my own uses (for rescue situations, recovery, etc.) but foremost and most importantly, it also refers to the tool I use for building that distribution. In other words, it's both a complete GNU/Linux distribution you can run entirely from USB/CD/DVD/etc. (without needing to install it to your hard drive)... and also the name of a tool to create a custom GNU/Linux install. The latter is what this project and documentation refer to when the word “BDisk” is used.
|
||||
|
||||
This documentation was started when I rewrote BDisk in Python 3.x; versions 0.x-2.x of BDisk were written in Bash, and horribly inelegant and rigid. It was a valiant effort, and *mostly* worked. Until it stopped working. To my knowledge, it is (or was) in use by https://ninjaos.org[NinjaOS^] as well as a project for education purposes in Indonesia, though I imagine it's in use other places as well. Ideally it should help those wishing to offer specialized GNU/Linux live media or install CDs.
|
||||
|
||||
Version 4.x is an entire rewrite to be much more modular and implement a much more flexible structure based on feature requests that have accumulated over time. footnote:[I should take the time to note that I am still quite new to Python so expect there to be plenty of optimizations to be made and general WTF-ery from seasoned Python developers. If you encounter any bugs or improvements, please <<FURTHER.adoc#_bug_reports_feature_requests,report them>>! It'd be much appreciated.]
|
||||
|
||||
One of my main goals was to make BDisk as easy to use as possible. This is surprisingly hard to do- it’s quite challenging to try to approach software you’ve written with the mindset of someone other than you.
|
||||
|
||||
It’s my hope that by releasing this utility (and documenting it), you can use it and save some time for yourself as well (and hopefully get the chance to learn a bit more in the process!).
|
||||
|
||||
It of course is not the <<i_don_t_like_bdisk_are_there_any_other_alternatives,only live media creator>> out there, but most others only focus on remastering an existing ISO, or creating an installer ISO -- not creating a custom live-centric environment.
|
||||
|
||||
=== Copyright/Licensing
|
||||
The BDisk code is https://www.gnu.org/licenses/gpl-3.0.en.html[GPLv3-licensed^]. This means that you can use it for business reasons, personal reasons, modify it, etc. Please be sure to familiarize yourself with the full set of terms. You can find the full license in `docs/LICENSE`.
|
||||
|
||||
image::https://www.gnu.org/graphics/gplv3-127x51.png[GPLv3,align="center"]
|
||||
|
||||
This document, and all other associated author-generated documentation, are released under the http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons CC-BY-SA 4.0^] copyright. It's essentially the GPL for non-software, so similar terms apply.
|
||||
|
||||
image::https://i.creativecommons.org/l/by-sa/4.0/88x31.png[CC-BY-SA_4.0,align="center"]
|
||||
|
||||
include::BODY.adoc[]
|
||||
13
docs/manual/TODO
Normal file
@@ -0,0 +1,13 @@
|
||||
- dev/{FUNCTIONS.adoc,functions/}
|
||||
need to update with new subpackages/functions etc.
|
||||
|
||||
- script macOS tool for imaging to USB?
|
||||
|
||||
- in faq/LONGTIME.adoc, in ==== Configuring the local mirror and ==== Configuring BDisk, mirrorlist should be part of the archlinux plugin - NOT a distributed hardcoded file. (can we then get rid of <paths><base> entirely?)
|
||||
|
||||
- in faq/ISOBIG.adoc and the doc section it references, make sure we reference that the package lists are now in the environment plugin!
|
||||
|
||||
- change all references to build.ini to something like "BDisk configuration file"
|
||||
|
||||
- reminder: users can specify a local file source for <sources><source> items by using "file:///absolute/path/to/file"
|
||||
-- todo: add http auth, ftp, ftps
|
||||
22
docs/manual/USER.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
= User Manual
|
||||
|
||||
BDisk was ultimately designed to make your life easier. "Why would I possibly need yet another LiveCD/LiveUSB?" Well, that's sort of the point- by customizing a live distribution of GNU/Linux to _your_ particular needs/desires/whimsy, you can do away with the multiple other images you keep around. It's designed to let you create a fully customized distribution/live environment.
|
||||
|
||||
Using BDisk, you can:
|
||||
|
||||
* Install GNU/Linux (https://wiki.archlinux.org/index.php/installation_guide[Arch^], https://watchmysys.com/blog/2015/02/installing-centos-7-with-a-chroot/[CentOS^], https://www.debian.org/releases/stable/amd64/apds03.html.en[Debian^], https://wiki.gentoo.org/wiki/Handbook:AMD64#Installing_Gentoo[Gentoo^], https://help.ubuntu.com/lts/installation-guide/powerpc/apds04.html[Ubuntu^]...). BDisk's flagship and guaranteed guest distro may be Arch-based, but many if not most other distros offer ways to install from any GNU/Linux live distribution. Plus, with the 4.x rewrite, it is possible to add support for any modern GNU/Linux guest distro.
|
||||
** This means one could easily create an http://aif.square-r00t.net/[automated Arch install ISO^], or Gentoo installer, etc.
|
||||
* Perform disk maintenance (https://raid.wiki.kernel.org/index.php/RAID_setup[mdadm^], fdisk / http://www.rodsbooks.com/gdisk/[gdisk^], http://gparted.org/[gparted^], https://www.thomas-krenn.com/en/wiki/StorCLI[storcli^], etc.). Need to replace that disk in your RAID and you don't have hotswap? Not a problem!
|
||||
* Rescue, recover, wipe (http://www.sleuthkit.org/sleuthkit/[scalpel^], http://www.andybev.com/index.php/Nwipe[nwipe^], http://foremost.sourceforge.net/[foremost^], etc.). Chances are this is why you booted a live distro in the first place, yes?
|
||||
* Boot over the Internet (or LAN). Burning a new image to CD/DVD/USB is a pain. BDisk has built-in support for http://ipxe.org/[iPXE^] (and traditional PXE setups). Update the filesystem image once, deploy it everywhere.
|
||||
* And much, much more.
|
||||
** Seriously.
|
||||
|
||||
This manual will give you the information you need to build your very own live GNU/Linux distribution.
|
||||
|
||||
include::user/GETTING_STARTED.adoc[]
|
||||
include::user/IMPORTANT_CONCEPTS.adoc[]
|
||||
include::user/PROJECT_LAYOUT.adoc[]
|
||||
include::user/BUILDINI.adoc[]
|
||||
include::user/ADVANCED.adoc[]
|
||||
include::user/BUILDING.adoc[]
|
||||
24
docs/manual/dev/FUNCTIONS.adoc
Normal file
@@ -0,0 +1,24 @@
|
||||
== Layout of BDisk functions
|
||||
These functions exist in <<_bdisk_,`bdisk/`>>.
|
||||
|
||||
include::functions/BCHROOT.adoc[]
|
||||
|
||||
=== `bdisk.py`
|
||||
This file is a sort of "wrapper" -- it pulls all the other files in this directory together into a single usable Python script. In other words, to build a BDisk distribution, you would simply run `bdisk/bdisk.py` -- that's it! See <<building_a_bdisk_iso>>.
|
||||
|
||||
It contains no functions; it just contains minimal logic to tie all the other functions together.
|
||||
|
||||
include::functions/BGPG.adoc[]
|
||||
|
||||
include::functions/BSSL.adoc[]
|
||||
|
||||
include::functions/BSYNC.adoc[]
|
||||
|
||||
include::functions/BUILD.adoc[]
|
||||
|
||||
include::functions/HOST.adoc[]
|
||||
|
||||
include::functions/IPXE.adoc[]
|
||||
|
||||
include::functions/PREP.adoc[]
|
||||
|
||||
29
docs/manual/dev/functions/BCHROOT.adoc
Normal file
@@ -0,0 +1,29 @@
|
||||
=== `bchroot.py`
|
||||
This file controls creation of the chroots -- the directories in which BDisk builds the actual system that is booted into.
|
||||
|
||||
==== chroot(_chrootdir_, _chroot_hostname_, _cmd_ = '`/root/pre-build.sh`')
|
||||
This function manages mounting the mountpoints for the chroot(s) in preparation for the images of the live media. It also runs <<changing_the_build_process,the inner chroot preparation script>>. Returns `chrootdir` (same as the parameter provided).
|
||||
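As a hedged illustration of what this entails (the actual mount list, ordering, and invocation in BDisk may differ), something along these lines happens:

```
# Rough sketch of the setup chroot() performs before running the prep script:
# typical bind mounts plus a chroot(1) call. Illustration only.
import subprocess

def chroot(chrootdir, chroot_hostname, cmd='/root/pre-build.sh'):
    for args in (['mount', '-t', 'proc', 'proc', chrootdir + '/proc'],
                 ['mount', '--rbind', '/sys', chrootdir + '/sys'],
                 ['mount', '--rbind', '/dev', chrootdir + '/dev']):
        subprocess.run(args, check=True)
    # chroot_hostname is accepted for parity with the real signature;
    # see the note below about it possibly being removed.
    subprocess.run(['chroot', chrootdir, cmd], check=True)
    return chrootdir
```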
|
||||
===== chrootdir
|
||||
The directory where the filesystem tree for the chroot lies. Absolute path only.
|
||||
|
||||
===== chroot_hostname
|
||||
The hostname to use for the guest.
|
||||
|
||||
NOTE: This parameter may be removed in future versions.
|
||||
|
||||
===== cmd
|
||||
The command to run inside the chroot once all the mountpoints are set up.
|
||||
|
||||
==== chrootUnmount(_chrootdir_)
|
||||
Unmount the mounts set up in <<chroot_em_chrootdir_em_em_chroot_hostname_em_em_cmd_em_root_pre_build_sh,chroot()>>.
|
||||
|
||||
===== chrootdir
|
||||
See <<chrootdir>>.
|
||||
|
||||
==== chrootTrim(_build_)
|
||||
This function performs some cleanup and optimizations to the chroot(s).
|
||||
|
||||
===== build
|
||||
A dictionary of <<code_build_code>>'s values (with some additional keys/values added). See <<parseconfig_em_confs_em,parseConfig()>>.
|
||||
|
||||
41
docs/manual/dev/functions/BGPG.adoc
Normal file
@@ -0,0 +1,41 @@
|
||||
=== `bGPG.py`
|
||||
This contains functions having to do with GPG -- signing files, verifying other signatures, generating a key (if one wasn't specified), using a key (if one was specified), etc.
|
||||
|
||||
==== genGPG(_conf_)
|
||||
This function controls generating (or "importing" an existing) GnuPG key for use with other operations. Returns `gpg`, a <<optional,PyGPGME>> object.
|
||||
|
||||
===== conf
|
||||
A dictionary of the <<the_code_build_ini_code_file,configuration>> (with some additional keys/values added). See (TODO: link to host.py's config parser).
|
||||
|
||||
==== killStaleAgent(_conf_)
|
||||
This function kills off any stale GnuPG agents running. Not doing so can cause some strange behaviour both during the build process and on the host.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== signIMG(_path_, _conf_)
|
||||
This function signs a given file with the keys BDisk was either configured to use or automatically generated.
|
||||
|
||||
===== path
|
||||
The full, absolute path to the file to be signed. An https://www.gnupg.org/gph/en/manual/r1290.html[ASCII-armored^] https://www.gnupg.org/gph/en/manual/x135.html[detached^] signature (plaintext) will be generated at `_path_.asc`, and a binary detached signature will be generated at `_path_.sig`.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== gpgVerify(_sigfile_, _datafile_, _conf_)
|
||||
This function verifies a detached signature against a file containing data. Returns *True* if the file verifies, or *False* if not.
|
||||
|
||||
===== sigfile
|
||||
The detached signature file. Can be ASCII-armored or binary format. Full/absolute path only.
|
||||
|
||||
===== datafile
|
||||
The file containing the data to be verified. Full/absolute path only.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
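For reference, the signing and verification described above correspond roughly to the following gpg CLI operations, shown via `subprocess` purely as an illustration; BDisk itself drives this through PyGPGME, and the path below is just an example:

```
import subprocess

path = '/var/tmp/BDisk/results/bdisk.iso'   # example path only

# ASCII-armored detached signature -> <path>.asc
subprocess.run(['gpg', '--armor', '--output', path + '.asc',
                '--detach-sign', path], check=True)
# Binary detached signature -> <path>.sig
subprocess.run(['gpg', '--output', path + '.sig',
                '--detach-sign', path], check=True)
# Verification (what gpgVerify() reports as True/False)
result = subprocess.run(['gpg', '--verify', path + '.asc', path])
print(result.returncode == 0)
```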
|
||||
==== delTempKeys(_conf_)
|
||||
Delete automatically-generated keys (if we generated them) as well as the automatically imported verification key (<<code_gpgkey_code>>).
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
64
docs/manual/dev/functions/BSSL.adoc
Normal file
@@ -0,0 +1,64 @@
|
||||
=== `bSSL.py`
|
||||
Functions having to do with OpenSSL are stored here. This is used primarily for "mini" builds (via iPXE), which let you boot your BDisk distribution over the Internet. If an SSL key, CA certificate, etc. weren't defined and you want to build a mini image, this file contains functions that will build an SSL PKI (public key infrastructure) for you automatically.
|
||||
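As a minimal sketch of the PKI pieces this module deals with (assuming pyOpenSSL, with subject values borrowed from the example configuration -- not BDisk's actual code):

```
from OpenSSL import crypto

# CA key, roughly what sslCAKey() would generate if none is configured.
ca_key = crypto.PKey()
ca_key.generate_key(crypto.TYPE_RSA, 4096)

# Self-signed CA certificate, roughly what sslCA() would produce.
ca_cert = crypto.X509()
ca_cert.get_subject().CN = 'domain.tld'
ca_cert.set_serial_number(1)
ca_cert.gmtime_adj_notBefore(0)
ca_cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)  # ~10 years
ca_cert.set_issuer(ca_cert.get_subject())             # self-signed
ca_cert.set_pubkey(ca_key)
ca_cert.sign(ca_key, 'sha512')
```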
|
||||
==== verifyCert(_cert_, _key_, _CA_ = None)
|
||||
This function will verify a certificate's validity/pairing with a key, optionally against a given CA certificate. Returns *True* on successful verification; otherwise it returns *False* and exits (as a sanity check).
|
||||
|
||||
===== cert
|
||||
The certificate to be validated. Must be a PyOpenSSL certificate object.
|
||||
|
||||
===== key
|
||||
The key to validate against <<cert>>. Must be a PyOpenSSL key object.
|
||||
|
||||
===== CA
|
||||
The CA, or certificate authority, certificate to verify against.
|
||||
|
||||
NOTE: This does not currently work, as PyOpenSSL does not support verifying against a specified CA certificate.
|
||||
|
||||
==== sslCAKey(_conf_)
|
||||
This function imports a CA key (<<code_ssl_cakey_code>>) into a PyOpenSSL object (or generates one if necessary). Returns a PyOpenSSL key object.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== sslCA(_conf_, _key_ = None)
|
||||
This function imports a CA certificate (<<code_ssl_ca_code>>) into a PyOpenSSL object (or generates one if necessary). Returns a PyOpenSSL certificate object.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
===== key
|
||||
A PyOpenSSL key object that should be used to generate the CA certificate (or is paired to the CA certificate if specified).
|
||||
|
||||
==== sslCKey(_conf_)
|
||||
This function imports a client key (<<code_ssl_key_code>>) into a PyOpenSSL object (or generates one if necessary). Returns a PyOpenSSL key object.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== sslCSR(_conf_, _key_ = None)
|
||||
This function generates a CSR (certificate signing request).
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
===== key
|
||||
A PyOpenSSL key object that should be used to generate the CSR. It should be a key that is paired to the client certificate.
|
||||
|
||||
==== sslSign(_conf_, _ca_, _key_, _csr_)
|
||||
This function signs a CSR using a specified CA.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
===== ca
|
||||
A PyOpenSSL certificate object for the CA certificate. This certificate (object) should have signing capabilities.
|
||||
|
||||
===== key
|
||||
A PyOpenSSL key object paired to <<ca_2>>.
|
||||
|
||||
===== csr
|
||||
A PyOpenSSL CSR object. See <<sslcsr_em_conf_em_em_key_em_none,sslCSR()>>.
|
||||
|
||||
==== sslPKI(_conf_)
|
||||
Ties all the above together into one convenient function. Returns a PyOpenSSL certificate object of the signed client certificate.
|
||||
26
docs/manual/dev/functions/BSYNC.adoc
Normal file
@@ -0,0 +1,26 @@
|
||||
=== `bsync.py`
|
||||
This file has functions relating to copying your BDisk build to various resources. For instance, if you want your ISO available to download then this file would be used to copy your finished build to an HTTP server/root you specify.
|
||||
|
||||
==== http(_conf_)
|
||||
This function prepares a *local* HTTP directory, or webroot. See <<code_http_code_2>>.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== tftp(_conf_)
|
||||
This function prepares a *local* TFTP directory (for traditional PXE). See <<code_tftp_code_2>>.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== git(_conf_)
|
||||
This function commits (and pushes) any changes you might have made to your project (<<code_basedir_code>>) automatically.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== rsync(_conf_)
|
||||
This function syncs your builds, HTTP directory (if enabled), TFTP directory (if enabled), etc. to a remote host. See <<code_rsync_code_2>>.
|
||||
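As an illustration only (using the `<sync><rsync>` values from the example configuration; BDisk's real implementation and destination layout may differ), the sync amounts to something like:

```
import subprocess

ssh_cmd = 'ssh -p 22 -i ~/.ssh/id_ed25519'
subprocess.run(['rsync', '-a', '--delete', '-e', ssh_cmd,
                '/var/tmp/BDisk/results/',
                'root@mirror.domain.tld:/srv/http/isos/bdisk/'],
               check=True)
```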
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
43
docs/manual/dev/functions/BUILD.adoc
Normal file
@@ -0,0 +1,43 @@
|
||||
=== `build.py`
|
||||
This is responsible for building the "full" ISO, building UEFI support, etc.
|
||||
|
||||
==== genImg(_conf_)
|
||||
This function builds the http://tldp.org/HOWTO/SquashFS-HOWTO/creatingandusing.html[squashed filesystem^] images and, <<code_gpg_code,if requested>>, signs them.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== genUEFI(_build_, _bdisk_)
|
||||
This function builds UEFI support for the ISO files. Returns the path of an embedded EFI bootable binary/ESP image.
|
||||
|
||||
===== build
|
||||
The <<code_build_code,build section>> of the configuration.
|
||||
|
||||
===== bdisk
|
||||
The <<code_bdisk_code,bdisk section>> of the configuration.
|
||||
|
||||
==== genISO(_conf_)
|
||||
Builds the full ISO image(s). Returns a dictionary of information about the built ISO file (see <<iso>>).
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== displayStats(_iso_)
|
||||
Parses the output of e.g. <<geniso_em_conf_em,genISO()>> and displays in a summary useful to the end-user.
|
||||
|
||||
===== iso
|
||||
A dictionary of information about the ISO file. This is typically:
|
||||
|
||||
{'iso':
    {'name': <'Main' for the full ISO, 'Mini' for the mini ISO, etc.>},
    {<name>:
        'sha': <SHA256 sum of ISO file>,
        'file': <full/absolute path to ISO file>,
        'size': <size, in "humanized" format (e.g. #GB, #MB, etc.)>,
        'type': <Full or Mini>,
        'fmt': <human readable ISO type. e.g. Hybrid for an image that can be burned directly to a disk via dd or burned to optical media>
    }
}
|
||||
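One hypothetical concrete instance of such a dictionary (field names follow the description above; the values and exact nesting here are assumptions, not guaranteed to match BDisk's output):

```
iso = {
    'iso': {'name': 'Main'},
    'Main': {
        'sha': '<sha256 sum of the ISO file>',
        'file': '/var/tmp/BDisk/results/bdisk.iso',
        'size': '1.1GB',
        'type': 'Full',
        'fmt': 'Hybrid',
    },
}
```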
|
||||
==== cleanUp()
|
||||
Currently a no-op; this function is reserved for future usage to cleanup the build process automatically.
|
||||
42
docs/manual/dev/functions/HOST.adoc
Normal file
@@ -0,0 +1,42 @@
|
||||
=== `host.py`
|
||||
These functions are used to perform "meta" tasks such as get information about the build host, find <<the_code_build_ini_code_file,the `build.ini` file>>, and parse your configuration options.
|
||||
|
||||
==== getOS()
|
||||
Returns the distribution of the build host.
|
||||
|
||||
==== getBits()
|
||||
Returns the "bitness" of the build host (e.g. `32bit` or `64bit`)
|
||||
|
||||
==== getHostname()
|
||||
Returns the hostname of the build host.
|
||||
|
||||
==== getConfig(_conf_file_ = '/etc/bdisk/build.ini')
|
||||
Returns a list of:
|
||||
|
||||
. the default configuration file
|
||||
. the user-specified configuration file
|
||||
|
||||
===== conf_file
|
||||
This is a full/absolute path that is searched first. If it exists and is a file, it is assumed to be the "canonical" <<the_code_build_ini_code_file,`build.ini` file>>.
|
||||
|
||||
==== parseConfig(_confs_)
|
||||
This function parses the configuration file(s) and returns a list of:
|
||||
|
||||
. A ConfigParser object
|
||||
. The configuration as a dictionary
|
||||
|
||||
It performs some additional things, such as:
|
||||
|
||||
* Converts "boolean" operations to true Python booleans
|
||||
* Tries to automatically detect the version if one isn't provided
|
||||
* Establishes the build number (this is a number that should be local to the build host)
|
||||
* Forms a list of the <<code_multiarch_code,architectures>> to build
|
||||
* Validates:
|
||||
** The bootstrap tarball mirror
|
||||
** The rsync destination (if <<code_rsync_code,enabled>>)
|
||||
** The iPXE remote URI (if <<code_ipxe_code,enabled>>)
|
||||
** That <<code_basedir_code>> is correctly set
|
||||
* Makes prerequisite directories
|
||||
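A minimal sketch of this kind of parsing and post-processing (the section and option names here are illustrative assumptions, not necessarily BDisk's actual keys):

```
import configparser

def parse_config(confs):
    cfg = configparser.ConfigParser()
    cfg.read(confs)  # later files in the list override earlier ones
    conf = {section: dict(cfg.items(section)) for section in cfg.sections()}
    # Convert "boolean" strings into real Python booleans.
    if 'build' in conf:
        conf['build']['its_full_of_stars'] = cfg.getboolean(
            'build', 'its_full_of_stars', fallback=False)
    return cfg, conf
```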
|
||||
===== confs
|
||||
A list of configuration files. See <<getconfig_em_conf_file_em_etc_bdisk_build_ini,getConfig()>>.
|
||||
22
docs/manual/dev/functions/IPXE.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
=== `ipxe.py`
|
||||
This file handles building the "mini" ISO via iPXE.
|
||||
|
||||
==== buildIPXE(_conf_)
|
||||
This function builds the iPXE core files.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== genISO(_conf_)
|
||||
This function builds the mini ISO (if <<code_iso_code,enabled>>). Returns a dictionary of information about the built ISO file (see <<iso>>).
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== tftpbootEnv(_conf_)
|
||||
This function configures a TFTP boot/root directory for traditional PXE setups.
|
||||
|
||||
NOTE: This function currently is a no-op; it will be implemented in future versions.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
52
docs/manual/dev/functions/PREP.adoc
Normal file
@@ -0,0 +1,52 @@
|
||||
=== `prep.py`
|
||||
This contains functions that download the base tarball releases, prep them for <<code_bchroot_py_code>>, build the necessary directory structures, and perform the overlay preparations.
|
||||
|
||||
==== dirChk(_conf_)
|
||||
This function creates extra directories if needed.
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== downloadTarball(_conf_)
|
||||
This function downloads the tarball (<<code_mirrorfile_code>>) from the <<code_mirror_code>>, and performs verifications (SHA1 and GPG signature <<code_mirrorgpgsig_code,if enabled>>). Returns the full/absolute path to the downloaded tarball.
|
||||
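Conceptually (and only as a sketch -- the mirror URL and tarball name below are modeled on the example configuration, and BDisk's real code additionally handles regex matching, GPG verification, and more), the download-and-verify step looks like:

```
import hashlib
import urllib.request

mirror = 'http://archlinux.mirror.domain.tld/iso/latest/'
tarball = 'archlinux-bootstrap-2018.05.01-x86_64.tar.gz'  # illustrative name
dest = '/var/tmp/BDisk/' + tarball

urllib.request.urlretrieve(mirror + tarball, dest)

# SHA1-verify against the mirror's checksum listing.
sha1 = hashlib.sha1()
with open(dest, 'rb') as fh:
    for chunk in iter(lambda: fh.read(65536), b''):
        sha1.update(chunk)

with urllib.request.urlopen(mirror + 'sha1sums.txt') as listing:
    sums = listing.read().decode()
if sha1.hexdigest() not in sums:
    raise ValueError('SHA1 mismatch for ' + tarball)
```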
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== unpackTarball(_tarball_path_, _build_, _keep_ = False)
|
||||
This function extracts the tarball downloaded via <<downloadtarball_em_conf_em,downloadTarball()>>.
|
||||
|
||||
===== tarball_path
|
||||
The full/absolute path to the downloaded tarball.
|
||||
|
||||
===== build
|
||||
See <<build>>.
|
||||
|
||||
===== keep
|
||||
`True` or `False`. Whether we should keep the downloaded tarball after unpacking/extracting. If your upstream tarball changes often enough, it's recommended to set this to `False`. However, setting it to `True` can speed up the build process if you're on a slower Internet connection.
|
||||
|
||||
==== buildChroot(_conf_, _keep_ = False)
|
||||
This incorporates <<downloadtarball_em_conf_em,downloading>> and <<unpacktarball_em_tarball_path_em_em_build_em_em_keep_em_false,extracting>> into one function, as well as applying the <<pre_build_d>> directory (and the <<pre_build_d_2,pre-build.d templates>>).
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
===== keep
|
||||
See <<keep>>.
|
||||
|
||||
==== prepChroot(_conf_)
|
||||
Returns a modified/updated <<build>>. This function:
|
||||
|
||||
. Prepares some variables that <<changing_the_build_process,pre-build.sh>> needs inside the chroot(s)
|
||||
. Builds <<version_info_txt_j2,the VERSION_INFO.txt file>>
|
||||
. Updates the build number
|
||||
. Imports the <<code_mygpgkey_code,signing GPG key>>
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
|
||||
==== postChroot(_conf_)
|
||||
This function applies the <<overlay_2>> directory (and the <<overlay,overlay templates>>).
|
||||
|
||||
===== conf
|
||||
See <<conf>>.
|
||||
83
docs/manual/faq/ALTERNATIVES.adoc
Normal file
@@ -0,0 +1,83 @@
|
||||
== I don't like BDisk. Are there any other alternatives?
|
||||
First, I'm sorry to hear that BDisk doesn't suit your needs. If you want any features you think are missing or encounter any <<FURTHER.adoc#bug_reports_feature_requests, bugs>>, please report them!
|
||||
|
||||
But yes; there are plenty of alternatives! I encourage you to search for yourself, but I've tried to be as impartial as I can for the below.
|
||||
|
||||
NOTE: Only *currently maintained projects* are listed here.
|
||||
|
||||
=== https://wiki.archlinux.org/index.php/archboot[Archboot^]
|
||||
Written in Bash.
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|Pros|Cons
|
||||
|Highly featureful|Arch-only live media
|
||||
|Includes an assisted Arch install script|Inaccessible to non-Arch users
|
||||
|Can create tarballs too|Not very customizable by default
|
||||
|Supports hybrid ISOs|Infrequent stable releases
|
||||
|Supports PXE-booting infrastructure|Requires a systemd build host
|
||||
|Supports SecureBoot|Not a secure setup by default
|
||||
|Supports GRUB2's "ISO-loopback" mode|Builds a much larger image
|
||||
|Official Arch project|Some graphical bugs
|
||||
||Much more disk space is necessary for the tool itself
|
||||
||*Only* runs in RAM, so not ideal for RAM-constrained systems
|
||||
||Based on/requires an Arch build host
|
||||
||Requires an x86_64 build host
|
||||
||Has a large amount of dependencies
|
||||
||Manual intervention required for build process
|
||||
||Minimal documentation
|
||||
||
|
||||
|======================
|
||||
|
||||
=== https://wiki.archlinux.org/index.php/archiso[Archiso^]
|
||||
Written in Bash.
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|Pros|Cons
|
||||
|Used to build the official Arch ISO|Requires an x86_64 build host
|
||||
|Supports custom local on-disk repositories|Not very featureful as far as customization goes
|
||||
|Supports arbitrary file placement in finished image|Requires an Arch build host
|
||||
|Supports hybrid ISOs|Has odd quirks with package selection
|
||||
|Supports Secureboot|Manual intervention required for build process
|
||||
|Official Arch project|Does not start networking by default
|
||||
|Can run in RAM or from media|Very minimal environment
|
||||
||Arch-only live media
|
||||
||Documentation is lacking
|
||||
||
|
||||
|======================
|
||||
|
||||
=== Debian's https://wiki.debian.org/Simple-CDD[Simple-CDD^]
|
||||
Written in Bash (some Python).
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|Pros|Cons
|
||||
|Supports custom packages to be installed|Very limited -- no customization beyond package listing
|
||||
|Lightweight; quick to set up|Takes a long time for preparation; requires a clone of many .deb packages first.
|
||||
||Doesn't seem to work according to https://wiki.debian.org/Simple-CDD/Howto[the documentation^]
|
||||
||Documentation is sparse
|
||||
||Full featureset unknown due to ISO not building on Debian Jessie (8.0)
|
||||
||
|
||||
|======================
|
||||
|
||||
=== Fedora's https://fedoraproject.org/wiki/Livemedia-creator-_How_to_create_and_use_a_Live_CD[Livemedia-creator^]
|
||||
Written in Bash.
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|Pros|Cons
|
||||
|Somewhat customizable|Requires manual initialization of chroot(s) via https://github.com/rpm-software-management/mock/wiki[mock^]
|
||||
|Uses kickstart configurations|*Requires* a kickstart configuration in order to be useful
|
||||
|Simple/easy to use|Full featureset unknown; documentation is sparse
|
||||
||Limited configuration/customization
|
||||
||
|
||||
|======================
|
||||
|
||||
=== https://github.com/rhinstaller/livecd-tools[LiveCD Tools^]
|
||||
Written in Python 2, some Bash.
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|Pros|Cons
|
||||
|Can use kickstarts|*Requires* a kickstart configuration
|
||||
|Simple/easy to use|Limited configuration/customization
|
||||
|Automatically builds chroots|Full featureset unknown; documentation is sparse
|
||||
||
|
||||
|======================
|
||||
|
||||
3
docs/manual/faq/GETVERSION.adoc
Normal file
@@ -0,0 +1,3 @@
|
||||
== How do I get the version/build of an ISO?
|
||||
This can be found in a multitude of places. The full-size ISO file should have the version right in the filename. If you want more detailed information (or perhaps you renamed the file), you can mount the ISO as loopback in GNU/Linux, *BSD, or Mac OS X/macOS and check `/path/to/mounted/iso/VERSION_INFO.txt`. Lastly, within the runtime itself (especially handy if booting via iPXE), you can check `/root/VERSION_INFO.txt` to get information about the build of the currently running live environment.
|
||||
|
||||
5
docs/manual/faq/INDEX.adoc
Normal file
@@ -0,0 +1,5 @@
|
||||
include::WHYARCH.adoc[]
|
||||
include::LONGTIME.adoc[]
|
||||
include::ISOBIG.adoc[]
|
||||
include::GETVERSION.adoc[]
|
||||
include::ALTERNATIVES.adoc[]
|
||||
5
docs/manual/faq/ISOBIG.adoc
Normal file
@@ -0,0 +1,5 @@
|
||||
== Why is the ISO so large?
|
||||
This actually entirely depends on what <<changing_the_installed_software,packages you have chosen to install>> (and if you're building a <<code_multiarch_code,multiarch ISO>>). The default list is quite large.
|
||||
|
||||
If you build a minimal ISO (i.e. only the necessary components required for booting and nothing else, single-arch), the ISO is only about 570MB (but work is being done to make this even smaller).
|
||||
|
||||
70
docs/manual/faq/LONGTIME.adoc
Normal file
@@ -0,0 +1,70 @@
|
||||
== Why does building take so long?
|
||||
This typically occurs when you're building from within a LiveCD/LiveUSB situation, in a VM/container/etc., or on a headless server. If this is the case, you may run into what appears to be "stalling", especially while keys are generating for the chroots. Thankfully, there is an easy fix. You can install http://www.issihosts.com/haveged/[haveged^] and run it (this can be done safely while a build is executing). This will show an immediate and non-negligible improvement for the above contexts. If you have extra processing power to throw at the build process (or are using a dedicated build box) as well, I recommend enabling <<code_its_full_of_stars,`its_full_of_stars`>>. BDisk will then be more aggressive with its resource consumption.
|
||||
|
||||
=== Running a local mirror
|
||||
Keep in mind also that the more packages you opt to install, the longer the build process will take. This process will also use quite a fair bit of bandwidth. If you plan on building regular images (e.g. nightly builds, etc.) or are undergoing some custom change testing, I recommend running a private repository mirror on-site. For Arch-based builds, this will not store AUR packages, as those will still be fetched and built (documentation on working around this is TODO) but setting up a local mirror is quite quick and easy. We'll of course use Arch as an example since that's the default guest environment (though I have a https://git.square-r00t.net/OpTools/tree/centos/repoclone[script^] for CentOS as well).
|
||||
|
||||
First, you'll need at least 90GB of free disk space. Let's say our repository clone will be at `/srv/repo/arch/`.
|
||||
|
||||
You'll also need to find an Arch mirror, ideally one close to you that is up-to-date. The https://www.archlinux.org/mirrorlist/[mirrorlist generator^] and https://www.archlinux.org/mirrors/[mirror list^] will assist you here greatly.
|
||||
|
||||
NOTE: You'll need to find a mirror that supports _rsync_.
|
||||
|
||||
TIP: You can use ANY distro to run a repository mirror, as long as it has _rsync_ installed!
|
||||
|
||||
==== Set up the sync
|
||||
|
||||
I have https://git.square-r00t.net/OpTools/tree/arch/repoclone.py[written a script^] that does the heavy-lifting! https://git.square-r00t.net/OpTools/plain/arch/repoclone.py[Download it^] and mark it as executable (`chmod +x repoclone.py`). Make sure you read the --help option and edit `~/.config/optools/repoclone/arch.ini`.
|
||||
|
||||
Assuming you want to run the sync script every 6 hours, this is the cron entry you would use (`crontab -e`):
|
||||
|
||||
0 */6 * * * /path/to/repoclone.py
|
||||
|
||||
The first sync can take quite a while, but subsequent runs shouldn't take more than five minutes or so (depending on how many updates are available).
|
||||
|
||||
==== Configuring the local mirror
|
||||
You'll need a way to serve this local mirror in a way pacman can understand. Luckily, it's fairly easy. I recommend using https://www.nginx.com/[nginx^] as it's available by default in many operating systems. You can of course use others such as https://www.lighttpd.net/[lighttpd^], https://httpd.apache.org/[apache/httpd^], etc. For the example configuration here, we're going to use an nginx configuration file.
|
||||
|
||||
```
|
||||
server {
|
||||
listen [::]:80;
|
||||
access_log /var/log/nginx/repo.access.log main;
|
||||
error_log /var/log/nginx/repo.error.log;
|
||||
#error_log /var/log/nginx/repo.error.log debug;
|
||||
|
||||
autoindex on;
|
||||
|
||||
root /srv/repo/arch;
|
||||
}
|
||||
```
|
||||
|
||||
The configuration may vary according to your distribution's provided nginx default configuration, but you'll want this configuration to be served as the default (or set an appropriate `https://nginx.org/en/docs/http/server_names.html[server_name]` directive which you would then use in `<profile><build><paths><base>/etc/pacman.d/mirrorlist`).
|
||||
|
||||
==== Configuring BDisk
|
||||
|
||||
You'll then want to configure BDisk's chroots to use your local mirror first. However, pointing at a LAN-only mirror introduces an issue -- in the built image, install operations will take longer than they need to because the local mirror likely won't be available! This is a small issue, as it's unexpected that you'll need to install software within the live environment, but I've run into cases where it was a necessity once or twice.
|
||||
|
||||
There is an https://devblog.square-r00t.net/articles/libvirt-spoof-domains-dns-records-redirect-to-another-ip[easy workaround^] if you're using libvirt to build -- you can simply tell your build VM to resolve the FQDN of the "preferred" "real" upstream mirror to the IP address of the box running your local mirror, and set that mirror at the top of `<profile><build><paths><base>/etc/pacman.d/mirrorlist`. However, that's not always feasible -- most notably if you're building on a physical box and it's the same host as the repository clone. In that case you can set the specific local resolution -- e.g. `http://127.0.0.1/` -- at the top of `<profile><build><paths><base>/etc/pacman.d/mirrorlist` and then set a mirrorlist WITHOUT that entry in `<profile><build><paths><overlay>/etc/pacman.d/mirrorlist`. For more information on using these types of overrides, see <<advanced_customization>>.
|
||||
|
||||
If you're using the libvirt workaround, remember to configure nginx (or whatever you're using) with a virtual host and location block that matches the "real", upstream mirror. In our example below, we use *http://arch.mirror.square-r00t.net* as the mirror.
|
||||
|
||||
```
|
||||
server {
|
||||
listen [::]:80;
|
||||
access_log /var/log/nginx/repo.access.log main;
|
||||
error_log /var/log/nginx/repo.error.log;
|
||||
#error_log /var/log/nginx/repo.error.log debug;
|
||||
|
||||
server_name arch.mirror.square-r00t.net;
|
||||
|
||||
autoindex on;
|
||||
|
||||
root /srv/repo/arch;
|
||||
|
||||
location /archlinux {
|
||||
autoindex on;
|
||||
rewrite ^/archlinux(/.*)$ /$1;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
5
docs/manual/faq/WHYARCH.adoc
Normal file
@@ -0,0 +1,5 @@
|
||||
== Why Arch Linux as the Recommended Guest?
|
||||
Because it's a very easy-to-use, simple, https://wiki.archlinux.org/[well-documented^] distro. It's no-frills and incredibly flexible/customizable, and can be made rather slim (and is out of the box, in fact). It's also very friendly to run as a chroot inside any other distro or as a chroot host to any other distro.
|
||||
|
||||
Plus they release monthly tarball snapshots that are fairly small and create quick bootstrap environments.
|
||||
|
||||
18
docs/manual/further/BUGS.adoc
Normal file
@@ -0,0 +1,18 @@
|
||||
== Bug Reports/Feature Requests
|
||||
NOTE: It is possible to submit a bug or feature request without registering in my bugtracker. One of my pet peeves is needing to create an account/register on a bugtracker simply to report a bug! The following links only require an email address to file a bug (which is necessary in case I need any further clarification from you or to keep you updated on the status of the bug/feature request -- so please be sure to use a valid email address).
|
||||
|
||||
=== Bugs
|
||||
If you encounter any bugs in *BDisk*, you can file a bug report https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=1[here^].
|
||||
|
||||
If you encounter any bugs (inaccurate information, typos, misformatting, etc.) in *this documentation*, you can file a bug report https://bugs.square-r00t.net/index.php?do=newtask&project=8&task_type=1[here^].
|
||||
|
||||
=== Feature Requests
|
||||
If you have any features you'd like to see or you think would help *BDisk* become even more useful, please file a feature request https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=2[here^].
|
||||
|
||||
If you have any suggestions on how to improve *this documentation* or feel it's missing information that could be useful, please file a feature request https://bugs.square-r00t.net/index.php?do=newtask&project=8&task_type=2[here^].
|
||||
|
||||
=== Patches
|
||||
I gladly welcome https://www.gnu.org/software/diffutils/manual/html_node/Unified-Format.html[patches^], but I deplore using GitHub (even though I https://github.com/johnnybubonic/BDisk[have a mirror there^]). For this reason, please follow the same https://www.kernel.org/doc/Documentation/SubmittingPatches[patch/pull request process^] for the Linux kernel and email it to bts@square-r00t.net.
|
||||
|
||||
Alternatively, you may attach a patch to a <<bugs,bug report>>/<<feature_requests,feature request>>.
|
||||
|
||||
10
docs/manual/further/CONTACT.adoc
Normal file
@@ -0,0 +1,10 @@
|
||||
== Contact the Author
|
||||
If you have any questions, comments, or concerns, you can use the following information to get in touch with me.
|
||||
|
||||
I am available via mailto:bts@square-r00t.net[email]. If you use GPG, you can find my pubkey and other related info https://devblog.square-r00t.net/about/my-gpg-public-key-verification-of-identity[here^] (and on most keyservers).
|
||||
|
||||
I occasionally write howto articles, brief tips, and other information in my https://devblog.square-r00t.net[dev blog].
|
||||
|
||||
I am on IRC as *r00t^2*, and am usually in the irc://irc.freenode.org/#sysadministrivia[Sysadministrivia channel on Freenode]. Which reminds me, I run a podcast called https://sysadministrivia.com[Sysadministrivia^].
|
||||
|
||||
I am on Twitter as https://twitter.com/brentsaner[@brentsaner^], though I don't tweet very often. (I usually tweet from my https://twitter.com/SysAdm_Podcast[podcast's twitter^].)
|
||||
95
docs/manual/further/PASSWORDS.adoc
Normal file
@@ -0,0 +1,95 @@
|
||||
== Passwords
|
||||
NOTE: If you're specifying passwords, be sure to use a https://www.schneier.com/blog/archives/2014/03/choosing_secure_1.html[strong password^]!
|
||||
|
||||
=== `build.ini` Password Value Examples
|
||||
Passwords work a little interestingly in BDisk. These aspects all apply to both <<code_root_password_code,the root password>> and <<code_password_code,the user password>> (if you enable a regular user).
|
||||
|
||||
CAUTION: DO *NOT* USE A PLAINTEXT PASSWORD IN THE `build.ini`! This is _by design_; plaintext passwords are much more insecure. If you use a plaintext password, it *will not work*.
|
||||
|
||||
WARNING: Remember to <<escaping_the_salted_hash,escape your hash>> before placing it in your `build.ini`!
|
||||
|
||||
.Password Value Scheme
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|If you have...|BDisk will...
|
||||
|the string `BLANK`|give the user a blank password, allowing you to just hit `<Enter>` to log in
|
||||
|nothing set|lock the account (e.g. no non-SSH login is possible)
|
||||
|a properly hashed, salted, and escaped string|set the account to the password used to generate that hash.
|
||||
||
|
||||
|======================
|
||||
|
||||
.Password Value Examples
|
||||
[frame="topbot",options="header,footer"]
|
||||
|======================
|
||||
|If the value is...|Then BDisk...
|
||||
|`root_password = BLANK`|will let you log into the TTY as the root user by just hitting the `<Enter>` key.
|
||||
|`root_password =`|will not allow the root user to log into the TTY at all.
|
||||
|`root_password = <some salted, hashed, escaped string created from 'test'>`|will let you log into the root user on a TTY with the password `test`.
|
||||
||
|
||||
|======================
|
||||
|
||||
|
||||
NOTE: I specify "TTY login" because SSH login may still be possible. By default, SSH will allow password logins for non-root users (root user SSH password login is prohibited by default; only pubkey login for root is allowed.) -- this can be overridden, however, by customization.
|
||||
|
||||
=== Generating a Password Salt/Hash
|
||||
First, if you are not familiar with a http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES[salted hash^] that GNU/Linux uses, you may want to learn about it.
|
||||
|
||||
That said, there are utilities in `extra/bin/` that should generate a salted hash for you. Currently only `hashgen.py` is distributed, but additions/examples for other languages are welcome.
|
||||
|
||||
....
|
||||
$ ./hashgen.py
|
||||
|
||||
What password would you like to hash/salt?
|
||||
(NOTE: will NOT echo back!)
|
||||
|
||||
|
||||
Your salted hash is:
|
||||
$6$t92Uvm1ETLocDb1D$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/
|
||||
|
||||
....
|
||||
|
||||
The password `test` was used above. `crypt(3)`-style salted hashes have specific sections separated by dollar signs (`$`). The first section (containing `6`) marks the *hash algorithm* -- in this case, _SHA512_. (The http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES[crypt man page^] lists all supported hash types and their corresponding IDs.) The next section, `t92Uvm1ETLocDb1D`, is the *salt*. The last section is the *hash*. Salted hashes work like this: an original piece of data is given (in our case, the word `test`), a random string -- the salt -- is mixed in, and the result is run through a one-way cryptographic process that produces a new string from which it is very difficult to recover the original data. In this format, that process is repeated _5000_ times in a row. When you log in with your password, the stored salt is fetched and the same process is run again -- the result should then match the salted hash string stored in the password system (in this case, the https://linux.die.net/man/5/shadow[`/etc/shadow`] file).
|
||||
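If you'd like to see what `hashgen.py` is doing conceptually, Python's standard library can produce the same kind of hash. This is a rough equivalent only -- the actual script may differ, and the `crypt` module is Unix-only and was removed in Python 3.13:

```
import crypt

salted_hash = crypt.crypt('test', crypt.mksalt(crypt.METHOD_SHA512))
print(salted_hash)   # e.g. $6$<salt>$<hash>
```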
|
||||
There are other ways to generate the salted hash as well. These include:
|
||||
|
||||
==== Debian's `mkpasswd` Utility
|
||||
Part of the https://packages.debian.org/jessie/whois[whois^] package, available in the AUR as https://aur.archlinux.org/packages/debian-whois-mkpasswd/[debian-whois-mkpasswd^].
|
||||
|
||||
mkpasswd --method=sha-512 <password>
|
||||
|
||||
==== Perl
|
||||
The following Perl one-liner will generate a salted hash string (using the salt `aBcDeFgHiJ`):
|
||||
|
||||
perl -e 'print crypt("PASSWORD","\$6\$aBcDeFgHiJ\$") . "\n"'
|
||||
|
||||
==== `grub-crypt`
|
||||
Legacy GRUB ("GRUB v1") includes `grub-crypt`, which will let you generate a salted hash:
|
||||
|
||||
/sbin/grub-crypt --sha-512
|
||||
|
||||
=== Escaping the Salted Hash
|
||||
One last thing, and this is *very* important -- failure to perform this step will cause all sorts of strange Python errors -- is to escape the salted hash. Thankfully, however, this is a lot easier than it sounds.
|
||||
|
||||
So we have our salted hash: `$6$t92Uvm1ETLocDb1D$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/`. In order to get it into a usable format, we need to make sure the configuration parsing won't try to read sections of it as variables. To do this, we do something called *escaping*.
|
||||
|
||||
All you need to do is take the salted hash and replace every `$` you see -- there should be exactly three -- with `$$`. That's it! Count them to be sure; you should now have *6* `$` symbols present instead of three. Once you've escaped the salted hash, you're ready to roll.
|
||||
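In code terms, the escaping step is nothing more than a string replacement (shown in Python purely for illustration):

```
salted = '$6$t92Uvm1ETLocDb1D$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/'
escaped = salted.replace('$', '$$')
print(escaped.count('$'))   # 6
```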
|
||||
=== Cheating/The Easy Way
|
||||
Feeling overwhelmed? There's an easy way to do all of this.
|
||||
|
||||
First, while logged into your local computer, change your password to what you want either `root_password` or `password` to be:
|
||||
|
||||
passwd
|
||||
|
||||
NOTE: Remember, changing your password won't echo the password back on the screen for security reasons!
|
||||
|
||||
Then get your shadow entry. This has to be done with sudo, as only the root user has access to the hashed passwords on the system. The following command combines all the necessary steps; the string it returns can be used directly in your `build.ini`.
|
||||
|
||||
sudo grep "^${SUDO_USER}:" /etc/shadow | awk -F':' '{print $2}' | sed -e 's/\$/$$/g'
|
||||
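The same thing can be done from Python if you prefer (it must be run as root; note that the `spwd` module was removed in Python 3.13, and the username below is a placeholder):

```
import spwd

entry = spwd.getspnam('yourusername')        # replace with your own username
print(entry.sp_pwdp.replace('$', '$$'))      # escaped hash, ready for build.ini
```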
|
||||
Don't forget to change your password back to what it was before!
|
||||
|
||||
passwd
|
||||
|
||||
That's it!
|
||||
|
||||
BIN
docs/manual/images/fig1.1.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 270 KiB |
128
docs/manual/netboot/HOWTO.adoc
Normal file
@@ -0,0 +1,128 @@
|
||||
== How to Netboot BDisk
|
||||
I keep this server updated with ISO and iPXE images you can use to netboot my personal spin of BDisk.
|
||||
|
||||
You can https://bdisk.square-r00t.net/download/bdisk-mini.iso[download] a demo of the iPXE functionality. Note that your computer needs to be connected to a valid Internet connection via ethernet and be able to get a DHCP lease for it to work.
|
||||
|
||||
NOTE: Advanced users, you can https://www.gnupg.org/gph/en/manual/x135.html[verify^] it against the GPG signature (https://bdisk.square-r00t.net/download/bdisk-mini.iso.asc[ASC], https://bdisk.square-r00t.net/download/bdisk-mini.iso.sig[BIN]). Please see https://square-r00t.net/gpg-info[this blog post^] for information on fetching my keys and such. Note that while this project is in flux, I may be signing with temporarily-generated throwaway keys.
|
||||
|
||||
Once downloaded, you can follow the appropriate steps based on your operating system:
|
||||
|
||||
=== Windows
|
||||
==== CD/DVD
|
||||
Simply put a blank CD/DVD-R (or RW, RW+, etc.) in your optical media drive. Find where you downloaded the above file (it should be named `bdisk-mini.iso`). Right-click and select *Burn disc image*.
|
||||
|
||||
==== USB
|
||||
You'll most likely want to https://svwh.dl.sourceforge.net/project/usbwriter/USBWriter-1.3.zip[download] a program called https://sourceforge.net/projects/usbwriter/[USBWriter^]. Unzip it (or just open it via double-clicking) and copy the `USBWriter.exe` program somewhere you'll remember -- your desktop, for instance.
|
||||
|
||||
Next, make sure your USB thumbdrive is inserted in your computer and https://support.microsoft.com/en-us/help/17418/windows-7-create-format-hard-disk-partition[formatted/"initialized"^] already.
|
||||
|
||||
WARNING: Formatting a disk/partition will *destroy* any and all data on that device! Make sure there is nothing on your USB drive you want to keep, as formatting BDisk to it *will* delete any data on it.
|
||||
|
||||
Now right-click on the USBWriter icon and select *Run as administrator*. You may get a warning pop up asking for permissions for USBWriter. It's safe to click Yes.
|
||||
|
||||
Select the proper USB flash drive from the *Target device* dropdown menu. If your USB drive isn't showing up, try clicking the Refresh button and looking again. (If it still doesn't show up, you may need to reboot your computer.)
|
||||
|
||||
Click the *Browse...* button and find where you saved `bdisk-mini.iso`. Once you've found it, double-click it. Then click *Write*. It might take a little bit of time depending on how fast your USB interface is, so give it some time. When it finishes, click *Close*. You now have a bootable USB thumbdrive.
|
||||
|
||||
==== Booting
|
||||
Booting differs from machine to machine, but *typically* you should see a message when you first start up for "_Setup_" and/or "_Boot options_" or the like. The terminology differs here. It will probably be an *F__#__* key (usually `F2`, `F4`, `F10`, or `F12`) or the *Delete* key. While rebooting, try holding or repeatedly pressing this key and you should come across an option somewhere with a list of devices to boot from or a boot order you can set. Make sure the USB (or CD/DVD, whichever media type you're using) is set first, and save.
|
||||
|
||||
=== Mac OS X/macOS
|
||||
==== CD/DVD
|
||||
Unfortunately, the OS X/macOS Disk Utility doesn't work with hybrid ISOs (which is what `bdisk-mini.iso` is). At all. You're out of luck, I'm afraid, unless you happen to have a spare USB thumbdrive handy.
|
||||
|
||||
==== USB
|
||||
We'll need to get a little messy with this one.
|
||||
|
||||
Open Applications => Utilities => Terminal. A black box should pop up.
|
||||
|
||||
Insert your USB thumbdrive now (if you haven't already) and run the following command:
|
||||
|
||||
diskutil list
|
||||
|
||||
You should see an entry, probably near the bottom, that looks something like this:
|
||||
|
||||
(...)
|
||||
/dev/disk42 (external, physical):
|
||||
#: TYPE NAME SIZE IDENTIFIER
|
||||
0: *8.2 GB disk42
|
||||
(...)
|
||||
|
||||
CAUTION: *Be sure* to find the disk that matches the size of your thumbdrive! If you use the wrong disk identifier, it will break your OS X/macOS install at best and delete all your data at worst!
|
||||
|
||||
Now that you've found which disk your USB device is (the `/dev/disk__#__` part), we can continue. Make sure it is the disk ID *right above* the line that contains your flash drive's size! I'll use `/dev/disk__42__` in our example, as it's highly unlikely you'll have that many disk IDs, but be sure to replace it in the following commands with the proper disk ID you found above.
|
||||
|
||||
Then we need to unmount the disk, in case it's already mounted.
|
||||
|
||||
diskutil unmountDisk /dev/disk42
|
||||
|
||||
Assuming you saved BDisk Mini to your Desktop, you can do:
|
||||
|
||||
sudo dd if=~/Desktop/bdisk-mini.iso of=/dev/disk42
|
||||
|
||||
NOTE: The above command may prompt you for a password. This is the same password you use to log into your Mac (and unlock the screensaver, etc.). No characters will show up when you type (for security reasons, in case someone is behind you watching your screen) so it may take you a couple tries.
|
||||
|
||||
This will run for a couple seconds. When it finishes, you should see something similar to (but not necessarily the same numbers as) this:
|
||||
|
||||
0+1 records in
|
||||
0+1 records out
|
||||
169 bytes transferred in 0.000530 secs (318865 bytes/sec)
|
||||
|
||||
At this point you _may_ get a popup warning you _"The disk you inserted was not readable by this computer."_ If you do, just click the *Ignore* button.
|
||||
|
||||
One last step. Still in Terminal:
|
||||
|
||||
diskutil eject /dev/disk42
|
||||
|
||||
You can then close Terminal.
|
||||
|
||||
==== Booting
|
||||
The instructions here don't differ too much from those for Windows, though on a Mac it's always the same key. From a powered-off state, power on your MacBook Pro (or whatever it is you have) and hold the *Option* key (or the *Alt* key on non-Apple keyboards). The *Option/Alt* key should bring up a boot menu that will let you select a USB device to boot from.
|
||||
|
||||
Strangely enough, you should still be able to _boot_ a BDisk Mini CD/DVD, you just can't *burn* one. I'm tempted to make a cheap dig at Apple, but I'll refrain.
|
||||
|
||||
=== GNU/Linux
|
||||
==== CD/DVD
|
||||
Easy. Most (if not all) of https://wiki.archlinux.org/index.php/Optical_disc_drive#Burning[these^] should support burning `bdisk-mini.iso` to disc (I'm partial to _cdrecord_). If you prefer a GUI, try some of https://wiki.archlinux.org/index.php/Optical_disc_drive#Burning_CD.2FDVD.2FBD_with_a_GUI[these^] instead (I like _k3b_).
|
||||
|
||||
==== USB
|
||||
Very similar to OS X/macOS in approach. First open a terminal emulator -- how to find it depends on your window manager/desktop environment, but it's usually under a System or Utilities menu.
|
||||
|
||||
Now we need to find which disk our USB thumbdrive is. Insert your USB thumbdrive now, if you haven't already, and run in the terminal:
|
||||
|
||||
sudo fdisk -l
|
||||
|
||||
You should see a device matching your USB thumbdrive's size. In our example, I use */dev/sdz* as it's unlikely you have that many disks attached to a system, but be sure to replace this in the following commands with the proper disk ID you find.
|
||||
|
||||
(...)
|
||||
Disk /dev/sdz: 7.6 GiB, 8178892800 bytes, 15974400 sectors
|
||||
Units: sectors of 1 * 512 = 512 bytes
|
||||
Sector size (logical/physical): 512 bytes / 512 bytes
|
||||
I/O size (minimum/optimal): 512 bytes / 512 bytes
|
||||
(...)
|
||||
|
||||
CAUTION: *Be sure* to find the disk that matches the size of your thumbdrive! If you use the wrong disk identifier, it will break your GNU/Linux install (or possibly Windows install if you're dual-booting, etc.) at best and delete all your data at worst!
|
||||
|
||||
Make sure it isn't mounted:
|
||||
|
||||
umount /dev/sdz
|
||||
|
||||
You should get a message that says `umount: /dev/sdz: not mounted`. If it was mounted before, it's unmounted now.
|
||||
|
||||
Next, simply dd over the ISO file.
|
||||
|
||||
sudo dd if=~/Desktop/bdisk-mini.iso of=/dev/sdz
|
||||
|
||||
NOTE: The above command may prompt you for a password. This is the same password you use to log in (and unlock the screensaver, etc.). No characters will show up when you type (for security reasons, in case someone is behind you watching your screen) so it may take you a couple tries.
|
||||
|
||||
This will run for a couple seconds. When it finishes, you should see something similar to (but not necessarily the same numbers as) this:
|
||||
|
||||
75776+0 records in
|
||||
75776+0 records out
|
||||
38797312 bytes (39 MB, 37 MiB) copied, 9.01915 s, 4.3 MB/s
|
||||
|
||||
If you get a popup from your desktop environment (assuming you're using one) about not being able to mount a disk, or that it's unformatted, etc. and it prompts you to format, ignore/cancel/close it -- do *not* format it! Doing so would erase the BDisk Mini image on it.
|
||||
|
||||
==== Booting
|
||||
Exactly the same as those for Windows. (Unless you're running GNU/Linux on Mac hardware, in which case follow the booting instructions for Mac instead.)
|
||||
|
||||
14
docs/manual/user/ADVANCED.adoc
Normal file
@@ -0,0 +1,14 @@
|
||||
== Advanced Customization
|
||||
If the <<the_code_build_ini_code_file,`build.ini`>> file doesn't provide enough customization to your liking, I don't blame you! It was designed only to provide the most basic control and is primarily only used to control the build process itself.
|
||||
|
||||
Luckily, there are a lot of changes you can make. For all of these, you'll want to make a copy of the <<code_basedir_code,`basedir`>> directory somewhere and change the basedir configuration option in the <<the_code_build_ini_code_file,`build.ini`>> file to point to that directory.
|
||||
|
||||
This section isn't going to cover every single use case, as that's mostly an exercise for you -- I can't predict how you want to use BDisk! But we'll cover some common cases you can use and in the process you'll know how to implement your own customizations.
|
||||
|
||||
include::advanced/SSH.adoc[]
|
||||
include::advanced/VPN.adoc[]
|
||||
include::advanced/SOFTWARE.adoc[]
|
||||
include::advanced/BUILDING.adoc[]
|
||||
include::advanced/AUTOLOGIN.adoc[]
|
||||
include::advanced/DESKTOP.adoc[]
|
||||
|
||||
13
docs/manual/user/BUILDING.adoc
Normal file
@@ -0,0 +1,13 @@
|
||||
== Building a BDisk ISO
|
||||
So you finally have <<the_code_build_ini_code_file,configured>> BDisk (and perhaps added further <<advanced_customization,customizations>>). Now you're ready to build!
|
||||
|
||||
Building is, thankfully, the easiest part!
|
||||
|
||||
NOTE: Because it needs to perform various mounting and chrooting operations, BDisk must be run as the *root* user (or via _sudo_).
|
||||
|
||||
To initiate a build, simply run `<basedir>/bdisk/bdisk.py`. That's it! Everything should continue automatically.
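For instance, if your `basedir` is `/opt/dev/bdisk` (as in the example `build.ini`), that's simply:

sudo /opt/dev/bdisk/bdisk/bdisk.py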
|
||||
|
||||
If you're using a packaged version installed from your distro's package manager, you should instead run it from wherever it installs to. Most likely this will be `/usr/sbin/bdisk`. (On systemd build hosts that have done the https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge/[/usr merge^], you can use `/usr/sbin/bdisk` or `/sbin/bdisk`.)
|
||||
|
||||
If you encounter any issues during the process, make sure you read the documentation -- if your issue still isn't addressed, please be sure to file a <<bug_reports_feature_requests,bug report>>!
|
||||
|
||||
598
docs/manual/user/BUILDINI.adoc
Normal file
@@ -0,0 +1,598 @@
|
||||
== The `build.ini` File
|
||||
This file is where you can specify some of the very basics of BDisk building. It allows you to specify/define certain variables and settings used by the build process. It uses https://docs.python.org/3/library/configparser.html[ConfigParser^] for the parsing engine, and you can do some https://wiki.python.org/moin/ConfigParserExamples[more advanced^] things with it than I demonstrate in the default.
|
||||
|
||||
It's single-level, but divided into "sections". This is unfortunately a limitation of ConfigParser, but it should be easy enough to follow.
|
||||
|
||||
Blank lines are ignored, as well as any lines beginning with `#` and `;`. There are some restrictions and recommendations for some values, so be sure to note them when they occur. Variables referencing other values in the `build.ini` are allowed in the format of `${value}` if it's in the same section; otherwise, `${section:value}` can be used.
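For example, both of these lines appear verbatim in the sample below: `isodir` references `dlpath` from the same `[build]` section, while `archboot` pulls `name` from the `[bdisk]` section:

isodir = ${dlpath}/iso
archboot = ${prepdir}/${bdisk:name}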
|
||||
|
||||
If you want to use your own `build.ini` file (and you should!), the following paths are searched in order. The first one found will be used.
|
||||
|
||||
* `/etc/bdisk/build.ini`
|
||||
* `/usr/share/bdisk/build.ini`
|
||||
* `/usr/share/bdisk/extra/build.ini`
|
||||
* `/usr/share/docs/bdisk/build.ini`
|
||||
* `/usr/local/etc/bdisk/build.ini`
|
||||
* `/usr/local/share/docs/bdisk/build.ini`
|
||||
* `/opt/dev/bdisk/build.ini`
|
||||
* `/opt/dev/bdisk/extra/build.ini`
|
||||
* `/opt/dev/bdisk/extra/dist.build.ini`
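For example, a minimal sketch (assuming your checkout lives at `/opt/dev/bdisk`, matching the paths above) that copies the shipped `dist.build.ini` to the highest-priority location so your own copy is found first:

sudo mkdir -p /etc/bdisk
sudo cp /opt/dev/bdisk/extra/dist.build.ini /etc/bdisk/build.ini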
|
||||
|
||||
We'll go into more detail for each section below.
|
||||
|
||||
=== Example
|
||||
[bdisk]
|
||||
name = BDISK
|
||||
uxname = bdisk
|
||||
pname = BDisk
|
||||
ver =
|
||||
dev = A Developer
|
||||
email = dev@domain.tld
|
||||
desc = A rescue/restore live environment.
|
||||
uri = https://domain.tld
|
||||
root_password =
|
||||
user = yes
|
||||
[user]
|
||||
username = ${bdisk:uxname}
|
||||
name = Default user
|
||||
password = $$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/
|
||||
[build]
|
||||
mirror = mirror.us.leaseweb.net
|
||||
mirrorproto = https
|
||||
mirrorpath = /archlinux/iso/latest/
|
||||
mirrorfile =
|
||||
mirrorchksum = ${mirrorpath}sha1sums.txt
|
||||
mirrorgpgsig =
|
||||
gpgkey = 7F2D434B9741E8AC
|
||||
gpgkeyserver =
|
||||
gpg = no
|
||||
dlpath = /var/tmp/${bdisk:uxname}
|
||||
chrootdir = /var/tmp/chroots
|
||||
basedir = /opt/dev/bdisk
|
||||
isodir = ${dlpath}/iso
|
||||
srcdir = ${dlpath}/src
|
||||
prepdir = ${dlpath}/temp
|
||||
archboot = ${prepdir}/${bdisk:name}
|
||||
mountpt = /mnt/${bdisk:uxname}
|
||||
multiarch = yes
|
||||
ipxe = no
|
||||
i_am_a_racecar = no
|
||||
[gpg]
|
||||
mygpgkey =
|
||||
mygpghome =
|
||||
[sync]
|
||||
http = no
|
||||
tftp = no
|
||||
git = no
|
||||
rsync = no
|
||||
[http]
|
||||
path = ${build:dlpath}/http
|
||||
user = http
|
||||
group = http
|
||||
[tftp]
|
||||
path = ${build:dlpath}/tftpboot
|
||||
user = root
|
||||
group = root
|
||||
[ipxe]
|
||||
iso = no
|
||||
uri = https://domain.tld
|
||||
ssldir = ${build:dlpath}/ssl
|
||||
ssl_ca = ${ssldir}/ca.crt
|
||||
ssl_cakey = ${ssldir}/ca.key
|
||||
ssl_crt = ${ssldir}/main.crt
|
||||
ssl_key = ${ssldir}/main.key
|
||||
[rsync]
|
||||
host =
|
||||
user =
|
||||
path =
|
||||
iso = no
|
||||
|
||||
=== `[bdisk]`
|
||||
This section controls some basic branding and information having to do with the end product.
|
||||
|
||||
==== `name`
|
||||
This value is a "basic" name of your project. It's not really shown anywhere end-user visible, but we need a consistent name that follows some highly constrained rules:
|
||||
|
||||
. Alphanumeric only
|
||||
. 8 characters total (or less)
|
||||
. No whitespace
|
||||
. ASCII only
|
||||
. Will be converted to uppercase if it isn't already
|
||||
|
||||
==== `uxname`
|
||||
This value is used for filenames and the like. I highly recommend it be the same as `<<code_name_code,name>>` (in lowercase) but it doesn't need to be. It also has some rules:
|
||||
|
||||
. Alphanumeric only
|
||||
. No whitespace
|
||||
. ASCII only
|
||||
. Will be converted to lowercase if it isn't already
|
||||
|
||||
==== `pname`
|
||||
This string is used for "pretty-printing" of the project name; it should be a more human-readable string.
|
||||
|
||||
. *Can* contain whitespace
|
||||
. *Can* be mixed-case, uppercase, or lowercase
|
||||
. ASCII only
|
||||
|
||||
==== `ver`
|
||||
The version string. If this isn't specified, we'll try to guess based on the current git commit and tags in `<<code_basedir_code,build:basedir>>`.
|
||||
|
||||
. No whitespace
|
||||
|
||||
==== `dev`
|
||||
The name of the developer or publisher of the ISO, be it an individual or organization. For example, if you are using BDisk to build an install CD for your distro, this would be the name of your distro. The same rules as `<<code_pname_code,pname>>` apply.
|
||||
|
||||
. *Can* contain whitespace
|
||||
. *Can* be mixed-case, uppercase, or lowercase
|
||||
. ASCII only
|
||||
|
||||
==== `email`
|
||||
An email address to use for git syncing messages, and/or GPG key generation.
|
||||
|
||||
==== `desc`
|
||||
What this distribution/project is used for.
|
||||
|
||||
. *Can* contain whitespace
|
||||
. *Can* be mixed-case, uppercase, or lowercase
|
||||
. ASCII only
|
||||
|
||||
==== `uri`
|
||||
What is this project's URI (website, etc.)? Alternatively, your personal site, your company's site, etc.
|
||||
|
||||
. Should be a valid URI understood by curl
|
||||
|
||||
|
||||
==== `root_password`
|
||||
The escaped, salted, hashed string to use for the root user.
|
||||
|
||||
Please see <<passwords,the section on passwords>> for information on this value. In the <<example,example above>>, the string `$$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/` is created from the password `test`. I cannot stress this enough: do not use a plaintext password here, and do not just use a regular `/etc/shadow`/`crypt(3)` hash either. Read the section. I promise it's short.
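As a hedged convenience sketch -- assuming the _whois_ `mkpasswd` from the passwords section is installed -- this hashes and escapes in a single step, and the output can be pasted directly as the value:

mkpasswd --method=sha-512 <password> | sed -e 's/\$/$$/g'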
|
||||
|
||||
==== `user`
|
||||
*Default: no*
|
||||
|
||||
This setting specifies if we should create a regular (non-root) user in the live environment. See the section <<code_user_code_2,`[user]`>> for more options.
|
||||
|
||||
NOTE: If enabled, this user has full sudo access.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
=== `[user]`
|
||||
This section of `build.ini` controls aspects about `bdisk:user`. It is only used if <<code_user_code,`bdisk:user`>> is enabled.
|
||||
|
||||
==== `username`
|
||||
What username should the user have? Standard *nix username rules apply:
|
||||
|
||||
. ASCII only
|
||||
. 32 characters or less
|
||||
. Alphanumeric only
|
||||
. Lowercase only
|
||||
. No whitespace
|
||||
. Cannot start with a number
|
||||
|
||||
==== `name`
|
||||
What comment/description/real name should be used for the user? For more information on this, see the https://linux.die.net/man/5/passwd[passwd(5) man page^]'s section on *GECOS*.
|
||||
|
||||
. ASCII only
|
||||
|
||||
==== `password`
|
||||
The escaped, salted, hashed string to use for the non-root user.
|
||||
|
||||
Please see <<passwords,the section on passwords>> for information on this value. In the <<example,example above>>, the string `$$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/` is created from the password `test`. I cannot stress this enough: do not use a plaintext password here, and do not just use a regular `/etc/shadow`/`crypt(3)` hash either. Read the section. I promise it's short.
|
||||
|
||||
=== `[build]`
|
||||
This section controls some aspects about the host and things like filesystem paths, etc.
|
||||
|
||||
==== `mirror`
|
||||
A mirror that hosts the bootstrap tarball. It is *highly* recommended you use an Arch Linux https://wiki.archlinux.org/index.php/Install_from_existing_Linux#Method_A:_Using_the_bootstrap_image_.28recommended.29[bootstrap tarball^] as the build process is highly specialized to this (but <<bug_reports_feature_requests,patches/feature requests>> are welcome for other built distros). You can find a list of mirrors at the bottom of Arch's https://www.archlinux.org/download/[download page^].
|
||||
|
||||
. No whitespace
|
||||
. Must be accessible remotely/via a WAN-recognized address
|
||||
. Must be a domain/FQDN only; no paths (those come later!)
|
||||
|
||||
==== `mirrorproto`
|
||||
What protocol should we use for the <<code_mirror_code,`mirror`>>?
|
||||
|
||||
|======================
|
||||
^s|Must be (case-insensitive) one of: ^.^m|http ^.^m|https ^.^m|ftp
|
||||
|======================
|
||||
|
||||
==== `mirrorpath`
|
||||
What is the path to the tarball directory on the <<code_mirror_code,`mirror`>>?
|
||||
|
||||
. Must be a complete path (e.g. `/dir1/subdir1/subdir2`)
|
||||
. No whitespace
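Putting the example values together (the `${mirrorproto}://${mirror}${mirrorpath}` pattern), BDisk would look for the bootstrap tarballs under:

https://mirror.us.leaseweb.net/archlinux/iso/latest/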
|
||||
|
||||
==== `mirrorfile`
|
||||
What is the filename for the tarball found in the path specified in <<code_mirrorpath_code,`mirrorpath`>>? If left blank, we will use the sha1 <<code_mirrorchksum_code,checksum>> file to try to guess the most recent file.
|
||||
|
||||
==== `mirrorchksum`
|
||||
The path to a sha1 checksum file of the bootstrap tarball.
|
||||
|
||||
. No whitespace
|
||||
. Must be the full path
|
||||
. Don't include the mirror domain or protocol
|
||||
|
||||
==== `mirrorgpgsig`
|
||||
*[optional]* +
|
||||
*default: (no GPG checking done)* +
|
||||
*requires: <<optional,_gpg/gnupg_>>* +
|
||||
*requires: <<code_gpgkey_code,`gpgkey`>>*
|
||||
|
||||
If the bootstrap tarball file has a GPG signature, we can use it for extra checking. If it's blank, GPG checking will be disabled.
|
||||
|
||||
If you specify just `.sig` (or use the default and don't specify a <<code_mirrorfile_code,`mirrorfile`>>), BDisk will try to guess based on the file from the sha1 <<code_mirrorchksum_code,checksum>> file. Note that this must evaluate to a full URL. (e.g. `${mirrorproto}://${mirror}${mirrorpath}somefile.sig`)
|
||||
|
||||
==== `gpgkey`
|
||||
*requires: <<optional,_gpg/gnupg_>>*
|
||||
|
||||
What is a key ID that should be used to verify/validate the <<code_mirrorgpgsig_code,`mirrorgpgsig`>>?
|
||||
|
||||
. Only used if <<code_mirrorgpgsig_code,`mirrorgpgsig`>> is set
|
||||
. Can be in "short" form (e.g. _7F2D434B9741E8AC_) or "full" form (_4AA4767BBC9C4B1D18AE28B77F2D434B9741E8AC_), with or without the _0x_ prefix.
|
||||
|
||||
==== `gpgkeyserver`
|
||||
*default: blank (GNUPG-bundled keyservers)* +
|
||||
*requires: <<optional,_gpg/gnupg_>>*
|
||||
|
||||
What is a valid keyserver we should use to fetch <<code_gpgkey_code,`gpgkey`>>?
|
||||
|
||||
. Only used if <<code_mirrorgpgsig_code,`mirrorgpgsig`>> is set
|
||||
. The default (blank) is probably fine. If you don't specify a personal GPG config, then you'll most likely want to leave this blank.
|
||||
. If set, make sure it is a valid keyserver URI (e.g. `hkp://keys.gnupg.net`)
|
||||
|
||||
==== `gpg`
|
||||
Should we sign our release files? See the <<code_gpg_code_2,`[gpg]`>> section.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
==== `dlpath`
|
||||
Where should the release files be saved? Note that many other files are created here as well.
|
||||
|
||||
WARNING: If you manage your project in git, this should not be checked in as it has many large files that are automatically generated!
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
|
||||
==== `chrootdir`
|
||||
Where the bootstrap tarball(s) extract to, where the chroots are built and prepped for filesystems on the live media.
|
||||
|
||||
WARNING: If you manage your project in git, this should not be checked in as it has many large files that are automatically generated!
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
|
||||
==== `basedir`
|
||||
Where your <<extra,`extra/`>> and <<overlay,`overlay/`>> directories are located. If you checked out from git, this would be your git worktree directory.
|
||||
|
||||
. No whitespace
|
||||
. Must exist and contain the above directories populated with necessary files
|
||||
|
||||
==== `isodir`
|
||||
This is the output directory of ISO files when they're created (as well as GPG signatures if you <<code_gpg_code,enabled them>>).
|
||||
|
||||
WARNING: If you manage your project in git, this should not be checked in as it has many large files that are automatically generated!
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
|
||||
==== `srcdir`
|
||||
This is where we save and compile source code if we need to dynamically build components (such as iPXE for mini ISOs).
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist (and is needed)
|
||||
|
||||
==== `prepdir`
|
||||
This is the directory we use for staging.
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
|
||||
==== `archboot`
|
||||
This directory is used to stage boot files.
|
||||
|
||||
WARNING: This directory should not be the exact same path as other directives! If so, you will cause your ISO to be much larger than necessary. A subdirectory of another directive's path, however, is okay.
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
|
||||
==== `mountpt`
|
||||
The path to use as a mountpoint.
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
|
||||
==== `multiarch`
|
||||
*default: yes*
|
||||
|
||||
Whether or not to build a "multiarch" image- that is, building support for both x86_64 and i686 in the same ISO.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
s|In order to... 3+^|Accepts (case-insensitive) one of:
|
||||
s|build a multiarch ISO ^m|yes ^m|true ^m|1
|
||||
s|build a separate ISO for each architecture ^m|no ^m|false ^m|0
|
||||
s|only build an i686-architecture ISO ^m|i686 ^m|32 ^m|no64
|
||||
s|only build an x86_64-architecture ISO ^m|x86_64 ^m|64 ^m|no32
|
||||
|======================
|
||||
|
||||
==== `ipxe`
|
||||
*default: no*
|
||||
|
||||
Enable iPXE ("mini ISO") functionality.
|
||||
|
||||
NOTE: This has no bearing on the <<code_sync_code,`[sync]`>> section, so you can create an iPXE HTTP preparation for instance without needing to sync it anywhere (in case you're building on the webserver itself).
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
==== `i_am_a_racecar`
|
||||
*default: no*
|
||||
|
||||
This option should only be enabled if you are on a fairly powerful, multicore system with plenty of RAM. It will speed the build process along, but will have some seriously adverse effects if your system can't handle it. Most modern systems should be fine with enabling it.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
=== `[gpg]`
|
||||
This section controls settings for signing our release files. This is only used if <<code_gpg_code,`build:gpg`>> is enabled.
|
||||
|
||||
==== `mygpgkey`
|
||||
A valid key ID that BDisk should use to _sign_ release files.
|
||||
|
||||
. You will be prompted for a passphrase if your key has one and you don't have an open and authorized gpg-agent session. Make sure you have a working pinentry configuration set up!
|
||||
. If you leave this blank we will use the key we generate automatically earlier in the build process.
|
||||
. If this is blank and <<code_gpg_code,`build:gpg`>> is set to yes, one will be generated for you.
|
||||
|
||||
==== `mygpghome`
|
||||
The directory that should be used for the above GPG key, if specified. Make sure it contains a keybox (`.kbx`) with your private key (e.g. `/home/username/.gnupg`).
|
||||
|
||||
=== `[sync]`
|
||||
This section controls what we should do with the resulting build and how to handle uploads, if we choose to use those features.
|
||||
|
||||
==== `http`
|
||||
*default: no*
|
||||
|
||||
If enabled, BDisk will generate/prepare HTTP files. This is mostly only useful if you plan on using iPXE. See the <<code_http_code_2,`[http]`>> section.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
==== `tftp`
|
||||
*default: no*
|
||||
|
||||
If enabled, BDisk will generate/prepare TFTP files. This is mostly only useful if you plan on using more traditional (non-iPXE) setups and regular PXE bootstrapping into iPXE.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
==== `git`
|
||||
*requires: <<optional,git>>* +
|
||||
*default: no*
|
||||
|
||||
Enable automatic Git pushing for any changes done to the project itself. If you don't have upstream write/push access, you'll want to disable this.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
==== `rsync`
|
||||
*requires: <<optional,rsync>>* +
|
||||
*default: no*
|
||||
|
||||
Enable rsync pushing for the ISO (and other files, if you choose -- useful for iPXE over HTTP(S)).
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
=== `[http]`
|
||||
This section controls details about HTTP file preparation/generation. Only used if <<code_http_code,`sync:http`>> is enabled.
|
||||
|
||||
==== `path`
|
||||
This directory is where to build an HTTP webroot.
|
||||
|
||||
WARNING: MAKE SURE you do not store files here that you want to keep! They will be deleted!
|
||||
|
||||
. No whitespace
|
||||
. If blank, HTTP preparation/generation will not be done
|
||||
. If specified, it will be created if it doesn't exist
|
||||
. Will be deleted first
|
||||
|
||||
==== `user`
|
||||
What user the HTTP files should be owned as. This is most likely going to be either 'http', 'nginx', or 'apache'.
|
||||
|
||||
. No whitespace
|
||||
. User must exist on build system
|
||||
|
||||
|======================
|
||||
^s|Can be one of: ^.^m|username ^.^m|http://www.linfo.org/uid.html[UID]
|
||||
|======================
|
||||
|
||||
==== `group`
|
||||
What group the HTTP files should be owned as. This is most likely going to be either 'http', 'nginx', or 'apache'.
|
||||
|
||||
. No whitespace
|
||||
. Group must exist on build system
|
||||
|
||||
|======================
|
||||
^s|Can be one of: ^.^m|group name ^.^m|https://linux.die.net/man/5/group[GID]
|
||||
|======================
|
||||
|
||||
=== `[tftp]`
|
||||
This section controls details about TFTP file preparation/generation. Only used if <<code_tftp_code,`sync:tftp`>> is enabled.
|
||||
|
||||
==== `path`
|
||||
The directory where we want to build a TFTP root.
|
||||
|
||||
WARNING: MAKE SURE you do not store files here that you want to keep! They will be deleted!
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it doesn't exist
|
||||
. Will be deleted first
|
||||
|
||||
==== `user`
|
||||
What user the TFTP files should be owned as. This is most likely going to be either 'tftp', 'root', or 'nobody'.
|
||||
|
||||
. No whitespace
|
||||
. User must exist on build system
|
||||
|
||||
|======================
|
||||
^s|Can be one of: ^.^m|username ^.^m|http://www.linfo.org/uid.html[UID]
|
||||
|======================
|
||||
|
||||
==== `group`
|
||||
What group the TFTP files should be owned as. This is most likely going to be either 'tftp', 'root', or 'nobody'.
|
||||
|
||||
. No whitespace
|
||||
. Group must exist on build system
|
||||
|
||||
|======================
|
||||
^s|Can be one of: ^.^m|group name ^.^m|https://linux.die.net/man/5/group[GID]
|
||||
|======================
|
||||
|
||||
=== `[ipxe]`
|
||||
This section controls aspects of iPXE building. Only used if <<code_ipxe_code,`build:ipxe`>> is enabled.
|
||||
|
||||
==== `iso`
|
||||
*default: no* +
|
||||
*requires: <<optional,_git_>>*
|
||||
|
||||
Build a "mini-ISO"; that is, an ISO file that can be used to bootstrap an iPXE environment (so you don't need to set up a traditional PXE environment on your LAN). We'll still build a full standalone ISO no matter what.
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
|
||||
==== `uri`
|
||||
What URI iPXE's EMBED script should use. This would be where you host an iPXE chainloading script on a webserver, for instance. See iPXE's example of http://ipxe.org/scripting#dynamic_scripts[dynamic scripts^] for an example of the script that would be placed at this URI.
|
||||
|
||||
NOTE: If you require HTTP BASIC Authentication or HTTP Digest Authentication (untested), you can format it via `https://user:password@bdisk.square-r00t.net/boot.php`.
|
||||
|
||||
NOTE: This currently does not work for HTTPS with self-signed certificates.
|
||||
|
||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
||||
|
||||
==== `ssldir`
|
||||
Directory to hold SSL results, if we are generating keys, certificates, etc.
|
||||
|
||||
. No whitespace
|
||||
. Will be created if it does not exist
|
||||
|
||||
==== `ssl_ca`
|
||||
Path to the (root) CA certificate file iPXE should use. See http://ipxe.org/crypto[iPXE's crypto page^] for more information.
|
||||
|
||||
NOTE: You can use your own CA to sign existing certs. This is handy if you run a third-party/"Trusted" root-CA-signed certificate for the HTTPS target.
|
||||
|
||||
. No whitespace
|
||||
. Must be in PEM/X509 format
|
||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
||||
. If it exists, a matching key (<<code_ssl_cakey_code,`ssl_cakey`>>) *must* be specified
|
||||
.. However, if left blank/doesn't exist, one will be automatically generated
|
||||
|
||||
==== `ssl_cakey`
|
||||
Path to the (root) CA key file iPXE should use.
|
||||
|
||||
. No whitespace
|
||||
. Must be in PEM/X509 format
|
||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
||||
. If left blank or it doesn't exist (and <<code_ssl_ca_code,`ssl_ca`>> is also blank), one will be automatically generated
|
||||
. *Must* match/pair to <<code_ssl_ca_code,`ssl_ca`>> if specified/exists
|
||||
. MUST NOT be passphrase-protected/DES-encrypted
|
||||
|
||||
==== `ssl_crt`
|
||||
Path to the _client_ certificate iPXE should use.
|
||||
|
||||
. No whitespace
|
||||
. Must be in PEM/X509 format
|
||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
||||
. If specified/existent, a matching CA cert (<<code_ssl_ca_code,`ssl_ca`>>) and key (<<code_ssl_cakey_code,`ssl_cakey`>>) *must* be specified
|
||||
.. However, if left blank/doesn't exist, one will be automatically generated
|
||||
. *Must* be signed by <<code_ssl_ca_code,`ssl_ca`>>/<<code_ssl_cakey_code,`ssl_cakey`>> if specified and already exists
|
||||
|
||||
==== `ssl_key`
|
||||
Path to the _client_ key iPXE should use.
|
||||
|
||||
. No whitespace
|
||||
. Must be in PEM/X509 format
|
||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
||||
. If left blank/nonexistent (and <<code_ssl_ca_code,`ssl_ca`>> is also blank), one will be automatically generated
|
||||
|
||||
=== `[rsync]`
|
||||
This section controls aspects of rsync pushing. Only used if <<code_rsync_code,`sync:rsync`>> is enabled.
|
||||
|
||||
==== `host`
|
||||
The rsync destination host.
|
||||
|
||||
. Must resolve from the build server
|
||||
. Can be host, FQDN, or IP address
|
||||
|
||||
==== `user`
|
||||
This is the remote user we should use when performing the rsync push.
|
||||
|
||||
. User must exist on remote system
|
||||
. SSH pubkey authorization must be configured
|
||||
. The destination's hostkey must be added to your local build user's known hosts
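For example, a rough sketch of satisfying the last two requirements, run as your local build user (the host `mirror.example.com` and user `deploy` are hypothetical placeholders):

ssh-copy-id deploy@mirror.example.com
ssh deploy@mirror.example.com true    # first connect prompts to accept the hostkey, adding it to known_hosts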
|
||||
|
||||
==== `path`
|
||||
This is the remote destination path we should use for pushing via rsync.
|
||||
|
||||
|
||||
NOTE: You'll probably want to set <<code_user_code_3,`http:user`>> and <<code_group_code,`http:group`>> to what it'll need to be on the destination.
|
||||
|
||||
. No whitespace
|
||||
. The path *must* exist on the remote host
|
||||
. The path MUST be writable by <<code_user_code_5,`user`>>
|
||||
|
||||
==== `iso`
|
||||
Should we rsync over the ISO files too, or just the boot files?
|
||||
|
||||
[options="header"]
|
||||
|======================
|
||||
2+^|Accepts (case-insensitive) one of:
|
||||
^m|yes ^m|no
|
||||
^m|true ^m|false
|
||||
^m|1 ^m|0
|
||||
|======================
|
||||
91
docs/manual/user/GETTING_STARTED.adoc
Normal file
@@ -0,0 +1,91 @@
|
||||
== Getting Started
|
||||
|
||||
=== Downloading
|
||||
If it isn't in your distro's repositories (It *is* in Arch's AUR! Both https://aur.archlinux.org/packages/bdisk/[tagged release^] and https://aur.archlinux.org/packages/bdisk-git/[git master^].), you can still easily get rolling. Simply visit the project's https://git.square-r00t.net/BDisk/[source code web interface^] and download a tarball under the *Download* column:
|
||||
|
||||
image::fig1.1.png[cgit,align="center"]
|
||||
|
||||
If you know the tag of the commit you want, you can use curl:
|
||||
|
||||
curl -sL https://git.square-r00t.net/BDisk/snapshot/BDisk-4.0.0.tar.xz | tar -xf -
|
||||
|
||||
or wget:
|
||||
|
||||
wget -O - https://git.square-r00t.net/BDisk/snapshot/BDisk-3.11.tar.xz | tar -xf -
|
||||
|
||||
You can use `https://git.square-r00t.net/BDisk/snapshot/BDisk-master.tar.xz` for the URL if you want the latest working version. If you want a snapshot of a specific commit, you can use e.g. `https://git.square-r00t.net/BDisk/snapshot/BDisk-5ac510762ce00eef213957825de0e6d07186e7f8.tar.xz` and so on.
|
||||
|
||||
Alternatively, you can use https://git-scm.com/[git^]. Git most definitely _should_ be in your distro's repositories.
|
||||
|
||||
TIP: If you're new to git and want to learn more, I highly recommend the book https://git-scm.com/book/en/v2[Pro Git^]. It is available for free download (or online reading).
|
||||
|
||||
You can clone via https:
|
||||
|
||||
git clone https://git.square-r00t.net/BDisk
|
||||
|
||||
or native git protocol:
|
||||
|
||||
git clone git://git.square-r00t.net/bdisk.git BDisk
|
||||
|
||||
The git protocol is much faster, but at the cost of lessened security.
|
||||
|
||||
NOTE: I also have a mirror at https://github.com/johnnybubonic/BDisk[GitHub^], but I don't like GitHub very much and since it's a mirror repository, it's possible it will be out of date. For this reason, it's recommended that you use the resources above.
|
||||
|
||||
=== Prerequisites
|
||||
This is a list of software you'll need available to build with BDisk.
|
||||
|
||||
TIP: Your distro's package manager should have most if not all of these available, so it's unlikely you'll need to install from source.
|
||||
|
||||
NOTE: Some versions may be higher than actually needed (especially _gcc_).
|
||||
|
||||
CAUTION: You will need at least about *15GB* of free disk space, depending on what options you enable. Each architecture chroot (i.e. x86_64, i686) is about 3.5GB after a build using the default package set (more on that later), each architecture release tarball (what we use to build the chroots) is approximately 115MB, and each squashed image per architecture is about 1.1GB (if you use the default package set). If you don't understand what this means quite yet, don't worry -- we'll go into more detail later on. Just know that you'll need a fair bit of free disk space.
|
||||
|
||||
==== Build Environment
|
||||
* GNU/Linux (relatively recent release of preferred distro)
|
||||
** Building on FreeBSD and other +*BSDs+ *may* be possible via the use of https://www.freebsd.org/doc/handbook/jails.html[jails^]. This is entirely untested and no support nor testing will be offered by the author (me). If you would like to offer documentation for this, please <<contact_the_author,contact me>>.
|
||||
** Building on Windows *may* be possible via the use of the https://docs.microsoft.com/en-us/windows/wsl/install-win10[WSL (Windows Subsystem for Linux)^]. This is entirely untested and no support nor testing will be offered by the author (me). If you would like to offer documentation for this, please <<contact_the_author,contact me>>.
|
||||
** Building on macOS is simply not supported, period, due to chroots being necessary to the build functionality of BDisk (and macOS not being able to implement GNU/Linux chroots). You'll need to run a build VM.
|
||||
* https://www.python.org/[Python^] (>=3.6)
|
||||
|
||||
==== Necessary
|
||||
These are needed for using BDisk.
|
||||
|
||||
* https://github.com/dosfstools/dosfstools[dosfstools^]
|
||||
* http://libburnia-project.org[libisoburn^]
|
||||
* http://squashfs.sourceforge.net[squashfs-tools^] (>=4.2)
|
||||
|
||||
These are required Python modules:
|
||||
// TODO: double-check/update these.
|
||||
|
||||
* https://pypi.python.org/pypi/humanize[Humanize^]
|
||||
* http://jinja.pocoo.org/[Jinja2^]
|
||||
* https://pypi.python.org/pypi/psutil[PSUtil^]
|
||||
* https://pypi.python.org/pypi/validators[Validators^]
|
||||
|
||||
==== Optional
|
||||
While not strictly necessary, these will greatly enhance your BDisk usage. I've included some reasons why you might want to install them.
|
||||
|
||||
NOTE: If you do not wish to install any of these or cannot install them, be sure to disable the relevant options in the `build.ini` file (we'll talk about that later). The default `extra/dist.build.ini` should be sane enough to not require any of these.
|
||||
|
||||
* http://gcc.gnu.org[gcc (multilib)^] (>=6.x)
|
||||
** Needed for building iPXE.
|
||||
* http://gcc.gnu.org[gcc-libs (multilib)^] (>=6.x)
|
||||
** (Same as _gcc_.)
|
||||
* https://git-scm.com/[git^]
|
||||
** For autodetection of version, automatically making commits for your project, checking out source code, etc.
|
||||
* https://www.gnupg.org/[gpg/gnupg^] (>=2.1.11)
|
||||
** For automatically signing releases, verifying downloaded files from the Internet as part of the build process, etc. It's okay if you don't have a key set up!
|
||||
* https://rsync.samba.org/[rsync^]
|
||||
** For syncing built ISOs to a fileserver, syncing to a remote iPXE server, syncing to a traditional PXE/TFTP server, etc.
|
||||
|
||||
These are optional Python modules:
|
||||
|
||||
* https://pypi.python.org/pypi/GitPython[GitPython^]
|
||||
** (Same reasons as _git_)
|
||||
* https://pypi.python.org/pypi/pygpgme[PyGPGME^]
|
||||
** (Same reasons as _gpg/gnupg_)
|
||||
* https://pypi.python.org/pypi/patch[Patch^]
|
||||
** For branding iPXE environments per your `build.ini`.
|
||||
* https://pypi.python.org/pypi/pyOpenSSL[PyOpenSSL^]
|
||||
** To set up a PKI when building iPXE; used to create trusted/verified images.
|
||||
|
||||
51
docs/manual/user/IMPORTANT_CONCEPTS.adoc
Normal file
@@ -0,0 +1,51 @@
|
||||
== Important Concepts
|
||||
If this is your first foray into building live distros, there are some terms and concepts we need to understand first. This will simplify the process later on.
|
||||
|
||||
=== Terms
|
||||
An *operating system*, or OS, is what your programs (email client, web browser, etc.) run on.
|
||||
|
||||
There are two basic types of booting systems that communicate between the *hardware* (the physical computer itself and its components) and the operating system: https://en.wikipedia.org/wiki/BIOS[*BIOS*^] (Basic Input/Output System) which has been around for quite some time and the newer https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface[*UEFI*^] (Unified Extensible Firmware Interface). Don't worry, you don't need to memorize what they're acronyms for and there won't be an exam -- just remember that BIOS is an older technology and UEFI is the newer one (and that they operate differently).
|
||||
|
||||
*GNU/Linux*, sometimes just referred to as _Linux_ (And there is a difference between the terminologies, but it's nuanced. You are welcome to https://www.gnu.org/gnu/linux-and-gnu.en.html[read up on it^] though!), is an example of an operating system. Other examples include _Windows_, _macOS_ (previously _OS X_), _iOS_, _Android_, and a whole slew of others. There are many types of GNU/Linux offerings, called _distributions_, _flavors_, or _distros_.
|
||||
|
||||
A *live distro*, *live CD*, *live DVD*, *live USB*, and the like are a way of booting an operating system without installing it on the hard drive -- this means the computer doesn't even need a hard drive installed, or it doesn't matter if the installed operating system is broken. Typically they are Linux-based, but there are several Windows-based live releases out there (usually they're focused on rescuing broken Windows systems, so they're not very flexible).
|
||||
|
||||
*Hybrid ISOs* are ISO files that can be burned to optical media (CDs, DVDs, etc.) and also be _dd_'d directly to a USB thumbdrive (for computers that support booting from USB). That means one file, multiple media types.
|
||||
|
||||
*Architectures* are different hardware platforms. This mostly refers to the CPU. Common implementations are *64-bit* (also known as *x86_64* or *AMD64* for ones that support running both 64-bit and 32-bit software, or *IA64* or *Itanium* for processors that only support 64-bit) and *32-bit* (or *i686* and the older *i386* and *i486* implementations). Most consumer PCs on the market today are x86_64.
|
||||
|
||||
*Chroots*, *chrooting*, and the like are variants on the word *chroot*. A *chroot* is a way of running a GNU/Linux install "inside" another GNU/Linux distro. It's sort of like a virtual machine, or VM, except that it's a lot more lightweight and it doesn't do any actual virtualization -- it uses the host's kernel, memory mapping, etc. It's very useful for development of operating systems.
|
||||
|
||||
*PXE*, or Pre-boot eXecution Environment, is a way of booting operating systems over a local network.
|
||||
|
||||
*iPXE* is a http://ipxe.org/[project^] that builds a very small Linux kernel, UNDI (traditional PXE) images, and the like that allow you to essentially use PXE over the Internet. It's very flexible and customizable, and supports a custom scripting engine and such.
|
||||
|
||||
=== Why live media is necessary/Why you might want BDisk
|
||||
"But Brent," I hear you ask in a voice which most likely is nothing close to what you actually sound like and entirely in my head, "Why would I need a live CD/USB/etc.? And why BDisk?"
|
||||
|
||||
Elementary, my dear imaginary reader! I touch on some reasons why one might want live media in the beginning of the <<USER.adoc#user_manual,User Manual>>, but here's why you might want BDisk specifically as opposed to another live distro (or <<FAQ.adoc#i_don_t_like_bdisk_are_there_any_other_alternatives,live distro creator>>).
|
||||
|
||||
* Fully customizable
|
||||
* Works with a multitude of GNU/Linux distros -- both for the host build system and as the guest. (Still under development!)
|
||||
* It performs optimizations and compression to help you get the smallest ISO possible.
|
||||
* In addition to building hybrid ISOs, it supports building iPXE hybrid ISOs (meaning you only need a very small file; the rest of the operating system boots over the Internet).
|
||||
* It supports both BIOS and UEFI systems -- both the full image and the iPXE images.
|
||||
* It supports multiple architectures (x86_64, i686, possibly IA64 -- untested) on the same ISO.
|
||||
* It supports automatically syncing to a web mirror, PXE boot server, etc. via rsync upon successful build.
|
||||
* It supports SecureBoot (untested!).
|
||||
* It is 100% compatible with both the https://wiki.archlinux.org/index.php/installation_guide[Arch installation guide^] and the https://wiki.gentoo.org/wiki/Handbook:AMD64#Installing_Gentoo[Gentoo installation guide^].
|
||||
* It allows for non-interactive/automated building (i.e. nightly images).
|
||||
* It supports arbitrary file inclusion in a defined path on the ISO itself, not via some arbitrary directory as a separate partition on the media.
|
||||
* It can automatically build an accompanying "mini" ISO using iPXE -- which is also a hybrid, UEFI-supported ISO.
|
||||
* Automatic versioning based on git tags (optional).
|
||||
|
||||
=== Who might want to use BDisk?
|
||||
* System builders/hardware testers
|
||||
* System Administrators/Engineers/Architects
|
||||
* Information Security professionals
|
||||
* Computer repair shops
|
||||
* Technology Consultants
|
||||
* Hobbyists
|
||||
* Home GNU/Linux users
|
||||
* Technology enthusiasts
|
||||
|
||||
81
docs/manual/user/PROJECT_LAYOUT.adoc
Normal file
@@ -0,0 +1,81 @@
|
||||
== Project Structure
|
||||
The following is a tree of files and directories in a BDisk root directory. Note that yours may not look quite like this, as BDisk supports some directory relocation to aid in packaging for distros. These will be examined in-depth in the coming sections.
|
||||
|
||||
<BDisk root directory>
|
||||
├── bdisk
|
||||
│ ├── bchroot.py
|
||||
│ ├── bdisk.py
|
||||
│ ├── bGPG.py
|
||||
│ ├── bSSL.py
|
||||
│ ├── bsync.py
|
||||
│ ├── build.py
|
||||
│ ├── host.py
|
||||
│ ├── ipxe.py
|
||||
│ └── prep.py
|
||||
├── docs
|
||||
│ ├── COPYING
|
||||
│ ├── LICENSE -> COPYING
|
||||
│ ├── manual
|
||||
│ │ └── (...)
|
||||
│ ├── README
|
||||
├── examples
|
||||
│ └── HTTP
|
||||
│ └── (...)
|
||||
├── extra
|
||||
│ ├── bdisk.png
|
||||
│ ├── bin
|
||||
│ │ └── (...)
|
||||
│ ├── dist.build.ini
|
||||
│ ├── external
|
||||
│ │ └── (...)
|
||||
│ ├── mirrorlist
|
||||
│ ├── pre-build.d
|
||||
│ │ ├── (...)
|
||||
│ │ ├── i686
|
||||
│ │ │ └── (...)
|
||||
│ │ └── x86_64
|
||||
│ │ └── (...)
|
||||
│ └── templates
|
||||
│ ├── BIOS
|
||||
│ │ ├── isolinux.cfg.arch.j2
|
||||
│ │ └── isolinux.cfg.multi.j2
|
||||
│ ├── EFI
|
||||
│ │ ├── base.conf.j2
|
||||
│ │ ├── loader.conf.j2
|
||||
│ │ ├── ram.conf.j2
|
||||
│ │ ├── uefi1.conf.j2
|
||||
│ │ └── uefi2.conf.j2
|
||||
│ ├── GPG.j2
|
||||
│ ├── iPXE
|
||||
│ │ ├── BIOS
|
||||
│ │ │ └── isolinux.cfg.j2
|
||||
│ │ ├── EFI
|
||||
│ │ │ ├── base.conf.j2
|
||||
│ │ │ └── loader.conf.j2
|
||||
│ │ ├── EMBED.j2
|
||||
│ │ ├── patches
|
||||
│ │ │ ├── 01.git-version.patch.j2
|
||||
│ │ │ └── 02.banner.patch.j2
|
||||
│ │ └── ssl
|
||||
│ │ └── openssl.cnf
|
||||
│ ├── overlay
|
||||
│ │ ├── (...)
|
||||
│ │ ├── i686
|
||||
│ │ ├── x86_64
|
||||
│ ├── pre-build.d
|
||||
│ │ ├── (...)
|
||||
│ │ ├── i686
|
||||
│ │ ├── x86_64
|
||||
│ ├── VARS.txt.j2
|
||||
│ └── VERSION_INFO.txt.j2
|
||||
└── overlay
|
||||
├── (...)
|
||||
├── i686
|
||||
└── x86_64
|
||||
|
||||
include::fslayout/BDISK.adoc[]
|
||||
include::fslayout/DOCS.adoc[]
|
||||
include::fslayout/EXAMPLES.adoc[]
|
||||
include::fslayout/EXTRA.adoc[]
|
||||
include::fslayout/OVERLAY.adoc[]
|
||||
|
||||
10
docs/manual/user/advanced/AUTOLOGIN.adoc
Normal file
@@ -0,0 +1,10 @@
|
||||
=== Automatic Login (TTY)
|
||||
If you don't want to have to log into the TTY on boot, BDisk can automatically log in for you with a given username.
|
||||
|
||||
If, for example, you want a terminal to auto-login on TTY1 with the root user, you would create the following file at `<basedir>/overlay/etc/systemd/system/getty@tty1.service.d/autologin.conf`:
|
||||
|
||||
[Service]
|
||||
Type=idle
|
||||
ExecStart=
|
||||
ExecStart=-/usr/bin/agetty --autologin root --noclear %I 38400 linux
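A minimal sketch of creating that drop-in, run from your project root (replace `<basedir>` as usual):

mkdir -p <basedir>/overlay/etc/systemd/system/getty@tty1.service.d

Then create `autologin.conf` inside that directory with the contents above.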
|
||||
|
||||
3
docs/manual/user/advanced/BUILDING.adoc
Normal file
@@ -0,0 +1,3 @@
|
||||
=== Changing the Build Process
|
||||
If you want to make modifications that can't be managed by arbitrary file inclusion or changing the software package lists, you may want to introduce additional changes to the image configuration that's run during the chroot. This is fairly easy to do. Simply modify `<basedir>/extra/pre-build.d/root/pre-build.sh` with the changes you desire. Note that this has a `.sh` file extension, but it can be any type of script you want -- Bash, Perl, Python, etc. -- it just needs the shebang line at the beginning of the script.
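As a minimal sketch (the timezone tweak here is purely an illustrative assumption), such a script could be as simple as:

#!/bin/bash
# Runs during the chroot stage of the build, so anything done here ends up
# baked into the live image. Setting a default timezone is just an example.
ln -sf /usr/share/zoneinfo/UTC /etc/localtime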
|
||||
|
||||
30
docs/manual/user/advanced/DESKTOP.adoc
Normal file
@@ -0,0 +1,30 @@
|
||||
=== Starting a Desktop Environment
|
||||
You can install any desktop environment or window manager you would like via <<changing_the_installed_software,package lists>>! From there, it's simply a matter of setting the correct Systemd unit to start automatically. The https://wiki.archlinux.org/index.php/[Arch wiki^] has a lot of useful information here. As an example, I'll include http://lxde.org/[LXDE^] instructions here.
|
||||
|
||||
Simply create a symlink for the target. In the `<basedir>/overlay/etc/systemd/system/` directory:
|
||||
|
||||
ln -s /usr/lib/systemd/system/lxdm.service display-manager.service
|
||||
|
||||
==== Autologin (LXDE)
|
||||
Many desktop environments even offer an automatic login feature directly through the desktop manager (LXDM, in LXDE's case).
|
||||
|
||||
Again, using LXDE as an example, create a file at `<basedir>/overlay/etc/lxdm/lxdm.conf`:
|
||||
|
||||
[base]
|
||||
autologin=bdisk
|
||||
greeter=/usr/lib/lxdm/lxdm-greeter-gtk
|
||||
[server]
|
||||
arg=/usr/bin/X -background vt1
|
||||
[display]
|
||||
gtk_theme=Adwaita
|
||||
bottom_pane=1
|
||||
lang=1
|
||||
keyboard=0
|
||||
theme=Industrial
|
||||
[input]
|
||||
[userlist]
|
||||
disable=0
|
||||
white=
|
||||
black=
|
||||
|
||||
LXDE will then automatically log in with the user `bdisk` (note the second line, right under `[base]`) whenever started.
|
||||
20
docs/manual/user/advanced/SOFTWARE.adoc
Normal file
@@ -0,0 +1,20 @@
|
||||
=== Changing the Installed Software
|
||||
BDisk comes with a large https://bdisk.square-r00t.net/packages/[list of software^] installed in the build instance by default, ranging from data recovery (such as _foremost_, _scalpel_, _ddrescue_, etc.), security and data wiping (_nwipe_, _scrub_, etc.), penetration testing (_wifite_, _aircrack-ng_, etc.) and a slew of others. Seriously, if you're looking for a tool, chances are it's included.
|
||||
|
||||
However, this leads to a fairly long build time -- even with a local repository mirror (many of the packages are from the AUR). You may want to replace the list with a smaller subset.
|
||||
|
||||
The `iso.pkgs.\*` files are not files you should modify -- they list the software necessary for building BDisk itself and the minimum packages required for a bootable image. The `packages.*` files, however, are where you would add or remove software to be installed.
|
||||
|
||||
NOTE: The package lists can contain both https://www.archlinux.org/packages/[Arch repository packages^] *and* https://aur.archlinux.org/[AUR^] packages.
|
||||
|
||||
NOTE: Blank lines are ignored, and you can comment out lines by prefixing the line with the `#` character.
|
||||
|
||||
==== `<basedir>/extra/pre-build.d/i686/root/packages.arch`
|
||||
This list contains packages to *only* be installed for the i686 image.
|
||||
|
||||
==== `<basedir>/extra/pre-build.d/x86_64/root/packages.arch`
|
||||
This list contains packages you *only* want installed in the x86_64 image.
|
||||
|
||||
==== `<basedir>/extra/pre-build.d/root/packages.both`
|
||||
This file contains packages for both architectures (i686 and x86_64).
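For instance, a heavily trimmed `packages.both` might look like this (the package names are taken from the default software list mentioned above; keep only what you actually need):

# data recovery
ddrescue
foremost
# data wiping
nwipe
# wireless testing
aircrack-ng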
|
||||
|
||||
74
docs/manual/user/advanced/SSH.adoc
Normal file
@@ -0,0 +1,74 @@
=== SSH Pubkey Authentication
To start with, you'll want to secure SSH a little more than normal.

I highly recommend https://stribika.github.io/2015/01/04/secure-secure-shell.html[this article^], which we'll be following in this process.

First, create the file `<basedir>/overlay/etc/ssh/sshd_config` with the following contents. Comments and blank lines have been stripped out for brevity.

PermitRootLogin prohibit-password
HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_rsa_key
AuthorizedKeysFile .ssh/authorized_keys
PasswordAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
UsePAM yes
PrintMotd no # pam does that
Subsystem sftp /usr/lib/ssh/sftp-server
KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com

We'll also want to implement a more secure `ssh_config` file to avoid possible information leaks. The following is `<basedir>/overlay/etc/ssh/ssh_config`:

Host *
    KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
    PasswordAuthentication no
    ChallengeResponseAuthentication no
    PubkeyAuthentication yes
    HostKeyAlgorithms ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ssh-ed25519,ssh-rsa
    Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
    MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com

We'll want to create our own moduli. This can take a long time, but only needs to be done once -- it doesn't need to be done for every build. The following commands should be run in `<basedir>/overlay/etc/ssh/`:

ssh-keygen -G moduli.all -b 4096
ssh-keygen -T moduli.safe -f moduli.all
mv moduli.safe moduli
rm moduli.all

Then we generate host keys. This isn't strictly necessary, as the live media will create them automatically when starting SSH if they're missing, but doing it here provides some verification that the host you're SSHing to is, in fact, running the BDisk instance you built yourself. The following commands should be run in `<basedir>/overlay/etc/ssh/`:

ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
ssh-keygen -t rsa -b 4096 -f ssh_host_rsa_key -N "" < /dev/null

Make sure you have SSH keys generated on your host workstation so you can SSH into BDisk. If you don't have any ED25519 or RSA SSH keys, the following will create them for you. Run these commands on the host (your build machine, or wherever you'll be connecting from) as the user you want to be able to SSH into BDisk as:

ssh-keygen -t ed25519 -o -a 100
ssh-keygen -t rsa -b 4096 -o -a 100

The defaults are fine. Adding a passphrase to your private key is not necessary, but it is recommended (though note that doing so will inhibit automated SSHing). You should now have the following files in `~/.ssh/` (assuming you kept the defaults above):

id_ed25519
id_ed25519.pub
id_rsa
id_rsa.pub

WARNING: The files ending in *.pub* are _public_ -- they can be published anywhere. However, the ones that are not appended with *.pub* are your _private keys_ and should not be shared with anyone, whether they're passphrase-protected or not!

Now you'll want to add your public keys to your BDisk build. The following commands should be run in `<basedir>/overlay/`:

mkdir -p root/.ssh
chmod 700 root/.ssh
touch root/.ssh/authorized_keys
chmod 600 root/.ssh/authorized_keys
cat ~/.ssh/id_{ed25519,rsa}.pub > root/.ssh/authorized_keys

If you decided to <<code_user_code,enable a regular non-root user>> in your build, you'll want to perform the same steps above for that user as well (or forego the above and just enable SSH for the user you create). Remember to replace `root/` with `home/<<_code_username_code,<username>>>/`!

Lastly, we need to enable SSH to start on boot. Run the following command in `<basedir>/overlay/etc/systemd/system/multi-user.target.wants/`:

ln -s /usr/lib/systemd/system/sshd.service sshd.service

SSH should now start automatically once the instance boots.

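Once the built image is booted and on the network, you should be able to connect from your workstation with something along these lines (replace the placeholder address with whatever IP the live instance actually has):

ssh -i ~/.ssh/id_ed25519 root@<bdisk-ip>
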
docs/manual/user/advanced/VPN.adoc (new file, 13 lines)
@@ -0,0 +1,13 @@
=== VPN Configuration
For this example we'll set up an https://openvpn.net/[OpenVPN^] client to start automatically on boot.

Setting up an OpenVPN server is outside the scope of this section, but there are a https://openvpn.net/index.php/open-source/documentation/howto.html[multitude^] of https://openvpn.net/index.php/open-source/documentation/examples.html[useful^] https://wiki.archlinux.org/index.php/OpenVPN[documentation^] https://wiki.gentoo.org/wiki/Openvpn[sources^] out there that will help you with that.

However, once you have your client .ovpn file (in our example, we'll call it `client.ovpn`), you can add it to the build relatively easily.

Copy `client.ovpn` to `<basedir>/overlay/etc/openvpn/client/client.conf` -- note the changed file extension. Then, in the `<basedir>/overlay/etc/systemd/system/multi-user.target.wants/` directory, issue this command:

ln -s /usr/lib/systemd/system/openvpn-client\@.service openvpn-client\@client.service

OpenVPN will then start on boot in the built BDisk instance.

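Putting both steps together, a minimal sketch, assuming `client.ovpn` sits in your current directory and `<basedir>` points at your BDisk checkout (the `mkdir` calls simply ensure the target directories exist):

mkdir -p <basedir>/overlay/etc/openvpn/client
cp client.ovpn <basedir>/overlay/etc/openvpn/client/client.conf
mkdir -p <basedir>/overlay/etc/systemd/system/multi-user.target.wants
cd <basedir>/overlay/etc/systemd/system/multi-user.target.wants
ln -s /usr/lib/systemd/system/openvpn-client\@.service openvpn-client\@client.service
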
docs/manual/user/fslayout/BDISK.adoc (new file, 13 lines)
@@ -0,0 +1,13 @@
=== bdisk/
This directory contains the "heart" of BDisk. It is essentially a Python module package, containing several Python "subpackages" split across different files that provide different functions for BDisk. Chances are you won't ever need to touch anything in here.

* <<code_bchroot_py_code>>
* <<code_bdisk_py_code>>
* <<code_bgpg_py_code>>
* <<code_bssl_py_code>>
* <<code_bsync_py_code>>
* <<code_build_py_code>>
* <<code_host_py_code>>
* <<code_ipxe_py_code>>
* <<code_prep_py_code>>

docs/manual/user/fslayout/DOCS.adoc (new file, 15 lines)
@@ -0,0 +1,15 @@
=== docs/
This directory contains various documentation and other helpful text.

==== COPYING
This contains BDisk's license, the GPLv3.

==== LICENSE
This is simply a link to `COPYING`.

==== manual/
This directory contains the documentation source you're reading right now! It's written in http://asciidoc.org/[asciidoc^] (well, to be more precise, it's written in/has some http://asciidoctor.org/[asciidoctor^]-isms). I'd recommend reading the rendered version, as the source (while perfectly human-readable) is written in a very modular fashion, so it may be inconvenient to read each source file and follow the include directives.

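If you'd like to render it yourself and have http://asciidoctor.org/[asciidoctor^] installed, a single command is enough; the entry-point filename below is an assumption, so substitute whichever top-level `.adoc` file your checkout actually uses:

asciidoctor -o manual.html docs/manual/HEAD.adoc
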
==== README
This is a placeholder for common convention; it simply tells you to read the manual (and where to find or build it).

docs/manual/user/fslayout/EXAMPLES.adoc (new file, 3 lines)
@@ -0,0 +1,3 @@
=== examples/
This directory contains example filesystem layouts for e.g. your webserver (for iPXE), or your PXE server via TFTP.

docs/manual/user/fslayout/EXTRA.adoc (new file, 21 lines)
@@ -0,0 +1,21 @@
=== extra/
This directory contains multiple "support files" for BDisk building.

==== bdisk.png
This file is used for bootloader graphics. If you change the name of the project, this can be named something different -- see <<code_uxname_code,the section on uxname>>.

==== bin/
This directory contains sample code or extra tools that don't have anything to do with BDisk's normal operation but are useful in building a BDisk distribution.

==== dist.build.ini
This is the "source-provided"/upstream example <<the_code_build_ini_code_file,`build.ini`>>. It will be sourced for any missing configuration options or the like.

==== external/
This directory contains external source code for use with extra features in BDisk that would otherwise be inconvenient to fetch and build dynamically.

==== pkg.build.ini
This is the recommended default <<the_code_build_ini_code_file,`build.ini`>> file for packagers of distro repositories to use when packaging BDisk for inclusion in a package manager.

include::PREBUILD.adoc[]

include::TEMPLATES.adoc[]

docs/manual/user/fslayout/OVERLAY.adoc (new file, 13 lines)
@@ -0,0 +1,13 @@
=== overlay/
This directory follows similar rules to the <<pre_build_d,pre-build.d/>> directory, except it is applied *after* the chroots are prepared (as it is designed to be user-centric rather than core functionality). We'll go into this in more depth later, as this is where most of your customizations will be done.

For files that should be included in both chroots, simply recreate the path with the desired file. For instance, if you want a file `/etc/foo/bar.conf` to exist in both the i686 and x86_64 versions, it would exist as the path `overlay/etc/foo/bar.conf`.

It uses the following structure:

==== i686/
This contains modifications that should be applied to the i686 version only. If you wanted a file to exist only in the i686 version at `/etc/a/b.conf`, it would be placed at `overlay/i686/etc/a/b.conf`.

==== x86_64/
This contains modifications that should be applied to the x86_64 version only. If you wanted a file to exist only in the x86_64 version at `/etc/z/y.conf`, it would be placed at `overlay/x86_64/etc/z/y.conf`.

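Putting the three examples above together, the resulting layout looks like this:

overlay/
    etc/foo/bar.conf          (applied to both architectures)
    i686/etc/a/b.conf         (i686 image only)
    x86_64/etc/z/y.conf       (x86_64 image only)
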
docs/manual/user/fslayout/PREBUILD.adoc (new file, 13 lines)
@@ -0,0 +1,13 @@
==== pre-build.d/
This directory contains a "core" overlay. Generally these files shouldn't be modified unless you know what you're doing, but there are some interesting things you can do in here. As a rule, though, you'll want to place your modifications in the <<overlay_2,`overlay/`>> directory.

For files that should be included in both chroots, simply recreate the path with the desired file. For instance, if you want a file `/etc/foo/bar.conf` to exist in both the i686 and x86_64 versions, it would exist as the path `pre-build.d/etc/foo/bar.conf`.

It uses the following structure:

===== i686/
This contains modifications that should be applied to the i686 version *only*. If you wanted a file to exist only in the i686 version at `/etc/a/b.conf`, it would be placed at `pre-build.d/i686/etc/a/b.conf`.

===== x86_64/
This contains modifications that should be applied to the x86_64 version *only*. If you wanted a file to exist only in the x86_64 version at `/etc/z/y.conf`, it would be placed at `pre-build.d/x86_64/etc/z/y.conf`.

docs/manual/user/fslayout/TEMPLATES.adoc (new file, 48 lines)
@@ -0,0 +1,48 @@
==== templates/
This directory contains dynamic templates used for dynamic configuration building and other such things. They are written in http://jinja.pocoo.org/[Jinja2^]. If you haven't used Jinja2 before, the http://jinja.pocoo.org/docs/dev/templates/[templating documentation^] will prove to be very useful.

This allows you to customize low-level behaviour of BDisk without modifying the source.

===== BIOS/
The `isolinux.cfg.arch.j2` template controls boot options for the single-arch versions of BDisk. In other words, if you only build an i686 or only an x86_64 version, this is the template that would be used for BIOS boot mode.

The `isolinux.cfg.multi.j2` template is used for multi-arch builds. It manages booting for both the i686 and x86_64 versions.

These files let you change the boot behaviour on BIOS systems: the menu colours, the menu entries, the default entry, and so on.

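For orientation, what these templates ultimately render are standard syslinux/isolinux menu entries; the label and paths below are illustrative placeholders, not the template's actual contents:

LABEL bdisk_x86_64
    MENU LABEL BDisk (x86_64)
    LINUX /boot/vmlinuz.64
    INITRD /boot/initrd.64.img
    APPEND archisobasedir=bdisk archisolabel=BDISK
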
===== EFI/
The files in here are https://www.freedesktop.org/wiki/Software/systemd/systemd-boot/[systemd-boot^] configurations. The distributed defaults are:

* `base.conf.j2`, which controls the "main"/default entry.
* `loader.conf.j2`, the meta configuration file which tells the loader which entry to load by default and which entries to include.
* `ram.conf.j2`, which allows BDisk to run entirely from RAM.
* `uefi1.conf.j2`, which provides a UEFI shell (for older UEFI systems).
* `uefi2.conf.j2`, which provides a UEFI shell (for newer UEFI systems).

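Likewise, the rendered result of an entry template is an ordinary systemd-boot loader entry; the paths and labels here are illustrative placeholders, not BDisk's actual template output:

title   BDisk
linux   /EFI/bdisk/vmlinuz
initrd  /EFI/bdisk/initrd.img
options archisobasedir=bdisk archisolabel=BDISK
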
===== GPG.j2
This file contains default parameters for https://www.gnupg.org/documentation/manuals/gnupg/Unattended-GPG-key-generation.html[unattended GPG key generation^], used if we need to automatically generate a key.

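For reference, GnuPG's unattended key generation reads a parameter file shaped roughly like the following; the values are illustrative placeholders, not the defaults shipped in `GPG.j2`:

%echo Generating an example signing key
Key-Type: RSA
Key-Length: 4096
Name-Real: Example Builder
Name-Email: builder@example.com
Expire-Date: 0
%no-protection
%commit
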
===== iPXE/
This directory holds templates for iPXE/mini builds.

The `BIOS/` directory is similar to <<bios, BIOS/>> mentioned above, but it only needs one configuration file and uses a much more minimal design (since its entire purpose is to chainload to the iPXE loader).

The `EFI/` directory is likewise similar to <<efi, EFI/>> above, but needs fewer configuration files (its only purpose is to bootstrap iPXE).

`EMBED.j2` is the iPXE http://ipxe.org/scripting[embedded script^] (http://ipxe.org/embed[more info^]). This is what chainloads the remote resources (kernel, initrd, squashed filesystem images, and so forth).

The `patches/` directory largely controls branding of the mini ISO. The patches are in https://www.gnu.org/software/diffutils/manual/html_node/Unified-Format.html[unified diff^] (or "patch") format.

===== overlay/
This directory contains *templated* overlays. These are intended to be templated by the user. See <<overlay, the overlay section>> for more information on how to use this. Remember to suffix your template files with the `.j2` extension.

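As a purely hypothetical illustration (the variables actually exposed to templates depend on your build configuration and are not documented here), a templated MOTD could live at `templates/overlay/etc/motd.j2` and contain:

Welcome! This image was built as version {{ version }}.
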
===== pre-build.d/
This directory contains *templated* overlays. These are not intended to be managed by the user, as they handle configuration necessary for building an ISO. See <<pre_build_d, the pre-build.d section>> for more information on this.

===== VERSION_INFO.txt.j2
This template generates a VERSION_INFO.txt file that is placed in various locations throughout the builds to help identify which version, build, etc. the ISO is.

@@ -1,9 +0,0 @@
<?php
print '#!ipxe

cpuid --ext 29 && set bit_type 64 || set bit_type 32
initrd example.${bit_type}.img
kernel example.${bit_type}.kern initrd=example.${bit_type}.img ip=:::::eth0:dhcp archiso_http_srv=http://domain.tld/path/to/squashes/ archisobasedir=EXAMPLE archisolabel=EXAMPLE checksum=y
boot
'
?>
@@ -1 +0,0 @@
NOT A REAL INITRD IMAGE. REPLACE WITH ACTUAL INITRD.
@@ -1 +0,0 @@
NOT A REAL KERNEL FILE. REPLACE WITH ACTUAL KERNEL
@@ -1 +0,0 @@
NOT A REAL INITRD IMAGE. REPLACE WITH ACTUAL INITRD.
@@ -1 +0,0 @@
NOT A REAL KERNEL FILE. REPLACE WITH ACTUAL KERNEL
@@ -1 +0,0 @@
c18bde6e20c195bfb0a018b5c13dc420 airootfs.sfs
@@ -1 +0,0 @@
NOT A REAL SQUASHED FILESYSTEM FILE. REPLACE WITH ACTUAL SQUASHED FILESYSTEM
@@ -1 +0,0 @@
ada655a13f53702b3fe13cae001ab14f741e10c2bb83869048d4c18e74111c12 airootfs.sfs
@@ -1 +0,0 @@
c18bde6e20c195bfb0a018b5c13dc420 airootfs.sfs
Some files were not shown because too many files have changed in this diff.