parent 14f300f212
commit 4958180333
1 changed file with 32 additions and 32 deletions
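The only change visible in the loaded hunks below swaps true division (`/`) for floor division (`//`) when picking a random chunk length. Under Python 3, `max_chunk_len / 2` is a float, and `random.randint` expects integer endpoints, so an odd `max_chunk_len` would produce a non-integral endpoint and fail at runtime. A minimal standalone sketch of the difference (illustrative values, not part of the patched file):

```python
import random

max_chunk_len = 5

print(max_chunk_len / 2)    # 2.5: Python 3 true division always yields a float
print(max_chunk_len // 2)   # 2:   floor division keeps int operands int

# random.randint requires integer endpoints; an endpoint like 2.5 raises,
# which is why the patched line uses floor division.
chunk_len = random.randint(max_chunk_len // 2, max_chunk_len)
print(chunk_len)            # some integer in [2, 5]
```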
@@ -220,69 +220,69 @@ class VaultEditor:
     def __init__(self, password):
         self.vault = VaultLib(password)

     def _shred_file_custom(self, tmp_path):
         """Destroy a file, when shred (core-utils) is not available

         Unix `shred' destroys files "so that they can be recovered only with great difficulty with
         specialised hardware, if at all". It is based on the method from the paper
         "Secure Deletion of Data from Magnetic and Solid-State Memory",
         Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).

         We do not go to that length to re-implement shred in Python; instead, overwriting with a block
         of random data should suffice.

         See https://github.com/ansible/ansible/pull/13700 .
         """

         file_len = os.path.getsize(tmp_path)
         max_chunk_len = min(1024*1024*2, file_len)

         passes = 3
         with open(tmp_path, "wb") as fh:
             for _ in range(passes):
                 fh.seek(0, 0)
                 # get a random chunk of data, each pass with a different length
-                chunk_len = random.randint(max_chunk_len/2, max_chunk_len)
+                chunk_len = random.randint(max_chunk_len//2, max_chunk_len)
                 data = os.urandom(chunk_len)

                 for _ in range(0, file_len // chunk_len):
                     fh.write(data)
                 fh.write(data[:file_len % chunk_len])

                 assert(fh.tell() == file_len)  # FIXME remove this assert once we have unittests to check its accuracy
                 os.fsync(fh)

     def _shred_file(self, tmp_path):
         """Securely destroy a decrypted file

         Note standard limitations of GNU shred apply (for flash, overwriting would have no effect
         due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
         guarantee data hits the disk; etc). Furthermore, if your tmp dir is on tmpfs (a ramdisk),
         it is a non-issue.

         Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
         a good idea. If shred is not available (e.g. on Windows, or no core-utils installed), fall back on
         a custom shredding method.
         """

         if not os.path.isfile(tmp_path):
             # file is already gone
             return

         try:
             r = call(['shred', tmp_path])
         except OSError:
             # shred is not available on this system, or some other error occurred.
             r = 1

         if r != 0:
             # we could not successfully execute unix shred; therefore, do custom shred.
             self._shred_file_custom(tmp_path)

         os.remove(tmp_path)

     def _edit_file_helper(self, filename, existing_data=None, force_save=False):

         # Create a tempfile
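A sanity check on the overwrite loop in `_shred_file_custom` above: each pass writes `file_len // chunk_len` full chunks and then the first `file_len % chunk_len` bytes of the same chunk, which together cover exactly `file_len` bytes, the invariant the `assert(fh.tell() == file_len)` guards. A small worked example with illustrative numbers:

```python
file_len = 10        # say the decrypted tmp file is 10 bytes
chunk_len = 4        # say this pass drew a 4-byte random chunk

full_chunks = file_len // chunk_len   # 2 full writes, 8 bytes
remainder = file_len % chunk_len      # 2 trailing bytes from the same chunk

# 2 * 4 + 2 == 10: the file is overwritten end to end, no more, no less
assert full_chunks * chunk_len + remainder == file_len
```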
@@ -294,11 +294,11 @@ class VaultEditor:
         # drop the user into an editor on the tmp file
         try:
             call(self._editor_shell_command(tmp_path))
         except:
             # whatever happens, destroy the decrypted file
             self._shred_file(tmp_path)
             raise

         tmpdata = self.read_data(tmp_path)

         # Do nothing if the content has not changed
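The second hunk shows the cleanup contract the editor depends on: if the external editor invocation fails for any reason, the decrypted tmp file must be shredded before the exception propagates. A minimal standalone sketch of that pattern, assuming a hypothetical `shred_file` callable standing in for `VaultEditor._shred_file`:

```python
import subprocess

def edit_then_cleanup(tmp_path, editor_cmd, shred_file):
    """Run an external editor on tmp_path; shred the plaintext if it fails.

    editor_cmd is the editor argv prefix (e.g. ['vim']); shred_file is a
    hypothetical callable that overwrites and removes the plaintext file.
    """
    try:
        subprocess.call(editor_cmd + [tmp_path])
    except BaseException:
        # mirror the bare `except:` above: even KeyboardInterrupt must
        # destroy the decrypted file before the exception propagates
        shred_file(tmp_path)
        raise
```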