Skip to content

Commit 9d03f50

Browse files
authored
Merge branch 'develop' into feature/data-renderer
2 parents 07b0503 + 81bac4d commit 9d03f50

File tree

7 files changed

+384
-59
lines changed

7 files changed

+384
-59
lines changed

volatility3/framework/plugins/linux/pagecache.py

Lines changed: 262 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,15 @@
55
import math
66
import logging
77
import datetime
8+
import time
9+
import tarfile
810
from dataclasses import dataclass, astuple
911
from typing import IO, List, Set, Type, Iterable, Tuple
12+
from io import BytesIO
13+
from pathlib import PurePath
1014

1115
from volatility3.framework.constants import architectures
12-
from volatility3.framework import renderers, interfaces, exceptions
16+
from volatility3.framework import constants, renderers, interfaces, exceptions
1317
from volatility3.framework.renderers import format_hints
1418
from volatility3.framework.interfaces import plugins
1519
from volatility3.framework.configuration import requirements
@@ -625,3 +629,260 @@ def run(self):
625629
return renderers.TreeGrid(
626630
headers, Files.format_fields_with_headers(headers, self._generator())
627631
)
632+
633+
634+
class RecoverFs(plugins.PluginInterface):
    """Recovers the cached filesystem (directories, files, symlinks) into a compressed tarball.

    Details: level 0 directories are named after the UUID of the parent superblock;
    metadata aren't replicated to extracted objects; objects modification time is set
    to the plugin run time; absolute symlinks are converted to relative symlinks to
    prevent referencing the analyst's filesystem.
    Troubleshooting: to fix extraction errors related to long paths, please consider
    using https://github.com/mxmlnkn/ratarmount.
    """

    _version = (1, 0, 0)
    _required_framework_version = (2, 21, 0)

    @classmethod
    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
        return [
            requirements.ModuleRequirement(
                name="kernel",
                description="Linux kernel",
                architectures=architectures.LINUX_ARCHS,
            ),
            requirements.PluginRequirement(
                name="files", plugin=Files, version=(1, 1, 0)
            ),
            requirements.PluginRequirement(
                name="inodepages", plugin=InodePages, version=(3, 0, 0)
            ),
            requirements.ChoiceRequirement(
                name="compression_format",
                description="Compression format (default: gz)",
                choices=["gz", "bz2", "xz"],
                default="gz",
                optional=True,
            ),
        ]

    def _tar_add_reg_inode(
        self,
        context: interfaces.context.ContextInterface,
        layer_name: str,
        tar: tarfile.TarFile,
        reg_inode_in: InodeInternal,
        path_prefix: str = "",
        mtime: float = None,
    ) -> int:
        """Extracts a REG inode content and writes it to a TarFile object.

        Args:
            context: The context on which to operate
            layer_name: The name of the layer on which to operate
            tar: The TarFile object to write to
            reg_inode_in: The inode to extract content from
            path_prefix: A custom path prefix to prepend the inode path with
            mtime: The modification time to set the TarInfo object to

        Returns:
            The number of extracted bytes
        """
        inode_content_buffer = BytesIO()
        InodePages.write_inode_content_to_stream(
            context, layer_name, reg_inode_in.inode, inode_content_buffer
        )
        inode_content_buffer.seek(0)
        handle_buffer_size = inode_content_buffer.getbuffer().nbytes

        tar_info = tarfile.TarInfo(path_prefix + reg_inode_in.path)
        # The tarfile module only has read support for sparse files:
        # https://docs.python.org/3.12/library/tarfile.html#tarfile.LNKTYPE:~:text=and%20longlink%20extensions%2C-,read%2Donly%20support,-for%20all%20variants
        tar_info.type = tarfile.REGTYPE
        tar_info.size = handle_buffer_size
        tar_info.mode = 0o444
        if mtime is not None:
            tar_info.mtime = mtime
        tar.addfile(tar_info, inode_content_buffer)

        return handle_buffer_size

    def _tar_add_dir(
        self,
        tar: tarfile.TarFile,
        directory_path: str,
        mtime: float = None,
    ) -> None:
        """Adds a directory path to a TarFile object, based on a DIR inode.

        Args:
            tar: The TarFile object to write to
            directory_path: The directory path to create
            mtime: The modification time to set the TarInfo object to
        """
        tar_info = tarfile.TarInfo(directory_path)
        tar_info.type = tarfile.DIRTYPE
        tar_info.mode = 0o755
        if mtime is not None:
            tar_info.mtime = mtime
        tar.addfile(tar_info)

    def _tar_add_lnk(
        self,
        tar: tarfile.TarFile,
        symlink_source: str,
        symlink_dest: str,
        symlink_source_prefix: str = "",
        mtime: float = None,
    ) -> None:
        """Adds a symlink to a TarFile object.

        Args:
            tar: The TarFile object to write to
            symlink_source: The symlink source path
            symlink_dest: The symlink target/destination
            symlink_source_prefix: A custom path prefix to prepend the symlink source with
            mtime: The modification time to set the TarInfo object to
        """
        # Patch symlinks pointing to absolute paths,
        # to prevent referencing the host filesystem.
        if symlink_dest.startswith("/"):
            relative_dest = PurePath(symlink_dest).relative_to(PurePath("/"))
            # Remove the leading "/" to prevent an extra undesired "../" in the output
            symlink_dest = (
                PurePath(
                    *[".."] * len(PurePath(symlink_source.lstrip("/")).parent.parts)
                )
                / relative_dest
            ).as_posix()
        tar_info = tarfile.TarInfo(symlink_source_prefix + symlink_source)
        tar_info.type = tarfile.SYMTYPE
        tar_info.linkname = symlink_dest
        tar_info.mode = 0o444
        if mtime is not None:
            tar_info.mtime = mtime
        tar.addfile(tar_info)

    def _generator(self):
        """Walks every cached inode, writes recoverable objects into an in-memory
        tarball, yields one result row per extracted inode, then saves the tarball
        through the framework's file handler.

        Yields:
            Tuples of (tree depth, inode fields + recovered file size)
        """
        vmlinux_module_name = self.config["kernel"]
        vmlinux = self.context.modules[vmlinux_module_name]
        vmlinux_layer = self.context.layers[vmlinux.layer_name]
        tar_buffer = BytesIO()
        tar = tarfile.open(
            fileobj=tar_buffer,
            mode=f"w:{self.config['compression_format']}",
        )
        # Set a unique timestamp for all extracted files
        mtime = time.time()

        inodes_iter = Files.get_inodes(
            context=self.context,
            vmlinux_module_name=vmlinux_module_name,
            follow_symlinks=False,
        )

        # Prefix paths with the superblock UUID's to prevent overlaps.
        # Switch to device major and device minor for older kernels (< 2.6.39-rc1).
        uuid_as_prefix = vmlinux.get_type("super_block").has_member("s_uuid")
        if not uuid_as_prefix:
            vollog.warning(
                "super_block struct does not support s_uuid attribute. Consequently, level 0 directories won't refer to the superblock uuid's, but to its device_major:device_minor numbers."
            )

        # FIX: the original `visited_paths = seen_prefixes = set()` bound both
        # names to the SAME set object, so visited paths leaked into the prefix
        # check and vice versa. Use two distinct sets.
        visited_paths: Set[str] = set()
        seen_prefixes: Set[str] = set()
        for inode_in in inodes_iter:

            # Code is slightly duplicated here with the if-block below.
            # However this prevents unneeded tar manipulation if fifo
            # or sock inodes come through for example.
            if not (
                inode_in.inode.is_reg or inode_in.inode.is_dir or inode_in.inode.is_link
            ):
                continue

            if not inode_in.path.startswith("/"):
                vollog.debug(
                    f'Skipping processing of potentially smeared "{inode_in.path}" inode name as it does not start with a "/".'
                )
                continue

            # Construct the output path
            if uuid_as_prefix:
                prefix = f"/{inode_in.superblock.uuid}"
            else:
                prefix = f"/{inode_in.superblock.major}:{inode_in.superblock.minor}"
            prefixed_path = prefix + inode_in.path

            # Sanity check for already processed paths
            if prefixed_path in visited_paths:
                vollog.log(
                    constants.LOGLEVEL_VV,
                    f'Already processed prefixed inode path: "{prefixed_path}".',
                )
                continue
            elif prefix not in seen_prefixes:
                self._tar_add_dir(tar, prefix, mtime)
                seen_prefixes.add(prefix)

            visited_paths.add(prefixed_path)
            extracted_file_size = renderers.NotApplicableValue()

            # Inodes parent directory is yielded first, which
            # ensures that a file parent path will exist beforehand.
            # tarfile will take care of creating it anyway.
            if inode_in.inode.is_reg:
                extracted_file_size = self._tar_add_reg_inode(
                    self.context,
                    vmlinux_layer.name,
                    tar,
                    inode_in,
                    prefix,
                    mtime,
                )
            elif inode_in.inode.is_dir:
                self._tar_add_dir(tar, prefixed_path, mtime)
            elif (
                inode_in.inode.is_link
                and inode_in.inode.has_member("i_link")
                and inode_in.inode.i_link
                and inode_in.inode.i_link.is_readable()
            ):
                symlink_dest = inode_in.inode.i_link.dereference().cast(
                    "string", max_length=255, encoding="utf-8", errors="replace"
                )
                self._tar_add_lnk(tar, inode_in.path, symlink_dest, prefix, mtime)
                # Set path to a user friendly representation before yielding
                inode_in.path = InodeUser.format_symlink(inode_in.path, symlink_dest)
            else:
                continue

            inode_out = inode_in.to_user(vmlinux_layer)
            yield (0, astuple(inode_out) + (extracted_file_size,))

        tar.close()
        tar_buffer.seek(0)
        output_filename = f"recovered_fs.tar.{self.config['compression_format']}"
        with self.open(output_filename) as f:
            f.write(tar_buffer.getvalue())

    def run(self):
        """Builds the output TreeGrid; one row per recovered filesystem object."""
        headers = [
            ("SuperblockAddr", format_hints.Hex),
            ("MountPoint", str),
            ("Device", str),
            ("InodeNum", int),
            ("InodeAddr", format_hints.Hex),
            ("FileType", str),
            ("InodePages", int),
            ("CachedPages", int),
            ("FileMode", str),
            ("AccessTime", datetime.datetime),
            ("ModificationTime", datetime.datetime),
            ("ChangeTime", datetime.datetime),
            ("FilePath", str),
            ("InodeSize", int),
            ("Recovered FileSize", int),
        ]

        return renderers.TreeGrid(
            headers, Files.format_fields_with_headers(headers, self._generator())
        )

volatility3/framework/plugins/windows/cmdline.py

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@ def _generator(self, procs):
7070
for proc in procs:
7171
process_name = utility.array_to_string(proc.ImageFileName)
7272
proc_id = "Unknown"
73+
result_text = None
7374

7475
try:
7576
proc_id = proc.UniqueProcessId
@@ -78,13 +79,22 @@ def _generator(self, procs):
7879
)
7980

8081
except exceptions.SwappedInvalidAddressException as exp:
81-
result_text = f"Required memory at {exp.invalid_address:#x} is inaccessible (swapped)"
82+
vollog.debug(
83+
f"Required memory at {exp.invalid_address:#x} is inaccessible (swapped)"
84+
)
8285

8386
except exceptions.PagedInvalidAddressException as exp:
84-
result_text = f"Required memory at {exp.invalid_address:#x} is not valid (process exited?)"
87+
vollog.debug(
88+
f"Required memory at {exp.invalid_address:#x} is not valid (process exited?)"
89+
)
8590

8691
except exceptions.InvalidAddressException as exp:
87-
result_text = f"Process {proc_id}: Required memory at {exp.invalid_address:#x} is not valid (incomplete layer {exp.layer_name}?)"
92+
vollog.debug(
93+
f"Process {proc_id}: Required memory at {exp.invalid_address:#x} is not valid (incomplete layer {exp.layer_name}?)"
94+
)
95+
96+
if not result_text:
97+
result_text = renderers.UnreadableValue()
8898

8999
yield (0, (proc.UniqueProcessId, process_name, result_text))
90100

volatility3/framework/plugins/windows/dumpfiles.py

Lines changed: 27 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -44,14 +44,16 @@ def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]
4444
description="Process ID to include (all other processes are excluded)",
4545
optional=True,
4646
),
47-
requirements.IntRequirement(
47+
requirements.ListRequirement(
4848
name="virtaddr",
49-
description="Dump a single _FILE_OBJECT at this virtual address",
49+
element_type=int,
50+
description="Dump the _FILE_OBJECTs at the given virtual address(es)",
5051
optional=True,
5152
),
52-
requirements.IntRequirement(
53+
requirements.ListRequirement(
5354
name="physaddr",
54-
description="Dump a single _FILE_OBJECT at this physical address",
55+
element_type=int,
56+
description="Dump the _FILE_OBJECTs at the given physical address(es)",
5557
optional=True,
5658
),
5759
requirements.StringRequirement(
@@ -318,24 +320,26 @@ def _generator(self, procs: List, offsets: List):
318320
)
319321

320322
elif offsets:
323+
virtual_layer_name = kernel.layer_name
324+
325+
# FIXME - change this after standard access to physical layer
326+
physical_layer_name = self.context.layers[virtual_layer_name].config[
327+
"memory_layer"
328+
]
329+
321330
# Now process any offsets explicitly requested by the user.
322331
for offset, is_virtual in offsets:
323332
try:
324-
layer_name = kernel.layer_name
325-
# switch to a memory layer if the user provided --physaddr instead of --virtaddr
326-
if not is_virtual:
327-
layer_name = self.context.layers[layer_name].config[
328-
"memory_layer"
329-
]
330-
331333
file_obj = self.context.object(
332334
kernel.symbol_table_name + constants.BANG + "_FILE_OBJECT",
333-
layer_name=layer_name,
334-
native_layer_name=kernel.layer_name,
335+
layer_name=(
336+
virtual_layer_name if is_virtual else physical_layer_name
337+
),
338+
native_layer_name=virtual_layer_name,
335339
offset=offset,
336340
)
337341
for result in self.process_file_object(
338-
self.context, kernel.layer_name, self.open, file_obj
342+
self.context, virtual_layer_name, self.open, file_obj
339343
):
340344
yield (0, result)
341345
except exceptions.InvalidAddressException:
@@ -355,11 +359,15 @@ def run(self):
355359
):
356360
raise ValueError("Cannot use filter flag with an address flag")
357361

358-
if self.config.get("virtaddr", None) is not None:
359-
offsets.append((self.config["virtaddr"], True))
360-
elif self.config.get("physaddr", None) is not None:
361-
offsets.append((self.config["physaddr"], False))
362-
else:
362+
if self.config.get("virtaddr"):
363+
for virtaddr in self.config["virtaddr"]:
364+
offsets.append((virtaddr, True))
365+
366+
if self.config.get("physaddr"):
367+
for physaddr in self.config["physaddr"]:
368+
offsets.append((physaddr, False))
369+
370+
if not offsets:
363371
filter_func = pslist.PsList.create_pid_filter(
364372
[self.config.get("pid", None)]
365373
)

0 commit comments

Comments
 (0)