 import math
 import logging
 import datetime
+import time
+import tarfile
 from dataclasses import dataclass, astuple
 from typing import IO, List, Set, Type, Iterable, Tuple
+from io import BytesIO
+from pathlib import PurePath

 from volatility3.framework.constants import architectures
-from volatility3.framework import renderers, interfaces, exceptions
+from volatility3.framework import constants, renderers, interfaces, exceptions
 from volatility3.framework.renderers import format_hints
 from volatility3.framework.interfaces import plugins
 from volatility3.framework.configuration import requirements
@@ -625,3 +629,260 @@ def run(self):
         return renderers.TreeGrid(
             headers, Files.format_fields_with_headers(headers, self._generator())
         )
+
+
+class RecoverFs(plugins.PluginInterface):
+    """Recovers the cached filesystem (directories, files, symlinks) into a compressed tarball.
+
+    Details: level 0 directories are named after the UUID of the parent superblock;
+    metadata are not replicated to the extracted objects; object modification times
+    are set to the plugin run time; absolute symlinks are converted to relative
+    symlinks to prevent referencing the analyst's filesystem.
+    Troubleshooting: to fix extraction errors related to long paths, please consider
+    using https://github.com/mxmlnkn/ratarmount.
+    """
+
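+    # Illustrative archive layout (example paths, not literal output):
+    #   /<superblock uuid>/etc/hostname
+    #   /<superblock uuid>/etc/ssl/cert.pem -> ../../usr/lib/ssl/cert.pem
+    #   /<major>:<minor>/...   (fallback when the kernel lacks s_uuid)
+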
+    _version = (1, 0, 0)
+    _required_framework_version = (2, 21, 0)
+
+    @classmethod
+    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
+        return [
+            requirements.ModuleRequirement(
+                name="kernel",
+                description="Linux kernel",
+                architectures=architectures.LINUX_ARCHS,
+            ),
+            requirements.PluginRequirement(
+                name="files", plugin=Files, version=(1, 1, 0)
+            ),
+            requirements.PluginRequirement(
+                name="inodepages", plugin=InodePages, version=(3, 0, 0)
+            ),
+            requirements.ChoiceRequirement(
+                name="compression_format",
+                description="Compression format (default: gz)",
+                choices=["gz", "bz2", "xz"],
+                default="gz",
+                optional=True,
+            ),
+        ]
+
+    def _tar_add_reg_inode(
+        self,
+        context: interfaces.context.ContextInterface,
+        layer_name: str,
+        tar: tarfile.TarFile,
+        reg_inode_in: InodeInternal,
+        path_prefix: str = "",
+        mtime: float = None,
+    ) -> int:
+        """Extracts a REG inode's content and writes it to a TarFile object.
+
+        Args:
+            context: The context on which to operate
+            layer_name: The name of the layer on which to operate
+            tar: The TarFile object to write to
+            reg_inode_in: The inode to extract content from
+            path_prefix: A custom path prefix to prepend the inode path with
+            mtime: The modification time to set the TarInfo object to
+
+        Returns:
+            The number of extracted bytes
+        """
+        inode_content_buffer = BytesIO()
+        InodePages.write_inode_content_to_stream(
+            context, layer_name, reg_inode_in.inode, inode_content_buffer
+        )
+        inode_content_buffer.seek(0)
+        handle_buffer_size = inode_content_buffer.getbuffer().nbytes
+
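+        # The whole file content is materialized in memory first so that
+        # TarInfo.size can be set before calling addfile().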
+        tar_info = tarfile.TarInfo(path_prefix + reg_inode_in.path)
+        # The tarfile module only has read support for sparse files:
+        # https://docs.python.org/3.12/library/tarfile.html#tarfile.LNKTYPE:~:text=and%20longlink%20extensions%2C-,read%2Donly%20support,-for%20all%20variants
+        tar_info.type = tarfile.REGTYPE
+        tar_info.size = handle_buffer_size
+        tar_info.mode = 0o444
+        if mtime is not None:
+            tar_info.mtime = mtime
+        tar.addfile(tar_info, inode_content_buffer)
+
+        return handle_buffer_size
+
+    def _tar_add_dir(
+        self,
+        tar: tarfile.TarFile,
+        directory_path: str,
+        mtime: float = None,
+    ) -> None:
+        """Adds a directory path to a TarFile object, based on a DIR inode.
+
+        Args:
+            tar: The TarFile object to write to
+            directory_path: The directory path to create
+            mtime: The modification time to set the TarInfo object to
+        """
+        tar_info = tarfile.TarInfo(directory_path)
+        tar_info.type = tarfile.DIRTYPE
+        tar_info.mode = 0o755
+        if mtime is not None:
+            tar_info.mtime = mtime
+        tar.addfile(tar_info)
+
+    def _tar_add_lnk(
+        self,
+        tar: tarfile.TarFile,
+        symlink_source: str,
+        symlink_dest: str,
+        symlink_source_prefix: str = "",
+        mtime: float = None,
+    ) -> None:
+        """Adds a symlink to a TarFile object.
+
+        Args:
+            tar: The TarFile object to write to
+            symlink_source: The symlink source path
+            symlink_dest: The symlink target/destination
+            symlink_source_prefix: A custom path prefix to prepend the symlink source with
+            mtime: The modification time to set the TarInfo object to
+        """
+        # Patch symlinks pointing to absolute paths,
+        # to prevent referencing the host filesystem.
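+        # e.g. a link at "/etc/ssl/cert.pem" pointing to "/usr/lib/ssl/cert.pem"
+        # becomes "../../usr/lib/ssl/cert.pem", which still resolves inside the
+        # per-superblock prefix directory after extraction.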
+        if symlink_dest.startswith("/"):
+            relative_dest = PurePath(symlink_dest).relative_to(PurePath("/"))
+            # Remove the leading "/" to prevent an extra undesired "../" in the output
+            symlink_dest = (
+                PurePath(
+                    *[".."] * len(PurePath(symlink_source.lstrip("/")).parent.parts)
+                )
+                / relative_dest
+            ).as_posix()
+        tar_info = tarfile.TarInfo(symlink_source_prefix + symlink_source)
+        tar_info.type = tarfile.SYMTYPE
+        tar_info.linkname = symlink_dest
+        tar_info.mode = 0o444
+        if mtime is not None:
+            tar_info.mtime = mtime
+        tar.addfile(tar_info)
+
+    def _generator(self):
+        vmlinux_module_name = self.config["kernel"]
+        vmlinux = self.context.modules[vmlinux_module_name]
+        vmlinux_layer = self.context.layers[vmlinux.layer_name]
+        tar_buffer = BytesIO()
+        tar = tarfile.open(
+            fileobj=tar_buffer,
+            mode=f"w:{self.config['compression_format']}",
+        )
+        # Set a unique timestamp for all extracted files
+        mtime = time.time()
+
+        inodes_iter = Files.get_inodes(
+            context=self.context,
+            vmlinux_module_name=vmlinux_module_name,
+            follow_symlinks=False,
+        )
+
+        # Prefix paths with the superblock UUID to prevent overlaps.
+        # Switch to device major and device minor for older kernels (< 2.6.39-rc1).
+        uuid_as_prefix = vmlinux.get_type("super_block").has_member("s_uuid")
+        if not uuid_as_prefix:
+            vollog.warning(
+                "super_block struct does not support the s_uuid attribute. Consequently, level 0 directories won't be named after the superblock UUID, but after its device_major:device_minor numbers."
+            )
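+        # Level 0 directories therefore end up named "/<s_uuid>" or, on the
+        # fallback path, "/<major>:<minor>".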
+
+        visited_paths = set()
+        seen_prefixes = set()
+        for inode_in in inodes_iter:
+
+            # Code is slightly duplicated here with the if-block below.
+            # However this prevents unneeded tar manipulation if fifo
+            # or sock inodes come through for example.
+            if not (
+                inode_in.inode.is_reg or inode_in.inode.is_dir or inode_in.inode.is_link
+            ):
+                continue
+
+            if not inode_in.path.startswith("/"):
+                vollog.debug(
+                    f'Skipping processing of potentially smeared "{inode_in.path}" inode name as it does not start with a "/".'
+                )
+                continue
+
+            # Construct the output path
+            if uuid_as_prefix:
+                prefix = f"/{inode_in.superblock.uuid}"
+            else:
+                prefix = f"/{inode_in.superblock.major}:{inode_in.superblock.minor}"
+            prefixed_path = prefix + inode_in.path
+
+            # Sanity check for already processed paths
+            if prefixed_path in visited_paths:
+                vollog.log(
+                    constants.LOGLEVEL_VV,
+                    f'Already processed prefixed inode path: "{prefixed_path}".',
+                )
+                continue
+            elif prefix not in seen_prefixes:
+                self._tar_add_dir(tar, prefix, mtime)
+                seen_prefixes.add(prefix)
+
+            visited_paths.add(prefixed_path)
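+            # Only REG inodes get a recovered byte count; directories and
+            # symlinks report N/A in the "Recovered FileSize" column.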
+            extracted_file_size = renderers.NotApplicableValue()
+
+            # An inode's parent directory is yielded first, which
+            # ensures that a file's parent path will exist beforehand.
+            # tarfile will take care of creating it anyway.
+            if inode_in.inode.is_reg:
+                extracted_file_size = self._tar_add_reg_inode(
+                    self.context,
+                    vmlinux_layer.name,
+                    tar,
+                    inode_in,
+                    prefix,
+                    mtime,
+                )
+            elif inode_in.inode.is_dir:
+                self._tar_add_dir(tar, prefixed_path, mtime)
+            elif (
+                inode_in.inode.is_link
+                and inode_in.inode.has_member("i_link")
+                and inode_in.inode.i_link
+                and inode_in.inode.i_link.is_readable()
+            ):
+                symlink_dest = inode_in.inode.i_link.dereference().cast(
+                    "string", max_length=255, encoding="utf-8", errors="replace"
+                )
+                self._tar_add_lnk(tar, inode_in.path, symlink_dest, prefix, mtime)
+                # Set path to a user-friendly representation before yielding
+                inode_in.path = InodeUser.format_symlink(inode_in.path, symlink_dest)
+            else:
+                continue
+
+            inode_out = inode_in.to_user(vmlinux_layer)
+            yield (0, astuple(inode_out) + (extracted_file_size,))
+
+        tar.close()
+        tar_buffer.seek(0)
+        output_filename = f"recovered_fs.tar.{self.config['compression_format']}"
+        with self.open(output_filename) as f:
+            f.write(tar_buffer.getvalue())
+
+    def run(self):
+        headers = [
+            ("SuperblockAddr", format_hints.Hex),
+            ("MountPoint", str),
+            ("Device", str),
+            ("InodeNum", int),
+            ("InodeAddr", format_hints.Hex),
+            ("FileType", str),
+            ("InodePages", int),
+            ("CachedPages", int),
+            ("FileMode", str),
+            ("AccessTime", datetime.datetime),
+            ("ModificationTime", datetime.datetime),
+            ("ChangeTime", datetime.datetime),
+            ("FilePath", str),
+            ("InodeSize", int),
+            ("Recovered FileSize", int),
+        ]
+
+        return renderers.TreeGrid(
+            headers, Files.format_fields_with_headers(headers, self._generator())
+        )
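
For a quick sanity check of the result, the sketch below simply lists what the plugin wrote into the archive. It assumes the default `gz` compression format and the `recovered_fs.tar.gz` output name produced above; adjust both when using `bz2` or `xz`.

```python
import tarfile

# Minimal inspection sketch: enumerate the members RecoverFs wrote to the tarball.
# Assumes the default compression_format ("gz") and the output name used above.
with tarfile.open("recovered_fs.tar.gz", "r:gz") as tar:
    for member in tar.getmembers():
        kind = "dir" if member.isdir() else "lnk" if member.issym() else "reg"
        target = f" -> {member.linkname}" if member.issym() else ""
        print(f"{kind} {member.size:>10} {member.name}{target}")
```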