diff --git a/linux-ramdump-parser-v2/aarch64iommulib.py b/linux-ramdump-parser-v2/aarch64iommulib.py
new file mode 100755
index 0000000000000000000000000000000000000000..de13e0bec26b29337b38a408ae584d7d9e091228
--- /dev/null
+++ b/linux-ramdump-parser-v2/aarch64iommulib.py
@@ -0,0 +1,385 @@
+# Copyright (c) 2016, The Linux Foundation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 and
+# only version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+from sizes import SZ_4K, SZ_64K, SZ_2M, SZ_32M, SZ_1G, SZ_256G
+from sizes import get_order, order_size_strings
+
+NUM_PT_LEVEL = 4
+NUM_FL_PTE = 512
+NUM_SL_PTE = 512
+NUM_TL_PTE = 512
+NUM_LL_PTE = 512
+
+FLSL_BASE_MASK = 0xFFFFFFFFF000
+FLSL_TYPE_BLOCK = (1 << 0)
+FLSL_TYPE_TABLE = (3 << 0)
+FLSL_PTE_TYPE_MASK = (3 << 0)
+
+LL_TYPE_PAGE = (3 << 0)
+LL_PAGE_MASK = 0xFFFFFFFFF000
+
+LL_AP_BITS = (0x3 << 6)
+LL_CH = (0x1 << 52)
+
+LL_AP_RO = (0x3 << 6)
+LL_AP_RW = (0x1 << 6)
+LL_AP_PR_RW = (0x0 << 6)
+LL_AP_PR_RO = (0x2 << 6)
+
+
+class FlatMapping(object):
+
+    def __init__(self, virt, phys=-1, type='[]', size=SZ_4K, mapped=False):
+        self.virt = virt
+        self.phys = phys
+        self.type = type
+        self.map_size = size
+        self.mapped = mapped
+
+
+class CollapsedMapping(object):
+    def __init__(self, virt_start, virt_end, phys_start=-1, phys_end=-1,
+                 map_type='[]', map_size=SZ_4K, mapped=False):
+
+        self.virt_start = virt_start
+        self.virt_end = virt_end - 1
+        self.phys_start = phys_start
+        self.phys_end = phys_end - 1
+        self.map_type = map_type
+        self.map_size = map_size
+        self.mapped = mapped
+
+
+def add_collapsed_mapping(mappings, virt_start, virt_end, phys_start, phys_end,
+                          map_type, map_size, mapped):
+    map = CollapsedMapping(virt_start, virt_end, phys_start, phys_end,
+                           map_type, map_size, mapped)
+
+    if virt_start not in mappings:
+        mappings[virt_start] = map
+    else:
+        map.map_type = 'Duplicate'
+        mappings[virt_start] = map
+
+    return mappings
+
+
+def create_collapsed_mapping(flat_mapping):
+    collapsed_mapping = {}
+
+    if len(flat_mapping.keys()) > 0:
+        virt_addrs = sorted(flat_mapping.keys())
+        start_map = prev_map = flat_mapping[virt_addrs[0]]
+        last_mapping = False
+        new_mapping = False
+
+        for virt in virt_addrs[1:]:
+            map = flat_mapping[virt]
+
+            if map.map_size == prev_map.map_size \
+                    and map.type == prev_map.type \
+                    and map.mapped == prev_map.mapped \
+                    and not map.mapped:
+                new_mapping = False
+
+                if virt == virt_addrs[-1]:
+                    last_mapping = True
+
+            else:
+                new_mapping = True
+
+            if new_mapping:
+                collapsed_mapping = add_collapsed_mapping(
+                    collapsed_mapping, start_map.virt,
+                    map.virt, start_map.phys,
+                    start_map.phys + start_map.map_size,
+                    start_map.type, start_map.map_size, start_map.mapped)
+                start_map = map
+
+            elif last_mapping:
+                collapsed_mapping = add_collapsed_mapping(
+                    collapsed_mapping, start_map.virt,
+                    0xFFFFFFFFFFFF + 1, start_map.phys,
+                    start_map.phys + start_map.map_size,
+                    start_map.type, start_map.map_size, start_map.mapped)
+
+            prev_map = map
+    return collapsed_mapping
+
+
+def add_flat_mapping(mappings, fl_idx, sl_idx, tl_idx, ll_idx,
+                     phy_addr, map_type, page_size, mapped):
+    virt = (fl_idx << 39) | (sl_idx << 30) | (tl_idx << 21) | (ll_idx << 12)
+    map_type_str = '[R/W]'
+
+    if map_type == LL_AP_RO:
+        map_type_str = '[RO]'
+    elif map_type == LL_AP_PR_RW:
+        map_type_str = '[P R/W]'
+    elif map_type == LL_AP_PR_RO:
+        map_type_str = '[P RO]'
+
+    map = FlatMapping(virt, phy_addr, map_type_str, page_size, mapped)
+
+    if virt not in mappings:
+        mappings[virt] = map
+    else:
+        map.type = 'Duplicate'
+        mappings[virt] = map
+
+    return mappings
+
+
+def get_super_section_mapping_info(ramdump, pg_table, index):
+    phy_addr = ramdump.read_u64(pg_table, False)
+    current_phy_addr = -1
+    current_page_size = SZ_1G
+    current_map_type = 0
+    status = True
+
+    if phy_addr is not None:
+        current_map_type = phy_addr & LL_AP_BITS
+        current_phy_addr = phy_addr & 0xFFFFC0000000
+    else:
+        status = False
+
+    return (current_phy_addr, current_page_size, current_map_type, status)
+
+
+def get_section_mapping_info(ramdump, pg_table, index):
+    phy_addr = ramdump.read_u64(pg_table, False)
+    current_phy_addr = -1
+    current_page_size = SZ_2M
+    current_map_type = 0
+    status = True
+    section_skip_count = 0
+
+    if phy_addr is not None:
+        current_map_type = phy_addr & LL_AP_BITS
+
+        if phy_addr & LL_CH:
+            current_phy_addr = phy_addr & 0xFFFFFE000000
+            current_page_size = SZ_32M
+            section_skip_count = 15
+            # Current + next 15 entries are contiguous
+        else:
+            current_phy_addr = phy_addr & 0xFFFFFFE00000
+            current_page_size = SZ_2M
+
+    return (current_phy_addr, current_page_size, current_map_type,
+            status, section_skip_count)
+
+
+def get_mapping_info(ramdump, pg_table, index):
+    ll_pte = pg_table + (index * 8)
+    phy_addr = ramdump.read_u64(ll_pte, False)
+    current_phy_addr = -1
+    current_page_size = SZ_4K
+    current_map_type = 0
+    status = True
+    skip_count = 0
+
+    if phy_addr is not None:
+        current_map_type = phy_addr & LL_AP_BITS
+
+        if phy_addr & LL_TYPE_PAGE:
+            current_phy_addr = phy_addr & 0xFFFFFFFFF000
+            if phy_addr & LL_CH:
+                current_phy_addr = phy_addr & 0xFFFFFFFF0000
+                current_page_size = SZ_64K
+                skip_count = 15
+                # Current + next 15 entries are contiguous
+
+        elif phy_addr != 0:
+            # Error condition if at last level it is not LL_TYPE_PAGE
+            current_phy_addr = phy_addr
+            status = False
+    return (current_phy_addr, current_page_size, current_map_type,
+            status, skip_count)
+
+
+def fl_entry(ramdump, fl_pte, skip_fl):
+    fl_pg_table_entry = ramdump.read_u64(fl_pte)
+    sl_pte = fl_pg_table_entry & FLSL_BASE_MASK
+
+    if skip_fl == 1:
+        fl_pg_table_entry = FLSL_TYPE_TABLE
+        sl_pte = fl_pte
+
+    # Make the 1st level entry look like a dummy entry of type table in
+    # the case of only 3 level page tables, and make sl_pte = fl_pte
+    # since we start parsing from the second level.
+
+    return (fl_pg_table_entry, sl_pte)
+
+
+def parse_2nd_level_table(ramdump, sl_pg_table_entry, fl_index,
+                          sl_index, tmp_mapping):
+    tl_pte = sl_pg_table_entry & FLSL_BASE_MASK
+    section_skip_count = 0
+
+    for tl_index in range(0, NUM_TL_PTE):
+        tl_pg_table_entry = ramdump.read_u64(tl_pte, False)
+
+        if tl_pg_table_entry == 0 or tl_pg_table_entry is None:
+            tmp_mapping = add_flat_mapping(
+                tmp_mapping, fl_index, sl_index,
+                tl_index, 0, -1,
+                -1, SZ_2M, False)
+            tl_pte += 8
+            continue
+
+        tl_entry_type = tl_pg_table_entry & FLSL_PTE_TYPE_MASK
+        if tl_entry_type == FLSL_TYPE_TABLE:
+            ll_pte = tl_pg_table_entry & FLSL_BASE_MASK
+            skip_count = 0
+
+            for ll_index in range(0, NUM_LL_PTE):
+                if skip_count:
+                    skip_count -= 1
+                    continue
+
+                (phy_addr, page_size, map_type, status,
+                 skip_count) = get_mapping_info(
+                    ramdump, ll_pte,
+                    ll_index)
+
+                if status and phy_addr != -1:
+                    tmp_mapping = add_flat_mapping(
+                        tmp_mapping, fl_index, sl_index,
+                        tl_index, ll_index, phy_addr,
+                        map_type, page_size, True)
+                else:
+                    tmp_mapping = add_flat_mapping(
+                        tmp_mapping, fl_index, sl_index,
+                        tl_index, ll_index, -1,
+                        -1, page_size, False)
+
+        elif tl_entry_type == FLSL_TYPE_BLOCK:
+            if section_skip_count:
+                section_skip_count -= 1
+                continue
+
+            (phy_addr, page_size,
+             map_type, status,
+             section_skip_count) = get_section_mapping_info(
+                ramdump, tl_pte, tl_index)
+
+            if status and phy_addr != -1:
+                tmp_mapping = add_flat_mapping(
+                    tmp_mapping, fl_index, sl_index,
+                    tl_index, 0, phy_addr,
+                    map_type, page_size, True)
+
+        tl_pte += 8
+    return tmp_mapping
+
+
+def create_flat_mappings(ramdump, pg_table, level):
+    tmp_mapping = {}
+    fl_pte = pg_table
+    skip_fl = 0
+    fl_range = NUM_FL_PTE
+    read_virtual = False
+
+    if level == 3:
+        skip_fl = 1
+        fl_range = 1
+        read_virtual = True
+
+    # In case we have only 3 level page tables we want to skip the first
+    # level and just parse the second, third and last levels. To keep the
+    # code unified for 3 level and 4 level parsing, run the first level
+    # loop once and jump directly to parsing from the second level.
+
+    for fl_index in range(0, fl_range):
+
+        (fl_pg_table_entry, sl_pte) = fl_entry(ramdump, fl_pte, skip_fl)
+
+        if fl_pg_table_entry == 0:
+            tmp_mapping = add_flat_mapping(
+                tmp_mapping, fl_index, 0, 0, 0,
+                -1, -1, SZ_256G, False)
+            fl_pte += 8
+            continue
+
+        for sl_index in range(0, NUM_SL_PTE):
+
+            sl_pg_table_entry = ramdump.read_u64(sl_pte, read_virtual)
+
+            if sl_pg_table_entry == 0 or sl_pg_table_entry is None:
+                tmp_mapping = add_flat_mapping(tmp_mapping,
+                                               fl_index, sl_index, 0, 0,
+                                               -1, -1, SZ_1G, False)
+                sl_pte += 8
+                continue
+
+            sl_entry_type = sl_pg_table_entry & FLSL_PTE_TYPE_MASK
+            if sl_entry_type == FLSL_TYPE_TABLE:
+                tmp_mapping = parse_2nd_level_table(ramdump, sl_pg_table_entry,
+                                                    fl_index, sl_index,
+                                                    tmp_mapping)
+            elif sl_entry_type == FLSL_TYPE_BLOCK:
+                (phy_addr, page_size, map_type, status) \
+                    = get_super_section_mapping_info(ramdump, sl_pte, sl_index)
+
+                if status and phy_addr != -1:
+                    tmp_mapping = add_flat_mapping(
+                        tmp_mapping, fl_index, sl_index, 0, 0,
+                        phy_addr, map_type, page_size, True)
+
+            sl_pte += 8
+        fl_pte += 8
+    return tmp_mapping
+
+
+def parse_aarch64_tables(ramdump, d, domain_num):
+    fname = 'arm_iommu_domain_%02d.txt' % (domain_num)
+    with ramdump.open_file(fname) as outfile:
+
+        redirect = 'OFF'
+        if d.redirect is None:
+            redirect = 'UNKNOWN'
+        elif d.redirect > 0:
+            redirect = 'ON'
+        iommu_context = ' '.join('%s (%s)' % (name, num)
+                                 for (name, num) in d.ctx_list)
+        iommu_context = iommu_context or 'None attached'
+
+        outfile.write(
+            'IOMMU Context: %s. Domain: %s '
+            '[L2 cache redirect for page tables is %s]\n' % (
+                iommu_context, d.client_name, redirect))
+        outfile.write(
+            '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] '
+            '[Attributes][Page Table Entry Size]\n')
+        if d.pg_table == 0:
+            outfile.write(
+                'No Page Table Found. (Probably a secure domain)\n')
+        else:
+            flat_mapping = create_flat_mappings(ramdump, d.pg_table, d.level)
+            collapsed_mapping = create_collapsed_mapping(flat_mapping)
+
+            for virt in sorted(collapsed_mapping.keys()):
+                mapping = collapsed_mapping[virt]
+
+                if mapping.mapped:
+                    outfile.write(
+                        '0x%x--0x%x [0x%x] A:0x%x--0x%x [0x%x] %s[%s] \n' %
+                        (mapping.virt_start, mapping.virt_end,
+                         mapping.map_size, mapping.phys_start,
+                         mapping.phys_end, mapping.map_size, mapping.map_type,
+                         order_size_strings[get_order(mapping.map_size)]))
+                else:
+                    outfile.write(
+                        '0x%x--0x%x [0x%x] [UNMAPPED]\n' %
+                        (mapping.virt_start, mapping.virt_end,
+                         mapping.virt_end - mapping.virt_start))
diff --git a/linux-ramdump-parser-v2/iommulib.py b/linux-ramdump-parser-v2/iommulib.py
index 5cf1981ccad72fb61b8c25bc7155f40c3b5e051b..fa6e56bea715e081bfbb3ef92e05615b796c22fc 100644
--- a/linux-ramdump-parser-v2/iommulib.py
+++ b/linux-ramdump-parser-v2/iommulib.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2014, The Linux Foundation. All rights reserved.
+# Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 and
@@ -12,31 +12,115 @@
 import rb_tree
 import linux_list as llist
 
+ARM_SMMU_DOMAIN = 0
+MSM_SMMU_DOMAIN = 1
+
+
 class Domain(object):
-    def __init__(self, domain_num, pg_table, redirect, ctx_list, client_name):
+    def __init__(self, pg_table, redirect, ctx_list, client_name,
+                 domain_type=MSM_SMMU_DOMAIN, level=4, domain_num=-1):
         self.domain_num = domain_num
         self.pg_table = pg_table
         self.redirect = redirect
         self.ctx_list = ctx_list
         self.client_name = client_name
+        self.level = level
+        self.domain_type = domain_type
 
     def __repr__(self):
         return "#%d: %s" % (self.domain_num, self.client_name)
 
+
 class IommuLib(object):
     def __init__(self, ramdump):
         self.ramdump = ramdump
         self.domain_list = []
         root = self.ramdump.read_word('domain_root')
-        if root is None:
+
+        list_head_attachments = self.ramdump.read_pointer(
+            'iommu_debug_attachments')
+
+        if list_head_attachments is not None:
+            list_head_arm_addr = self.ramdump.read_structure_field(
+                list_head_attachments, 'struct list_head', 'prev')
+            list_walker = llist.ListWalker(
+                self.ramdump, list_head_arm_addr,
+                self.ramdump.field_offset('struct iommu_debug_attachment',
+                                          'list'))
+            list_walker.walk(list_head_attachments,
+                             self._iommu_domain_find_default,
+                             self.domain_list)
+
+        if root is not None:
+            rb_walker = rb_tree.RbTreeWalker(self.ramdump)
+            rb_walker.walk(root, self._iommu_domain_func, self.domain_list)
+
+    def _iommu_domain_find_default(self, node, domain_list):
+        domain_ptr = self.ramdump.read_structure_field(
+            node, 'struct iommu_debug_attachment', 'domain')
+        priv_ptr = self.ramdump.read_structure_field(
+            domain_ptr, 'struct iommu_domain', 'priv')
+
+        if not (domain_ptr and priv_ptr):
             return
-        rb_walker = rb_tree.RbTreeWalker(self.ramdump)
-        rb_walker.walk(root, self._iommu_domain_func, self.domain_list)
+
+        arm_smmu_ops = self.ramdump.address_of('arm_smmu_ops')
+
+        dev_ptr = self.ramdump.read_structure_field(
+            node, 'struct iommu_debug_attachment', 'dev')
+
+        kobj_ptr = dev_ptr + self.ramdump.field_offset('struct device', 'kobj')
+
+        client_name = self.ramdump.read_structure_cstring(
+            kobj_ptr, 'struct kobject', 'name')
+
+        iommu_domain_ops = self.ramdump.read_structure_field(
+            domain_ptr, 'struct iommu_domain', 'ops')
+
+        if iommu_domain_ops == arm_smmu_ops:
+            pgtbl_ops_ptr = self.ramdump.read_structure_field(
+                priv_ptr, 'struct arm_smmu_domain', 'pgtbl_ops')
+
+            io_pgtable_ptr = self.ramdump.container_of(
+                pgtbl_ops_ptr, 'struct io_pgtable', 'ops')
+
+            arm_lpae_io_pgtable_ptr = self.ramdump.container_of(
+                io_pgtable_ptr, 'struct arm_lpae_io_pgtable', 'iop')
+
+            pg_table = self.ramdump.read_structure_field(
+                arm_lpae_io_pgtable_ptr, 'struct arm_lpae_io_pgtable', 'pgd')
+
+            level = self.ramdump.read_structure_field(
+                arm_lpae_io_pgtable_ptr, 'struct arm_lpae_io_pgtable',
+                'levels')
+
+            domain_create = Domain(pg_table, 0, [], client_name,
+                                   ARM_SMMU_DOMAIN, level)
+            domain_list.append(domain_create)
+
+        else:
+            priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv',
+                                                       'pt')
+            pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt',
+                                                       'fl_table')
+            redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt',
+                                                        'redirect')
+
+            if priv_pt_offset is not None:
+                pg_table = self.ramdump.read_u64(
+                    priv_ptr + priv_pt_offset + pgtable_offset)
+                redirect = self.ramdump.read_u64(
+                    priv_ptr + priv_pt_offset + redirect_offset)
+
+                domain_create = Domain(pg_table, redirect, [],
+                                       client_name)
+                domain_list.append(domain_create)
 
     def _iommu_list_func(self, node, ctx_list):
         ctx_drvdata_name_ptr = self.ramdump.read_word(
-            node + self.ramdump.field_offset('struct msm_iommu_ctx_drvdata', 'name'))
+            node + self.ramdump.field_offset('struct msm_iommu_ctx_drvdata',
+                                             'name'))
         ctxdrvdata_num_offset = self.ramdump.field_offset(
             'struct msm_iommu_ctx_drvdata', 'num')
         num = self.ramdump.read_u32(node + ctxdrvdata_num_offset)
@@ -70,13 +154,17 @@
             'struct msm_iommu_priv', 'list_attached')
 
         if list_attached_offset is not None:
-            list_attached = self.ramdump.read_word(priv_ptr + list_attached_offset)
+            list_attached = self.ramdump.read_word(priv_ptr +
+                                                   list_attached_offset)
         else:
             list_attached = None
 
-        priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv', 'pt')
-        pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt', 'fl_table')
-        redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt', 'redirect')
+        priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv',
+                                                   'pt')
+        pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt',
+                                                   'fl_table')
+        redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt',
+                                                    'redirect')
 
         if priv_pt_offset is not None:
             pg_table = self.ramdump.read_word(
@@ -87,12 +175,14 @@
             # On some builds we are unable to look up the offsets so hardcode
             # the offsets.
             pg_table = self.ramdump.read_word(priv_ptr + 0)
-            redirect = self.ramdump.read_u32(priv_ptr + self.ramdump.sizeof('void *'))
-
-            # Note: On some code bases we don't have this pg_table and redirect in the priv structure (see msm_iommu_sec.c). It only
-            # contains list_attached. If this is the case we can detect that by checking whether
-            # pg_table == redirect (prev == next pointers of the attached
-            # list).
+            redirect = self.ramdump.read_u32(priv_ptr +
+                                             self.ramdump.sizeof('void *'))
+
+            # Note: On some code bases we don't have this pg_table and redirect
+            # in the priv structure (see msm_iommu_sec.c). It only contains
+            # list_attached. If this is the case we can detect that by checking
+            # whether pg_table == redirect (prev == next pointers of the
+            # attached list).
             if pg_table == redirect:
                 # This is a secure domain. We don't have access to the page
                 # tables.
@@ -103,8 +193,10 @@
         if list_attached is not None and list_attached != 0:
             list_walker = llist.ListWalker(
                 self.ramdump, list_attached,
-                self.ramdump.field_offset('struct msm_iommu_ctx_drvdata', 'attached_elm'))
+                self.ramdump.field_offset('struct msm_iommu_ctx_drvdata',
+                                          'attached_elm'))
             list_walker.walk(list_attached, self._iommu_list_func, ctx_list)
 
         domain_list.append(
-            Domain(domain_num, pg_table, redirect, ctx_list, client_name))
+            Domain(pg_table, redirect, ctx_list, client_name,
+                   domain_num=domain_num))
diff --git a/linux-ramdump-parser-v2/lpaeiommulib.py b/linux-ramdump-parser-v2/lpaeiommulib.py
index 64fe5eed0f7c2b64109083697528c8502b742df1..b99aa0db6c5ce02636e3238a2d7361c6982470a3 100644
--- a/linux-ramdump-parser-v2/lpaeiommulib.py
+++ b/linux-ramdump-parser-v2/lpaeiommulib.py
@@ -157,8 +157,9 @@
     cc = OrderedDict(sorted(cc.items()))
     return cc
 
-def parse_long_form_tables(dump, d):
-    fname = 'msm_iommu_domain_%02d.txt' % (d.domain_num)
+
+def parse_long_form_tables(dump, d, domain_num):
+    fname = 'msm_iommu_domain_%02d.txt' % (domain_num)
     with dump.open_file(fname) as outfile:
         print_out_str('LPAE Iommu page tables: ' + fname)
diff --git a/linux-ramdump-parser-v2/parsers/iommu.py b/linux-ramdump-parser-v2/parsers/iommu.py
index f4d01725869ab116a53b0524cd58c61b723be5c8..8a36681f1903c69b32674c4960305db0f2347036 100644
--- a/linux-ramdump-parser-v2/parsers/iommu.py
+++ b/linux-ramdump-parser-v2/parsers/iommu.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+# Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 and
@@ -14,8 +14,10 @@ import math
 from print_out import print_out_str
 from parser_util import register_parser, RamParser
 from sizes import SZ_4K, SZ_64K, SZ_1M, SZ_16M, get_order, order_size_strings
-from iommulib import IommuLib
+from iommulib import IommuLib, MSM_SMMU_DOMAIN
 from lpaeiommulib import parse_long_form_tables
+from aarch64iommulib import parse_aarch64_tables
+
 
 @register_parser('--print-iommu-pg-tables', 'Print IOMMU page tables')
 class IOMMU(RamParser):
@@ -303,9 +305,9 @@
         self.out_file.write('0x%08x--0x%08x [0x%08x] [UNMAPPED]\n' %
                             (mapping.virt_start, mapping.virt_end, mapping.virt_size()))
 
-    def parse_short_form_tables(self, d):
+    def parse_short_form_tables(self, d, domain_num):
         self.out_file = self.ramdump.open_file(
-            'msm_iommu_domain_%02d.txt' % (d.domain_num))
+            'msm_iommu_domain_%02d.txt' % (domain_num))
         redirect = 'OFF'
         if d.redirect is None:
             redirect = 'UNKNOWN'
@@ -318,8 +320,9 @@
                 iommu_context += '%s (%d) ' % (name, num)
         iommu_context = iommu_context.strip()
 
-        self.out_file.write('IOMMU Context: %s. Domain: %s (%d) [L2 cache redirect for page tables is %s]\n' % (
-            iommu_context, d.client_name, d.domain_num, redirect))
+        self.out_file.write('IOMMU Context: %s. Domain: %s '
+                            '[L2 cache redirect for page tables is %s]\n' % (
+                                iommu_context, d.client_name, redirect))
         self.out_file.write(
             '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] [Size ] [Read/Write][Page Table Entry Size]\n')
         if d.pg_table == 0:
@@ -339,8 +342,14 @@
                 '[!] WARNING: IOMMU domains was not found in this build. No IOMMU page tables will be generated')
             return
 
-        for d in self.domain_list:
+        for (domain_num, d) in enumerate(self.domain_list):
             if self.ramdump.is_config_defined('CONFIG_IOMMU_LPAE'):
-                parse_long_form_tables(self.ramdump, d)
+                parse_long_form_tables(self.ramdump, d, domain_num)
+            elif (self.ramdump.is_config_defined('CONFIG_IOMMU_AARCH64') or
+                  self.ramdump.is_config_defined('CONFIG_ARM_SMMU')):
+                if (d.domain_type == MSM_SMMU_DOMAIN):
+                    self.parse_short_form_tables(d, domain_num)
+                else:
+                    parse_aarch64_tables(self.ramdump, d, domain_num)
             else:
-                self.parse_short_form_tables(d)
+                self.parse_short_form_tables(d, domain_num)
diff --git a/linux-ramdump-parser-v2/sizes.py b/linux-ramdump-parser-v2/sizes.py
index 0f1a937f7ce6124ab8d26504692949943fd1309a..eab78df7fd2092d81061800734854c7a2150582a 100644
--- a/linux-ramdump-parser-v2/sizes.py
+++ b/linux-ramdump-parser-v2/sizes.py
@@ -67,6 +67,7 @@ SZ_512M = 0x20000000
 SZ_1G = 0x40000000
 SZ_2G = 0x80000000
 
+SZ_256G = 0x8000000000
 
 size_strings = OrderedDict((
     (SZ_1, "1"),
@@ -101,6 +102,7 @@ size_strings = OrderedDict((
     (SZ_512M, "512M"),
     (SZ_1G, "1G"),
     (SZ_2G, "2G"),
+    (SZ_256G, "256G"),
 ))
 
 def is_power_of_two(n):
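
Reviewer note (not part of the patch): the sketch below illustrates the virtual-address packing that add_flat_mapping() in aarch64iommulib.py relies on — a 48-bit AArch64 VA with a 4KB translation granule, 9 index bits per level and a 12-bit page offset. split_va() is a hypothetical helper written only for this illustration; it does not exist in the parser.

    # Illustrative only: recover the per-level table indices that
    # add_flat_mapping() packs into a flat virtual address at bit
    # positions 39/30/21/12 (4KB granule, 4-level layout).
    def split_va(virt):
        fl_idx = (virt >> 39) & 0x1FF   # VA bits [47:39], first level
        sl_idx = (virt >> 30) & 0x1FF   # VA bits [38:30], second level
        tl_idx = (virt >> 21) & 0x1FF   # VA bits [29:21], third level
        ll_idx = (virt >> 12) & 0x1FF   # VA bits [20:12], last level
        return (fl_idx, sl_idx, tl_idx, ll_idx)


    if __name__ == '__main__':
        # Round-trip against the packing done in add_flat_mapping().
        fl, sl, tl, ll = 1, 2, 3, 4
        virt = (fl << 39) | (sl << 30) | (tl << 21) | (ll << 12)
        assert virt == 0x8080604000
        assert split_va(virt) == (fl, sl, tl, ll)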