diff --git a/docs/conf.py b/docs/conf.py index 0f922ea284266063ca4ca14266d2b41e84085fe4..b0a00090a5112ac0bf84f7f0cab6dad9e60beee0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -259,3 +259,5 @@ texinfo_documents = [ # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False + +autoclass_content = 'both' diff --git a/linux-ramdump-parser-v2/README b/linux-ramdump-parser-v2/README index 2b9f5da8ca3be5cc0383ad2a97d717252f42469e..84993be6db794deadcb6770e0fd9595329fa77a7 100644 --- a/linux-ramdump-parser-v2/README +++ b/linux-ramdump-parser-v2/README @@ -91,6 +91,7 @@ nm_path - absolute path to the gdb tool for the ramdumps gdb64_path - absolute path to the 64-bit gdb tool for the ramdumps nm64_path - absolute path to the 64-bit nm tool for the ramdumps qtf_path - absolute path to qtf tool executable +scandump_parser_path - absolute path to scandump parser for the ramdumps Note that local_settings.py is just a python file so the file may take advantage of python features. diff --git a/linux-ramdump-parser-v2/aarch64iommulib.py b/linux-ramdump-parser-v2/aarch64iommulib.py new file mode 100755 index 0000000000000000000000000000000000000000..de13e0bec26b29337b38a408ae584d7d9e091228 --- /dev/null +++ b/linux-ramdump-parser-v2/aarch64iommulib.py @@ -0,0 +1,385 @@ +# Copyright (c) 2016, The Linux Foundation. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +from sizes import SZ_4K, SZ_64K, SZ_2M, SZ_32M, SZ_1G, SZ_256G +from sizes import get_order, order_size_strings + +NUM_PT_LEVEL = 4 +NUM_FL_PTE = 512 +NUM_SL_PTE = 512 +NUM_TL_PTE = 512 +NUM_LL_PTE = 512 + +FLSL_BASE_MASK = 0xFFFFFFFFF000 +FLSL_TYPE_BLOCK = (1 << 0) +FLSL_TYPE_TABLE = (3 << 0) +FLSL_PTE_TYPE_MASK = (3 << 0) + +LL_TYPE_PAGE = (3 << 0) +LL_PAGE_MASK = 0xFFFFFFFFF000 + +LL_AP_BITS = (0x3 << 6) +LL_CH = (0x1 << 52) + +LL_AP_RO = (0x3 << 6) +LL_AP_RW = (0x1 << 6) +LL_AP_PR_RW = (0x0 << 6) +LL_AP_PR_RO = (0x2 << 6) + + +class FlatMapping(object): + + def __init__(self, virt, phys=-1, type='[]', size=SZ_4K, mapped=False): + self.virt = virt + self.phys = phys + self.type = type + self.map_size = size + self.mapped = mapped + + +class CollapsedMapping(object): + def __init__(self, virt_start, virt_end, phys_start=-1, phys_end=-1, + map_type='[]', map_size=SZ_4K, mapped=False): + + self.virt_start = virt_start + self.virt_end = virt_end - 1 + self.phys_start = phys_start + self.phys_end = phys_end - 1 + self.map_type = map_type + self.map_size = map_size + self.mapped = mapped + + +def add_collapsed_mapping(mappings, virt_start, virt_end, phys_start, phys_end, + map_type, map_size, mapped): + map = CollapsedMapping(virt_start, virt_end, phys_start, phys_end, + map_type, map_size, mapped) + + if virt_start not in mappings: + mappings[virt_start] = map + else: + map.type = 'Duplicate' + mappings[virt_start] = map + + return mappings + + +def create_collapsed_mapping(flat_mapping): + collapsed_mapping = {} + + if len(flat_mapping.keys()) > 0: + virt_addrs = sorted(flat_mapping.keys()) + start_map = prev_map = flat_mapping[virt_addrs[0]] + last_mapping = False + new_mapping = False + + for virt in virt_addrs[1:]: + map = flat_mapping[virt] + + if map.map_size == prev_map.map_size \ + and map.type == prev_map.type \ + and map.mapped == prev_map.mapped \ + and not map.mapped: + new_mapping = False + + if virt == virt_addrs[-1]: + last_mapping = True + + else: + new_mapping = True + + if new_mapping: + collapsed_mapping = add_collapsed_mapping( + collapsed_mapping, start_map.virt, + map.virt, start_map.phys, + start_map.phys + start_map.map_size, + start_map.type, start_map.map_size, start_map.mapped) + start_map = map + + elif last_mapping: + collapsed_mapping = add_collapsed_mapping( + collapsed_mapping, start_map.virt, + 0xFFFFFFFFFFFF + 1, start_map.phys, + start_map.phys + start_map.map_size, + start_map.type, start_map.map_size, start_map.mapped) + + prev_map = map + return collapsed_mapping + + +def add_flat_mapping(mappings, fl_idx, sl_idx, tl_idx, ll_idx, + phy_addr, map_type, page_size, mapped): + virt = (fl_idx << 39) | (sl_idx << 30) | (tl_idx << 21) | (ll_idx << 12) + map_type_str = '[R/W]' + + if map_type == LL_AP_RO: + map_type_str = '[RO]' + elif map_type == LL_AP_PR_RW: + map_type_str = '[P R/W]' + elif map_type == LL_AP_PR_RO: + map_type_str = '[P RO]' + + map = FlatMapping(virt, phy_addr, map_type_str, page_size, mapped) + + if virt not in mappings: + mappings[virt] = map + else: + map.type = 'Duplicate' + mappings[virt] = map + + return mappings + + +def get_super_section_mapping_info(ramdump, pg_table, index): + phy_addr = ramdump.read_u64(pg_table, False) + current_phy_addr = -1 + current_page_size = SZ_1G + current_map_type = 0 + status = True + + if phy_addr is not None: + current_map_type = phy_addr & LL_AP_BITS + current_phy_addr = phy_addr & 0xFFFFC0000000 + else: + status = False + + return (current_phy_addr, current_page_size, current_map_type, status) + + 
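# Illustrative sketch (hypothetical helpers, not from this patch): with the
# 4 KB granule and four translation levels parsed above, each level index
# covers 9 bits of the virtual address, which is why add_flat_mapping()
# reconstructs the VA as
# (fl_idx << 39) | (sl_idx << 30) | (tl_idx << 21) | (ll_idx << 12).
# A minimal, self-contained round trip of that index/VA relationship:

def _va_from_indices(fl_idx, sl_idx, tl_idx, ll_idx):
    # 512 entries per level -> 9 bits per index; pages are 4 KB (12 bits)
    return (fl_idx << 39) | (sl_idx << 30) | (tl_idx << 21) | (ll_idx << 12)

def _indices_from_va(va):
    return ((va >> 39) & 0x1FF, (va >> 30) & 0x1FF,
            (va >> 21) & 0x1FF, (va >> 12) & 0x1FF)

assert _indices_from_va(_va_from_indices(1, 2, 3, 4)) == (1, 2, 3, 4)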
+def get_section_mapping_info(ramdump, pg_table, index): + phy_addr = ramdump.read_u64(pg_table, False) + current_phy_addr = -1 + current_page_size = SZ_2M + current_map_type = 0 + status = True + section_skip_count = 0 + + if phy_addr is not None: + current_map_type = phy_addr & LL_AP_BITS + + if phy_addr & LL_CH: + current_phy_addr = phy_addr & 0xFFFFFE000000 + current_page_size = SZ_32M + section_skip_count = 15 + # Current + next 15 entries are contiguous + else: + current_phy_addr = phy_addr & 0xFFFFFFE00000 + current_page_size = SZ_2M + + return (current_phy_addr, current_page_size, current_map_type, + status, section_skip_count) + + +def get_mapping_info(ramdump, pg_table, index): + ll_pte = pg_table + (index * 8) + phy_addr = ramdump.read_u64(ll_pte, False) + current_phy_addr = -1 + current_page_size = SZ_4K + current_map_type = 0 + status = True + skip_count = 0 + + if phy_addr is not None: + current_map_type = phy_addr & LL_AP_BITS + + if phy_addr & LL_TYPE_PAGE: + current_phy_addr = phy_addr & 0xFFFFFFFFF000 + if phy_addr & LL_CH: + current_phy_addr = phy_addr & 0xFFFFFFFF0000 + current_page_size = SZ_64K + skip_count = 15 + # Current + next 15 entries are contiguous + + elif phy_addr != 0: + # Error condition if at last level it is not LL_TYPE_PAGE + current_phy_addr = phy_addr + status = False + return (current_phy_addr, current_page_size, current_map_type, + status, skip_count) + + +def fl_entry(ramdump, fl_pte, skip_fl): + fl_pg_table_entry = ramdump.read_u64(fl_pte) + sl_pte = fl_pg_table_entry & FLSL_BASE_MASK + + if skip_fl == 1: + fl_pg_table_entry = FLSL_TYPE_TABLE + sl_pte = fl_pte + + # Make 1st level entry look like dummy entry of type table in + # case of only 3 level page tables and make sl_pte = fl_pte + # as we start parsing from second level. 
+ + return (fl_pg_table_entry, sl_pte) + + +def parse_2nd_level_table(ramdump, sl_pg_table_entry, fl_index, + sl_index, tmp_mapping): + tl_pte = sl_pg_table_entry & FLSL_BASE_MASK + section_skip_count = 0 + + for tl_index in range(0, NUM_TL_PTE): + tl_pg_table_entry = ramdump.read_u64(tl_pte, False) + + if tl_pg_table_entry == 0 or tl_pg_table_entry is None: + tmp_mapping = add_flat_mapping( + tmp_mapping, fl_index, sl_index, + tl_index, 0, -1, + -1, SZ_2M, False) + tl_pte += 8 + continue + + tl_entry_type = tl_pg_table_entry & FLSL_PTE_TYPE_MASK + if tl_entry_type == FLSL_TYPE_TABLE: + ll_pte = tl_pg_table_entry & FLSL_BASE_MASK + skip_count = 0 + + for ll_index in range(0, NUM_LL_PTE): + if skip_count: + skip_count -= 1 + continue + + (phy_addr, page_size, map_type, status, + skip_count) = get_mapping_info( + ramdump, ll_pte, + ll_index) + + if status and phy_addr != -1: + tmp_mapping = add_flat_mapping( + tmp_mapping, fl_index, sl_index, + tl_index, ll_index, phy_addr, + map_type, page_size, True) + else: + tmp_mapping = add_flat_mapping( + tmp_mapping, fl_index, sl_index, + tl_index, ll_index, -1, + -1, page_size, False) + + elif tl_entry_type == FLSL_TYPE_BLOCK: + if section_skip_count: + section_skip_count -= 1 + continue + + (phy_addr, page_size, + map_type, status, + section_skip_count) = get_section_mapping_info( + ramdump, tl_pte, tl_index) + + if status and phy_addr != -1: + tmp_mapping = add_flat_mapping( + tmp_mapping, fl_index, sl_index, + tl_index, 0, phy_addr, + map_type, page_size, True) + + tl_pte += 8 + return tmp_mapping + + +def create_flat_mappings(ramdump, pg_table, level): + tmp_mapping = {} + fl_pte = pg_table + skip_fl = 0 + fl_range = NUM_FL_PTE + read_virtual = False + + if level == 3: + skip_fl = 1 + fl_range = 1 + read_virtual = True + + # In case we have only 3 level page table we want to skip first level + # and just parse second, third and last level. 
To keep unify code for 3 + # level and 4 level parsing just run first level loop once and directly + # jump to start parsing from second level + + for fl_index in range(0, fl_range): + + (fl_pg_table_entry, sl_pte) = fl_entry(ramdump, fl_pte, skip_fl) + + if fl_pg_table_entry == 0: + tmp_mapping = add_flat_mapping( + tmp_mapping, fl_index, 0, 0, 0, + -1, -1, SZ_256G, False) + fl_pte += 8 + continue + + for sl_index in range(0, NUM_SL_PTE): + + sl_pg_table_entry = ramdump.read_u64(sl_pte, read_virtual) + + if sl_pg_table_entry == 0 or sl_pg_table_entry is None: + tmp_mapping = add_flat_mapping(tmp_mapping, + fl_index, sl_index, 0, 0, + -1, -1, SZ_1G, False) + sl_pte += 8 + continue + + sl_entry_type = sl_pg_table_entry & FLSL_PTE_TYPE_MASK + if sl_entry_type == FLSL_TYPE_TABLE: + tmp_mapping = parse_2nd_level_table(ramdump, sl_pg_table_entry, + fl_index, sl_index, + tmp_mapping) + elif sl_entry_type == FLSL_TYPE_BLOCK: + (phy_addr, page_size, map_type, status) \ + = get_super_section_mapping_info(ramdump, sl_pte, sl_index) + + if status and phy_addr != -1: + tmp_mapping = add_flat_mapping( + tmp_mapping, fl_index, sl_index, 0, 0, + phy_addr, map_type, page_size, True) + + sl_pte += 8 + fl_pte += 8 + return tmp_mapping + + +def parse_aarch64_tables(ramdump, d, domain_num): + fname = 'arm_iommu_domain_%02d.txt' % (domain_num) + with ramdump.open_file(fname) as outfile: + + redirect = 'OFF' + if d.redirect is None: + redirect = 'UNKNOWN' + elif d.redirect > 0: + redirect = 'ON' + iommu_context = ' '.join('%s (%s)' % (name, num) + for (name, num) in d.ctx_list) + iommu_context = iommu_context or 'None attached' + + outfile.write( + 'IOMMU Context: %s. Domain: %s' + '[L2 cache redirect for page tables is %s]\n' % ( + iommu_context, d.client_name, redirect)) + outfile.write( + '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] ' + '[Attributes][Page Table Entry Size]\n') + if d.pg_table == 0: + outfile.write( + 'No Page Table Found. (Probably a secure domain)\n') + else: + flat_mapping = create_flat_mappings(ramdump, d.pg_table, d.level) + collapsed_mapping = create_collapsed_mapping(flat_mapping) + + for virt in sorted(collapsed_mapping.keys()): + mapping = collapsed_mapping[virt] + + if mapping.mapped: + outfile.write( + '0x%x--0x%x [0x%x] A:0x%x--0x%x [0x%x] %s[%s] \n' % + (mapping.virt_start, mapping.virt_end, + mapping.map_size, mapping.phys_start, + mapping.phys_end, mapping.map_size, mapping.map_type, + order_size_strings[get_order(mapping.map_size)])) + else: + outfile.write( + '0x%x--0x%x [0x%x] [UNMAPPED]\n' % + (mapping.virt_start, mapping.virt_end, + mapping.virt_end - mapping.virt_start)) diff --git a/linux-ramdump-parser-v2/boards.py b/linux-ramdump-parser-v2/boards.py index 8c0fcad13e80828aeb08d8b167533ef57184a985..4f7d4109b72072478dc04729a2982f3812822f8c 100755 --- a/linux-ramdump-parser-v2/boards.py +++ b/linux-ramdump-parser-v2/boards.py @@ -1,4 +1,4 @@ -# Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ class Board(object): socid = shared id unique to a board type board_num = human readable board number indicating the board type (e.g. 
8960, 8974) + cpu = T32 cpu model ram_start = start of the DDR imem_start = start of location in which the watchdog address is stored smem_addr = start of the shared memory region @@ -27,15 +28,15 @@ class Board(object): """ def __init__(self): - self.socid = -1 - self.board_num = -1 - self.cpu = 'UNKNOWN' - self.ram_start = 0 - self.imem_start = 0 - self.smem_addr = 0 - self.phys_offset = 0 - self.wdog_addr = 0 - self.imem_file_name = None + self.socid = -1 + self.board_num = "-1" + self.cpu = 'UNKNOWN' + self.ram_start = 0 + self.imem_start = 0 + self.smem_addr = 0 + self.phys_offset = 0 + self.wdog_addr = 0 + self.imem_file_name = None class Board8960(Board): def __init__(self, socid, board_num, phys_offset=0x80200000, ram_start=0x80000000): @@ -65,7 +66,7 @@ class Board9615(Board): def __init__(self, socid): super(Board9615, self).__init__() self.socid = socid - self.board_num = 9615 + self.board_num = "9615" self.cpu = 'CORTEXA5' self.ram_start = 0x40000000 self.imem_start = 0 @@ -73,7 +74,7 @@ class Board9615(Board): self.phys_offset = 0x40800000 class Board8974(Board): - def __init__(self, socid, board_num=8974): + def __init__(self, socid, board_num="8974"): super(Board8974, self).__init__() self.socid = socid self.board_num = board_num @@ -89,7 +90,7 @@ class Board9625(Board): def __init__(self, socid): super(Board9625, self).__init__() self.socid = socid - self.board_num = 9625 + self.board_num = "9625" self.cpu = 'CORTEXA5' self.ram_start = 0x0 self.imem_start = 0xfe800000 @@ -99,7 +100,7 @@ class Board9625(Board): self.imem_file_name = 'OCIMEM.BIN' class Board8626(Board): - def __init__(self, socid, board_num=8626): + def __init__(self, socid, board_num="8626"): super(Board8626, self).__init__() self.socid = socid self.board_num = board_num @@ -112,7 +113,7 @@ class Board8626(Board): self.imem_file_name = 'OCIMEM.BIN' class Board8026LW(Board): - def __init__(self, socid, board_num=8026): + def __init__(self, socid, board_num="8026"): super(Board8026LW, self).__init__() self.socid = socid self.board_num = board_num @@ -125,7 +126,7 @@ class Board8026LW(Board): self.imem_file_name = 'OCIMEM.BIN' class Board8610(Board): - def __init__(self, socid, board_num=8610): + def __init__(self, socid, board_num="8610"): super(Board8610, self).__init__() self.socid = socid self.board_num = board_num @@ -141,7 +142,7 @@ class Board9635(Board): def __init__(self, socid): super(Board9635, self).__init__() self.socid = socid - self.board_num = 9635 + self.board_num = "9635" self.cpu = 'CORTEXA7' self.ram_start = 0x0 self.imem_start = 0xfe800000 @@ -154,7 +155,7 @@ class Board8916(Board): def __init__(self, socid, smem_addr): super(Board8916, self).__init__() self.socid = socid - self.board_num = 8916 + self.board_num = "8916" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 #self.ram_start = 0x0 @@ -168,7 +169,7 @@ class Board8939(Board): def __init__(self, socid, smem_addr): super(Board8939, self).__init__() self.socid = socid - self.board_num = 8939 + self.board_num = "8939" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = smem_addr @@ -181,7 +182,7 @@ class Board8936(Board): def __init__(self, socid): super(Board8936, self).__init__() self.socid = socid - self.board_num = 8936 + self.board_num = "8936" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = 0x6300000 @@ -194,7 +195,7 @@ class Board8994(Board): def __init__(self, socid): super(Board8994, self).__init__() self.socid = socid - self.board_num = 8994 + self.board_num = "8994" self.cpu = 'CORTEXA57A53' 
self.ram_start = 0x0 self.smem_addr = 0x6a00000 @@ -207,7 +208,7 @@ class Board8909(Board): def __init__(self, socid): super(Board8909, self).__init__() self.socid = socid - self.board_num = 8909 + self.board_num = "8909" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = 0x7d00000 @@ -220,7 +221,7 @@ class Board8908(Board): def __init__(self, socid): super(Board8908, self).__init__() self.socid = socid - self.board_num = 8908 + self.board_num = "8908" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = 0x7d00000 @@ -233,7 +234,7 @@ class Board9640(Board): def __init__(self, socid): super(Board9640, self).__init__() self.socid = socid - self.board_num = 9640 + self.board_num = "9640" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = 0x7e80000 @@ -246,7 +247,7 @@ class Board8992(Board): def __init__(self, socid): super(Board8992, self).__init__() self.socid = socid - self.board_num = 8992 + self.board_num = "8992" self.cpu = 'CORTEXA57A53' self.ram_start = 0x0 self.smem_addr = 0x6a00000 @@ -259,7 +260,7 @@ class Board8929(Board): def __init__(self, socid, smem_addr): super(Board8929, self).__init__() self.socid = socid - self.board_num = 8929 + self.board_num = "8929" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = smem_addr @@ -271,7 +272,7 @@ class Board8996(Board): def __init__(self, socid): super(Board8996, self).__init__() self.socid = socid - self.board_num = 8996 + self.board_num = "8996" self.cpu = 'HYDRA' self.ram_start = 0x80000000 self.smem_addr = 0x6000000 @@ -284,7 +285,7 @@ class Board8952(Board): def __init__(self, socid): super(Board8952, self).__init__() self.socid = socid - self.board_num = 8952 + self.board_num = "8952" self.cpu = 'CORTEXA53' self.ram_start = 0x80000000 self.smem_addr = 0x6300000 @@ -297,7 +298,7 @@ class Board8976(Board): def __init__(self, socid): super(Board8976, self).__init__() self.socid = socid - self.board_num = 8976 + self.board_num = "8976" self.cpu = 'CORTEXA53' self.ram_start = 0x80000000 self.smem_addr = 0x6300000 @@ -310,7 +311,7 @@ class Board9607(Board): def __init__(self, socid): super(Board9607, self).__init__() self.socid = socid - self.board_num = 9607 + self.board_num = "9607" self.cpu = 'CORTEXA7' self.ram_start = 0x80000000 self.smem_addr = 0x7d00000 @@ -324,7 +325,7 @@ class Board8937(Board): def __init__(self, socid): super(Board8937, self).__init__() self.socid = socid - self.board_num = 8937 + self.board_num = "8937" self.cpu = 'CORTEXA53' self.ram_start = 0x80000000 self.smem_addr = 0x6300000 @@ -333,12 +334,38 @@ class Board8937(Board): self.wdog_addr = 0x8600658 self.imem_file_name = 'OCIMEM.BIN' +class Board8940(Board): + def __init__(self, socid): + super(Board8940, self).__init__() + self.socid = socid + self.board_num = "8940" + self.cpu = 'CORTEXA53' + self.ram_start = 0x80000000 + self.smem_addr = 0x6300000 + self.phys_offset = 0x40000000 + self.imem_start = 0x8600000 + self.wdog_addr = 0x8600658 + self.imem_file_name = 'OCIMEM.BIN' class Board8953(Board): def __init__(self, socid): super(Board8953, self).__init__() self.socid = socid - self.board_num = 8953 + self.board_num = "8953" + self.cpu = 'CORTEXA53' + self.ram_start = 0x80000000 + self.smem_addr = 0x6300000 + self.phys_offset = 0x40000000 + self.imem_start = 0x8600000 + self.wdog_addr = 0x8600658 + self.imem_file_name = 'OCIMEM.BIN' + + +class Board8917(Board): + def __init__(self, socid): + super(Board8917, self).__init__() + self.socid = socid + self.board_num = "8917" self.cpu = 'CORTEXA53' 
self.ram_start = 0x80000000 self.smem_addr = 0x6300000 @@ -347,6 +374,47 @@ class Board8953(Board): self.wdog_addr = 0x8600658 self.imem_file_name = 'OCIMEM.BIN' +class Board8920(Board): + def __init__(self, socid): + super(Board8920, self).__init__() + self.socid = socid + self.board_num = "8920" + self.cpu = 'CORTEXA53' + self.ram_start = 0x80000000 + self.smem_addr = 0x6300000 + self.phys_offset = 0x40000000 + self.imem_start = 0x8600000 + self.wdog_addr = 0x8600658 + self.imem_file_name = 'OCIMEM.BIN' + +class BoardCalifornium(Board): + def __init__(self, socid): + super(BoardCalifornium, self).__init__() + self.socid = socid + self.board_num = "californium" + self.cpu = 'CORTEXA7' + self.ram_start = 0x80000000 + self.smem_addr = 0x7e80000 + self.phys_offset = 0x80000000 + self.imem_start = 0x08600000 + self.wdog_addr = 0x8600658 + self.imem_file_name = 'OCIMEM.BIN' + + +class BoardCobalt(Board): + def __init__(self, socid): + super(BoardCobalt, self).__init__() + self.socid = socid + self.board_num = "cobalt" + self.cpu = 'CORTEXA53' + self.ram_start = 0x80000000 + self.smem_addr = 0x6000000 + self.phys_offset = 0x80000000 + self.imem_start = 0x14680000 + self.wdog_addr = 0x146BF658 + self.imem_file_name = 'OCIMEM.BIN' + + boards = [] boards.append(Board9640(socid=234)) @@ -441,7 +509,7 @@ boards.append(Board8610(socid=164)) boards.append(Board8610(socid=165)) boards.append(Board8610(socid=166)) -boards.append(Board8974(socid=178, board_num=8084)) +boards.append(Board8974(socid=178, board_num="8084")) boards.append(Board9635(socid=187)) boards.append(Board9635(socid=227)) @@ -450,61 +518,62 @@ boards.append(Board9635(socid=229)) boards.append(Board9635(socid=230)) boards.append(Board9635(socid=231)) -boards.append(Board8960(socid=87, board_num=8960)) -boards.append(Board8960(socid=122, board_num=8960)) -boards.append(Board8960(socid=123, board_num=8260)) -boards.append(Board8960(socid=124, board_num=8060)) - -boards.append(Board8960(socid=244, board_num=8064, phys_offset=0x40200000, ram_start=0x40000000)) -boards.append(Board8960(socid=109, board_num=8064)) -boards.append(Board8960(socid=130, board_num=8064)) -boards.append(Board8960(socid=153, board_num=8064)) - -boards.append(Board8960(socid=116, board_num=8930)) -boards.append(Board8960(socid=117, board_num=8930)) -boards.append(Board8960(socid=118, board_num=8930)) -boards.append(Board8960(socid=119, board_num=8930)) -boards.append(Board8960(socid=154, board_num=8930)) -boards.append(Board8960(socid=155, board_num=8930)) -boards.append(Board8960(socid=156, board_num=8930)) -boards.append(Board8960(socid=157, board_num=8930)) -boards.append(Board8960(socid=160, board_num=8930)) - -boards.append(Board8960(socid=120, board_num=8627)) -boards.append(Board8960(socid=121, board_num=8627)) -boards.append(Board8960(socid=138, board_num=8960)) -boards.append(Board8960(socid=139, board_num=8960)) -boards.append(Board8960(socid=140, board_num=8960)) -boards.append(Board8960(socid=141, board_num=8960)) -boards.append(Board8960(socid=142, board_num=8930)) -boards.append(Board8960(socid=143, board_num=8630)) -boards.append(Board8960(socid=144, board_num=8630)) +boards.append(Board8960(socid=87, board_num="8960")) +boards.append(Board8960(socid=122, board_num="8960")) +boards.append(Board8960(socid=123, board_num="8260")) +boards.append(Board8960(socid=124, board_num="8060")) + +boards.append(Board8960(socid=244, board_num="8064", phys_offset=0x40200000, + ram_start=0x40000000)) +boards.append(Board8960(socid=109, board_num="8064")) 
+boards.append(Board8960(socid=130, board_num="8064")) +boards.append(Board8960(socid=153, board_num="8064")) + +boards.append(Board8960(socid=116, board_num="8930")) +boards.append(Board8960(socid=117, board_num="8930")) +boards.append(Board8960(socid=118, board_num="8930")) +boards.append(Board8960(socid=119, board_num="8930")) +boards.append(Board8960(socid=154, board_num="8930")) +boards.append(Board8960(socid=155, board_num="8930")) +boards.append(Board8960(socid=156, board_num="8930")) +boards.append(Board8960(socid=157, board_num="8930")) +boards.append(Board8960(socid=160, board_num="8930")) + +boards.append(Board8960(socid=120, board_num="8627")) +boards.append(Board8960(socid=121, board_num="8627")) +boards.append(Board8960(socid=138, board_num="8960")) +boards.append(Board8960(socid=139, board_num="8960")) +boards.append(Board8960(socid=140, board_num="8960")) +boards.append(Board8960(socid=141, board_num="8960")) +boards.append(Board8960(socid=142, board_num="8930")) +boards.append(Board8960(socid=143, board_num="8630")) +boards.append(Board8960(socid=144, board_num="8630")) boards.append(Board9615(socid=104)) boards.append(Board9615(socid=105)) boards.append(Board9615(socid=106)) boards.append(Board9615(socid=107)) -boards.append(Board8625(socid=88, board_num=8625)) -boards.append(Board8625(socid=89, board_num=8625)) -boards.append(Board8625(socid=96, board_num=8625)) -boards.append(Board8625(socid=90, board_num=8625)) -boards.append(Board8625(socid=91, board_num=8625)) -boards.append(Board8625(socid=92, board_num=8625)) -boards.append(Board8625(socid=97, board_num=8625)) -boards.append(Board8625(socid=98, board_num=8625)) -boards.append(Board8625(socid=99, board_num=8625)) -boards.append(Board8625(socid=100, board_num=8625)) -boards.append(Board8625(socid=101, board_num=8625)) -boards.append(Board8625(socid=102, board_num=8625)) -boards.append(Board8625(socid=103, board_num=8625)) -boards.append(Board8625(socid=127, board_num=8625)) -boards.append(Board8625(socid=128, board_num=8625)) -boards.append(Board8625(socid=129, board_num=8625)) -boards.append(Board8625(socid=131, board_num=8625)) -boards.append(Board8625(socid=132, board_num=8625)) -boards.append(Board8625(socid=133, board_num=8625)) -boards.append(Board8625(socid=135, board_num=8625)) +boards.append(Board8625(socid=88, board_num="8625")) +boards.append(Board8625(socid=89, board_num="8625")) +boards.append(Board8625(socid=96, board_num="8625")) +boards.append(Board8625(socid=90, board_num="8625")) +boards.append(Board8625(socid=91, board_num="8625")) +boards.append(Board8625(socid=92, board_num="8625")) +boards.append(Board8625(socid=97, board_num="8625")) +boards.append(Board8625(socid=98, board_num="8625")) +boards.append(Board8625(socid=99, board_num="8625")) +boards.append(Board8625(socid=100, board_num="8625")) +boards.append(Board8625(socid=101, board_num="8625")) +boards.append(Board8625(socid=102, board_num="8625")) +boards.append(Board8625(socid=103, board_num="8625")) +boards.append(Board8625(socid=127, board_num="8625")) +boards.append(Board8625(socid=128, board_num="8625")) +boards.append(Board8625(socid=129, board_num="8625")) +boards.append(Board8625(socid=131, board_num="8625")) +boards.append(Board8625(socid=132, board_num="8625")) +boards.append(Board8625(socid=133, board_num="8625")) +boards.append(Board8625(socid=135, board_num="8625")) boards.append(Board8994(socid=207)) @@ -530,7 +599,22 @@ boards.append(Board9607(socid=299)) boards.append(Board8937(socid=294)) boards.append(Board8937(socid=295)) 
+boards.append(Board8940(socid=313)) + boards.append(Board8953(socid=293)) +boards.append(Board8953(socid=304)) + +boards.append(Board8917(socid=303)) +boards.append(Board8917(socid=307)) +boards.append(Board8917(socid=308)) +boards.append(Board8917(socid=309)) + +boards.append(Board8920(socid=320)) + +boards.append(BoardCalifornium(socid=279)) + +boards.append(BoardCobalt(socid=292)) + def get_supported_boards(): """ Called by other part of the code to get a list of boards """ diff --git a/linux-ramdump-parser-v2/cachedumplib.py b/linux-ramdump-parser-v2/cachedumplib.py index cad6d940983be46997505898fb772637cd704155..6f74261a9d7fe77efe00eb5647905ec7886ea626 100755 --- a/linux-ramdump-parser-v2/cachedumplib.py +++ b/linux-ramdump-parser-v2/cachedumplib.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -9,21 +9,23 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. import struct -from print_out import print_out_str, print_out_exception """dictionary mapping from (hw_id, client_id, version) to class CacheDump""" lookuptable = {} + def lookup_cache_type(hwid, client_id, version): """defaults to CacheDump() if no match found""" return lookuptable.get((hwid, client_id, version), CacheDump()) + def formatwidth(string, limit): if len(string) >= limit: return string[0:limit] formatstr = '{{0:{0}}}'.format(limit) return formatstr.format(string) + class TableOutputFormat: """ Not sure if using PrettyTable (python lib) is a good idea, since people would need to install it""" @@ -57,6 +59,7 @@ class TableOutputFormat: outfile.write('\n') + class CacheDump(object): """ Class to describe a method to parse a particular type of cachedump. 
Users should not make instances of this class.""" @@ -68,15 +71,15 @@ class CacheDump(object): raise NotImplementedError struct_CacheDumpType_v1 = [ - ('<I', 'status0'), #Status Registers + ('<I', 'status0'), # Status Registers ('I', 'status1'), ('I', 'status2'), ('I', 'status3'), - ('I', 'TagSize'), #Tag Size in u32 words - ('I', 'LineSize'), #Line Size in u32 words - ('I', 'NumSets'), #Number of sets - ('I', 'NumWays'), #Number of ways - ('Q', 'next'), #unused + ('I', 'TagSize'), # Tag Size in u32 words + ('I', 'LineSize'), # Line Size in u32 words + ('I', 'NumSets'), # Number of sets + ('I', 'NumWays'), # Number of ways + ('Q', 'next'), # unused ('I', '__reserved0'), ('I', '__reserved1'), ('I', '__reserved2'), @@ -85,8 +88,10 @@ struct_CacheDumpType_v1 = [ CacheDumpFormatStr_v1 = ''.join(zip(*struct_CacheDumpType_v1)[0]) CacheDumpKeys_v1 = zip(*struct_CacheDumpType_v1)[1] + class CacheDumpType_v1(CacheDump): - """Uses the format struct_CacheDumpType_v1, followed by an array of raw data""" + """Uses the format struct_CacheDumpType_v1, + followed by an array of raw data""" def __init__(self): super(CacheDumpType_v1, self).__init__() @@ -103,7 +108,7 @@ class CacheDumpType_v1(CacheDump): def add_table_data_columns(self): for i in range(0, self.LineSize): - str ="DATA{0}".format(i) + str = "DATA{0}".format(i) self.tableformat.addColumn(str, '{0:08x}', 8) def read_line(self, start): @@ -123,7 +128,8 @@ class CacheDumpType_v1(CacheDump): if self.unsupported_header_offset >= 0: return self.unsupported_header_offset - items = self.ramdump.read_string(start, CacheDumpFormatStr_v1, virtual=False) + items = self.ramdump.read_string(start, CacheDumpFormatStr_v1, + virtual=False) if items is None: raise Exception('Unable to read header information') @@ -131,7 +137,8 @@ class CacheDumpType_v1(CacheDump): setattr(self, CacheDumpKeys_v1[i], items[i]) struct_size = struct.calcsize(CacheDumpFormatStr_v1) - size = 0x4 * (self.LineSize + self.TagSize) * self.NumWays * self.NumSets + size = 0x4 * (self.LineSize + self.TagSize) * self.NumWays + size = size * self.NumSets size = size + struct_size if (size < 0x1000 or size > end - start): @@ -157,6 +164,7 @@ class CacheDumpType_v1(CacheDump): self.tableformat.printline(output, outfile) start = start + (self.TagSize + self.LineSize) * 0x4 + class L1_DCache_A53(CacheDumpType_v1): """Refer to ARM documentation:cortex_a53_trm""" def __init__(self): @@ -212,6 +220,34 @@ class L1_DCache_A53(CacheDumpType_v1): output.append(oa) output.append(os) + +class L1_ICache_A53(CacheDumpType_v1): + """Refer to ARM documentation:cortex_a53_trm""" + def __init__(self): + super(L1_ICache_A53, self).__init__() + self.tableformat.addColumn('VALID') + self.tableformat.addColumn('N') + self.tableformat.addColumn('PA [27:0]', '{0:016x}', 16) + self.unsupported_header_offset = 0 + self.TagSize = 2 + self.LineSize = 16 + self.NumSets = 0x80 + self.NumWays = 2 + + def parse_tag_fn(self, output, data, nset, nway): + if self.TagSize != 2: + raise Exception('cache tag size mismatch') + + valid = (data[0] >> 1) & 0x1 + n = (data[0] >> 0) & 0x1 + addr = (data[0] >> 0) & 0xffffffff + + addr = (addr << 12) | (nset << 6) + output.append(valid) + output.append(n) + output.append(addr) + + class L1_DCache_A57(CacheDumpType_v1): """Refer to ARM documentation:cortex_a57_trm""" def __init__(self): @@ -252,6 +288,7 @@ class L1_DCache_A57(CacheDumpType_v1): output.append(n) output.append(addr) + class L1_ICache_A57(CacheDumpType_v1): """Refer to ARM documentation:cortex_a57_trm""" def __init__(self): 
@@ -278,6 +315,7 @@ class L1_ICache_A57(CacheDumpType_v1): output.append(n) output.append(addr) + class L2_Cache_A57(CacheDumpType_v1): """Refer to ARM documentation:cortex_a57_trm""" def __init__(self, numsets): @@ -318,50 +356,191 @@ class L2_Cache_A57(CacheDumpType_v1): output.append(n) output.append(addr) -#8994 - -lookuptable[(8994, 0x80, 0)] = L1_DCache_A53() -lookuptable[(8994, 0x81, 0)] = L1_DCache_A53() -lookuptable[(8994, 0x82, 0)] = L1_DCache_A53() -lookuptable[(8994, 0x83, 0)] = L1_DCache_A53() -lookuptable[(8994, 0x84, 0)] = L1_DCache_A57() -lookuptable[(8994, 0x85, 0)] = L1_DCache_A57() -lookuptable[(8994, 0x86, 0)] = L1_DCache_A57() -lookuptable[(8994, 0x87, 0)] = L1_DCache_A57() - -lookuptable[(8994, 0x64, 0)] = L1_ICache_A57() -lookuptable[(8994, 0x65, 0)] = L1_ICache_A57() -lookuptable[(8994, 0x66, 0)] = L1_ICache_A57() -lookuptable[(8994, 0x67, 0)] = L1_ICache_A57() - -lookuptable[(8994, 0xC1, 0)] = L2_Cache_A57(numsets=0x800) - -lookuptable[(8994, 0x80, 0x100)] = L1_DCache_A53() -lookuptable[(8994, 0x81, 0x100)] = L1_DCache_A53() -lookuptable[(8994, 0x82, 0x100)] = L1_DCache_A53() -lookuptable[(8994, 0x83, 0x100)] = L1_DCache_A53() -lookuptable[(8994, 0x84, 0x100)] = L1_DCache_A57() -lookuptable[(8994, 0x85, 0x100)] = L1_DCache_A57() -lookuptable[(8994, 0x86, 0x100)] = L1_DCache_A57() -lookuptable[(8994, 0x87, 0x100)] = L1_DCache_A57() - -lookuptable[(8994, 0x64, 0x100)] = L1_ICache_A57() -lookuptable[(8994, 0x65, 0x100)] = L1_ICache_A57() -lookuptable[(8994, 0x66, 0x100)] = L1_ICache_A57() -lookuptable[(8994, 0x67, 0x100)] = L1_ICache_A57() - -lookuptable[(8994, 0xC1, 0x100)] = L2_Cache_A57(numsets=0x800) - - -#8992 -lookuptable[(8992, 0x80, 0x100)] = L1_DCache_A53() -lookuptable[(8992, 0x81, 0x100)] = L1_DCache_A53() -lookuptable[(8992, 0x82, 0x100)] = L1_DCache_A53() -lookuptable[(8992, 0x83, 0x100)] = L1_DCache_A53() -lookuptable[(8992, 0x84, 0x100)] = L1_DCache_A57() -lookuptable[(8992, 0x85, 0x100)] = L1_DCache_A57() - -lookuptable[(8992, 0x64, 0x100)] = L1_ICache_A57() -lookuptable[(8992, 0x65, 0x100)] = L1_ICache_A57() - -lookuptable[(8992, 0xC1, 0x100)] = L2_Cache_A57(numsets=0x400) + +class L1_DCache_KRYO2XX_GOLD(CacheDumpType_v1): + """Refer to documentation:KYRO2XX_trm""" + def __init__(self): + super(L1_DCache_KRYO2XX_GOLD, self).__init__() + self.tableformat.addColumn('MOESI') + self.tableformat.addColumn('O_Mem_E') + self.tableformat.addColumn('Addr [39:12]', '{0:016x}', 16) + self.tableformat.addColumn('OS', '{0:02b}') + self.tableformat.addColumn('MH', '{0:02b}') + self.unsupported_header_offset = 0 + self.TagSize = 2 + self.LineSize = 16 + self.NumSets = 0x100 + self.NumWays = 4 + + def MOESI_to_string(self, num): + if (num & 0x4 == 0x0): + return 'I' + if (num & 0x1 == 0x1): # shared + if (num & 0x8 == 0x1): # dirty + return 'O' + else: + return 'S' + else: # not shared + if (num & 0x8 == 0x1): # dirty + return 'M' + else: + return 'E' + + def parse_tag_fn(self, output, data, nset, nway): + MOESI_d = (data[0] >> 31) & 0x1 + mem_exclu = (data[0] >> 30) & 0x1 + MOESI_v = (data[0] >> 29) & 0x1 + MOESI_ns = (data[0] >> 28) & 0x1 + addr = (data[0] >> 0) & 0xfffffff + out_share = (data[1] >> 3) & 0x1 + memory_hint = (data[1] >> 1) & 0x3 + MOESI_gs = (data[1] >> 0) & 0x1 + + moesi = MOESI_d << 3 | MOESI_v << 2 | MOESI_ns << 1 | MOESI_gs + addr = ((addr) << 11) | (nset << 6) + output.append(self.MOESI_to_string(moesi)) + output.append(mem_exclu) + output.append(addr) + output.append(out_share) + output.append(memory_hint) + + +class 
L1_ICache_KRYO2XX_GOLD(CacheDumpType_v1): + """Refer to documentation:cortex_a57_trm""" + def __init__(self): + super(L1_ICache_KRYO2XX_GOLD, self).__init__() + self.tableformat.addColumn('VALID') + self.tableformat.addColumn('N') + self.tableformat.addColumn('PA [27:0]', '{0:016x}', 16) + self.unsupported_header_offset = 0 + self.TagSize = 2 + self.LineSize = 16 + self.NumSets = 0x100 + self.NumWays = 2 + + def parse_tag_fn(self, output, data, nset, nway): + if self.TagSize != 2: + raise Exception('cache tag size mismatch') + + valid = (data[0] >> 1) & 0x1 + n = (data[0] >> 0) & 0x1 + addr = (data[0] >> 0) & 0xffffffff + + addr = (addr << 12) | (nset << 6) + output.append(valid) + output.append(n) + output.append(addr) + +L1_DCache_KRYO2XX_SILVER = L1_DCache_A53 +L1_ICache_KYRO2XX_SILVER = L1_ICache_A53 + +# "msmcobalt" +lookuptable[("cobalt", 0x80, 0x14)] = L1_DCache_KRYO2XX_SILVER() +lookuptable[("cobalt", 0x81, 0x14)] = L1_DCache_KRYO2XX_SILVER() +lookuptable[("cobalt", 0x82, 0x14)] = L1_DCache_KRYO2XX_SILVER() +lookuptable[("cobalt", 0x84, 0x14)] = L1_DCache_KRYO2XX_SILVER() +lookuptable[("cobalt", 0x84, 0x14)] = L1_DCache_KRYO2XX_GOLD() +lookuptable[("cobalt", 0x85, 0x14)] = L1_DCache_KRYO2XX_GOLD() +lookuptable[("cobalt", 0x86, 0x14)] = L1_DCache_KRYO2XX_GOLD() +lookuptable[("cobalt", 0x87, 0x14)] = L1_DCache_KRYO2XX_GOLD() + + +lookuptable[("cobalt", 0x60, 0x14)] = L1_ICache_KYRO2XX_SILVER() +lookuptable[("cobalt", 0x61, 0x14)] = L1_ICache_KYRO2XX_SILVER() +lookuptable[("cobalt", 0x62, 0x14)] = L1_ICache_KYRO2XX_SILVER() +lookuptable[("cobalt", 0x63, 0x14)] = L1_ICache_KYRO2XX_SILVER() +lookuptable[("cobalt", 0x64, 0x14)] = L1_ICache_KRYO2XX_GOLD() +lookuptable[("cobalt", 0x65, 0x14)] = L1_ICache_KRYO2XX_GOLD() +lookuptable[("cobalt", 0x66, 0x14)] = L1_ICache_KRYO2XX_GOLD() +lookuptable[("cobalt", 0x67, 0x14)] = L1_ICache_KRYO2XX_GOLD() + + +# 8994 + +lookuptable[("8994", 0x80, 0)] = L1_DCache_A53() +lookuptable[("8994", 0x81, 0)] = L1_DCache_A53() +lookuptable[("8994", 0x82, 0)] = L1_DCache_A53() +lookuptable[("8994", 0x83, 0)] = L1_DCache_A53() +lookuptable[("8994", 0x84, 0)] = L1_DCache_A57() +lookuptable[("8994", 0x85, 0)] = L1_DCache_A57() +lookuptable[("8994", 0x86, 0)] = L1_DCache_A57() +lookuptable[("8994", 0x87, 0)] = L1_DCache_A57() + +lookuptable[("8994", 0x64, 0)] = L1_ICache_A57() +lookuptable[("8994", 0x65, 0)] = L1_ICache_A57() +lookuptable[("8994", 0x66, 0)] = L1_ICache_A57() +lookuptable[("8994", 0x67, 0)] = L1_ICache_A57() + +lookuptable[("8994", 0xC1, 0)] = L2_Cache_A57(numsets=0x800) + +lookuptable[("8994", 0x80, 0x100)] = L1_DCache_A53() +lookuptable[("8994", 0x81, 0x100)] = L1_DCache_A53() +lookuptable[("8994", 0x82, 0x100)] = L1_DCache_A53() +lookuptable[("8994", 0x83, 0x100)] = L1_DCache_A53() +lookuptable[("8994", 0x84, 0x100)] = L1_DCache_A57() +lookuptable[("8994", 0x85, 0x100)] = L1_DCache_A57() +lookuptable[("8994", 0x86, 0x100)] = L1_DCache_A57() +lookuptable[("8994", 0x87, 0x100)] = L1_DCache_A57() + +lookuptable[("8994", 0x64, 0x100)] = L1_ICache_A57() +lookuptable[("8994", 0x65, 0x100)] = L1_ICache_A57() +lookuptable[("8994", 0x66, 0x100)] = L1_ICache_A57() +lookuptable[("8994", 0x67, 0x100)] = L1_ICache_A57() + +lookuptable[("8994", 0xC1, 0x100)] = L2_Cache_A57(numsets=0x800) + + +# 8992 +lookuptable[("8992", 0x80, 0x100)] = L1_DCache_A53() +lookuptable[("8992", 0x81, 0x100)] = L1_DCache_A53() +lookuptable[("8992", 0x82, 0x100)] = L1_DCache_A53() +lookuptable[("8992", 0x83, 0x100)] = L1_DCache_A53() +lookuptable[("8992", 0x84, 0x100)] = 
L1_DCache_A57() +lookuptable[("8992", 0x85, 0x100)] = L1_DCache_A57() + +lookuptable[("8992", 0x64, 0x100)] = L1_ICache_A57() +lookuptable[("8992", 0x65, 0x100)] = L1_ICache_A57() + +lookuptable[("8992", 0xC1, 0x100)] = L2_Cache_A57(numsets=0x400) + +# 8917 +lookuptable[("8917", 0x84, 0x14)] = L1_DCache_A53() +lookuptable[("8917", 0x85, 0x14)] = L1_DCache_A53() +lookuptable[("8917", 0x86, 0x14)] = L1_DCache_A53() +lookuptable[("8917", 0x87, 0x14)] = L1_DCache_A53() + +# 8920 +lookuptable[("8920", 0x84, 0x14)] = L1_DCache_A53() +lookuptable[("8920", 0x85, 0x14)] = L1_DCache_A53() +lookuptable[("8920", 0x86, 0x14)] = L1_DCache_A53() +lookuptable[("8920", 0x87, 0x14)] = L1_DCache_A53() + +# 8937 +lookuptable[("8937", 0x80, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x81, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x82, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x83, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x84, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x85, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x86, 0x14)] = L1_DCache_A53() +lookuptable[("8937", 0x87, 0x14)] = L1_DCache_A53() + +# 8940 +lookuptable[("8940", 0x80, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x81, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x82, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x83, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x84, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x85, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x86, 0x14)] = L1_DCache_A53() +lookuptable[("8940", 0x87, 0x14)] = L1_DCache_A53() + +# 8953 +lookuptable[("8953", 0x80, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x81, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x82, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x83, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x84, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x85, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x86, 0x14)] = L1_DCache_A53() +lookuptable[("8953", 0x87, 0x14)] = L1_DCache_A53() diff --git a/linux-ramdump-parser-v2/debug_image_v2.py b/linux-ramdump-parser-v2/debug_image_v2.py old mode 100644 new mode 100755 index 6a04104305c6d7fac2ab6e3faecd550c9f50cfc0..b6b3655926ee614384e910a0f315ae5dd3eef7ea --- a/linux-ramdump-parser-v2/debug_image_v2.py +++ b/linux-ramdump-parser-v2/debug_image_v2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ import random import subprocess import sys import time +import local_settings from dcc import DccRegDump, DccSramDump from pmic import PmicRegDump @@ -50,26 +51,29 @@ class client(object): MSM_DUMP_DATA_LOG_BUF = 0x110 MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111 MSM_DUMP_DATA_L2_TLB = 0x120 + MSM_DUMP_DATA_SCANDUMP = 0xEB MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES -client_table = { - 'MSM_DUMP_DATA_CPU_CTX': 'parse_cpu_ctx', - 'MSM_DUMP_DATA_L1_INST_TLB': 'parse_l1_inst_tlb', - 'MSM_DUMP_DATA_L1_DATA_TLB': 'parse_l1_data_tlb', - 'MSM_DUMP_DATA_L1_INST_CACHE': 'parse_cache_common', - 'MSM_DUMP_DATA_L1_DATA_CACHE': 'parse_cache_common', - 'MSM_DUMP_DATA_L2_CACHE': 'parse_cache_common', - 'MSM_DUMP_DATA_L3_CACHE': 'parse_l3_cache', - 'MSM_DUMP_DATA_OCMEM': 'parse_ocmem', - 'MSM_DUMP_DATA_DBGUI_REG' : 'parse_qdss_common', - 'MSM_DUMP_DATA_VSENSE': 'parse_vsens', - 'MSM_DUMP_DATA_PMIC': 'parse_pmic', - 'MSM_DUMP_DATA_DCC_REG':'parse_dcc_reg', - 'MSM_DUMP_DATA_DCC_SRAM':'parse_dcc_sram', - 'MSM_DUMP_DATA_TMC_ETF': 'parse_qdss_common', - 'MSM_DUMP_DATA_TMC_REG': 'parse_qdss_common', - 'MSM_DUMP_DATA_L2_TLB': 'parse_l2_tlb', -} +# Client functions will be executed in top-to-bottom order +client_types = [ + ('MSM_DUMP_DATA_SCANDUMP', 'parse_scandump'), + ('MSM_DUMP_DATA_CPU_CTX', 'parse_cpu_ctx'), + ('MSM_DUMP_DATA_L1_INST_TLB', 'parse_l1_inst_tlb'), + ('MSM_DUMP_DATA_L1_DATA_TLB', 'parse_l1_data_tlb'), + ('MSM_DUMP_DATA_L1_INST_CACHE', 'parse_cache_common'), + ('MSM_DUMP_DATA_L1_DATA_CACHE', 'parse_cache_common'), + ('MSM_DUMP_DATA_L2_CACHE', 'parse_cache_common'), + ('MSM_DUMP_DATA_L3_CACHE', 'parse_l3_cache'), + ('MSM_DUMP_DATA_OCMEM', 'parse_ocmem'), + ('MSM_DUMP_DATA_DBGUI_REG', 'parse_qdss_common'), + ('MSM_DUMP_DATA_VSENSE', 'parse_vsens'), + ('MSM_DUMP_DATA_PMIC', 'parse_pmic'), + ('MSM_DUMP_DATA_DCC_REG', 'parse_dcc_reg'), + ('MSM_DUMP_DATA_DCC_SRAM', 'parse_dcc_sram'), + ('MSM_DUMP_DATA_TMC_ETF', 'parse_qdss_common'), + ('MSM_DUMP_DATA_TMC_REG', 'parse_qdss_common'), + ('MSM_DUMP_DATA_L2_TLB', 'parse_l2_tlb'), +] qdss_tag_to_field_name = { 'MSM_DUMP_DATA_TMC_REG': 'tmc_etr_start', @@ -79,11 +83,42 @@ qdss_tag_to_field_name = { class DebugImage_v2(): - def __init__(self): + def __init__(self, ramdump): self.qdss = QDSSDump() self.dump_type_lookup_table = [] self.dump_table_id_lookup_table = [] self.dump_data_id_lookup_table = [] + version = re.findall(r'\d+', ramdump.version) + if int(version[0]) > 3: + self.event_call = 'struct trace_event_call' + self.event_class = 'struct trace_event_class' + else: + self.event_call = 'struct ftrace_event_call' + self.event_class = 'struct ftrace_event_class' + self.has_scan_dump = False + + def parse_scandump(self, version, start, end, client_id, ram_dump): + scandump_file_prefix = "scandump" + try: + scan_wrapper_path = local_settings.scandump_parser_path + except AttributeError: + print_out_str('Could not find scandump_parser_path . 
Please define scandump_parser_path in local_settings') + return + if client_id == client.MSM_DUMP_DATA_SCANDUMP: + self.has_scan_dump = True + output = os.path.join(ram_dump.outdir, scandump_file_prefix) + input = os.path.join(ram_dump.outdir, "vv_msg_4_header.bin") + print_out_str( + 'Parsing scandump context start {0:x} end {1:x} {2} {3}'.format(start, end, output, input)) + if ram_dump.arm64: + arch = "aarch64" + header_bin = ram_dump.open_file(input) + it = range(start, end) + for i in it: + val = ram_dump.read_byte(i, False) + header_bin.write(struct.pack("<B", val)) + header_bin.close() + subprocess.call('python {0} -d {1} -o {2} -f {3}'.format(scan_wrapper_path, input, output, arch)) def parse_cpu_ctx(self, version, start, end, client_id, ram_dump): core = client_id - client.MSM_DUMP_DATA_CPU_CTX @@ -91,7 +126,7 @@ class DebugImage_v2(): print_out_str( 'Parsing CPU{2} context start {0:x} end {1:x}'.format(start, end, core)) - regs = TZRegDump_v2() + regs = TZRegDump_v2(self.has_scan_dump) if regs.init_regs(version, start, end, core, ram_dump) is False: print_out_str('!!! Could not get registers from TZ dump') return @@ -201,23 +236,23 @@ class DebugImage_v2(): self.formats_out.write("\tfield:{0} {1};\toffset:{2};\tsize:{3};\tsigned:{4};\n".format(type_str, field_name, offset, size, signed)) def ftrace_events_func(self, ftrace_list, ram_dump): - event_offset = ram_dump.field_offset('struct ftrace_event_call', 'event') - fmt_offset = ram_dump.field_offset('struct ftrace_event_call', 'print_fmt') - class_offset = ram_dump.field_offset('struct ftrace_event_call', 'class') - flags_offset = ram_dump.field_offset('struct ftrace_event_call', 'flags') + event_offset = ram_dump.field_offset(self.event_call, 'event') + fmt_offset = ram_dump.field_offset(self.event_call,'print_fmt') + class_offset = ram_dump.field_offset(self.event_call, 'class') + flags_offset = ram_dump.field_offset(self.event_call, 'flags') flags = ram_dump.read_word(ftrace_list + flags_offset) if (ram_dump.kernel_version >= (3, 18) and (flags & TRACE_EVENT_FL_TRACEPOINT)): - tp_offset = ram_dump.field_offset('struct ftrace_event_call', 'tp') + tp_offset = ram_dump.field_offset(self.event_call, 'tp') tp_name_offset = ram_dump.field_offset('struct tracepoint', 'name') tp = ram_dump.read_word(ftrace_list + tp_offset) name = ram_dump.read_word(tp + tp_name_offset) else: - name_offset = ram_dump.field_offset('struct ftrace_event_call', 'name') + name_offset = ram_dump.field_offset(self.event_call, 'name') name = ram_dump.read_word(ftrace_list + name_offset) type_offset = ram_dump.field_offset('struct trace_event', 'type') - fields_offset = ram_dump.field_offset('struct ftrace_event_class', 'fields') + fields_offset = ram_dump.field_offset(self.event_class, 'fields') common_field_list = ram_dump.address_of('ftrace_common_fields') field_next_offset = ram_dump.field_offset('struct ftrace_event_field', 'link') @@ -247,7 +282,7 @@ class DebugImage_v2(): self.formats_out = formats_out ftrace_events_list = ram_dump.address_of('ftrace_events') - next_offset = ram_dump.field_offset('struct ftrace_event_call', 'list') + next_offset = ram_dump.field_offset(self.event_call, 'list') list_walker = llist.ListWalker(ram_dump, ftrace_events_list, next_offset) list_walker.walk_prev(ftrace_events_list, self.ftrace_events_func, ram_dump) @@ -360,6 +395,52 @@ class DebugImage_v2(): print_out_str('--------') print_out_str(p.communicate()[0]) + def sorted_dump_data_clients(self, ram_dump, table, table_num_entries): + """ Returns a sorted list of 
(client_name, func, client_address) where + + client_address -- + the (struct msm_dump_entry*) which contains a client_id mapping to + client_name + + func -- + registered function in client_types to parse entries of + this type + + the return value is sorted in the same order as the client names + in client_types + """ + + dump_entry_id_offset = ram_dump.field_offset( + 'struct msm_dump_entry', 'id') + dump_entry_size = ram_dump.sizeof('struct msm_dump_entry') + results = list() + + client_table = dict(client_types) + # get first column of client_types + client_names = zip(*client_types)[0] + + for j in range(0, table_num_entries): + client_entry = table + j * dump_entry_size + client_id = ram_dump.read_u32( + client_entry + dump_entry_id_offset, False) + + if (client_id < 0 or + client_id > len(self.dump_data_id_lookup_table)): + print_out_str( + '!!! Invalid dump client id found {0:x}'.format(client_id)) + continue + + client_name = self.dump_data_id_lookup_table[client_id] + if client_name not in client_table: + print_out_str( + '!!! {0} Does not have an associated function. Skipping!'.format(client_name)) + continue + + results.append((client_name, client_table[client_name], client_entry)) + + results.sort(key=lambda(x): client_names.index(x[0])) + return results + def parse_dump_v2(self, ram_dump): self.dump_type_lookup_table = ram_dump.gdbmi.get_enum_lookup_table( 'msm_dump_type', 2) @@ -479,53 +560,56 @@ class DebugImage_v2(): table_version >> 20, table_version & 0xFFFFF, self.dump_table_id_lookup_table[entry_id], self.dump_type_lookup_table[entry_type], table_num_entries)) - for j in range(0, table_num_entries): + lst = self.sorted_dump_data_clients( + ram_dump, entry_addr + dump_table_entry_offset, + table_num_entries) + for (client_name, func, client_entry) in lst: print_out_str('--------') - client_entry = entry_addr + dump_table_entry_offset + j * dump_entry_size - client_id = ram_dump.read_u32(client_entry + dump_entry_id_offset, False) - client_type = ram_dump.read_u32(client_entry + dump_entry_type_offset, False) - client_addr = ram_dump.read_word(client_entry + dump_entry_addr_offset, False) - - if client_id < 0 or client_id > len(self.dump_data_id_lookup_table): - print_out_str( - '!!! Invalid dump client id found {0:x}'.format(client_id)) - continue + client_id = ram_dump.read_u32( + client_entry + dump_entry_id_offset, False) + client_type = ram_dump.read_u32( + client_entry + dump_entry_type_offset, False) + client_addr = ram_dump.read_word( + client_entry + dump_entry_addr_offset, False) if client_type > len(self.dump_type_lookup_table): print_out_str( '!!! 
Invalid dump client type found {0:x}'.format(client_type)) continue - dump_data_magic = ram_dump.read_u32(client_addr + dump_data_magic_offset, False) - dump_data_version = ram_dump.read_u32(client_addr + dump_data_version_offset, False) - dump_data_name = ram_dump.read_cstring(client_addr + dump_data_name_offset, - ram_dump.sizeof('((struct msm_dump_data *)0x0)->name'), False) - dump_data_addr = ram_dump.read_dword(client_addr + dump_data_addr_offset, False) - dump_data_len = ram_dump.read_dword(client_addr + dump_data_len_offset, False) + dump_data_magic = ram_dump.read_u32( + client_addr + dump_data_magic_offset, False) + dump_data_version = ram_dump.read_u32( + client_addr + dump_data_version_offset, False) + dump_data_name = ram_dump.read_cstring( + client_addr + dump_data_name_offset, + ram_dump.sizeof('((struct msm_dump_data *)0x0)->name'), + False) + dump_data_addr = ram_dump.read_dword( + client_addr + dump_data_addr_offset, False) + dump_data_len = ram_dump.read_dword( + client_addr + dump_data_len_offset, False) + print_out_str('Parsing debug information for {0}. Version: {1} Magic: {2:x} Source: {3}'.format( + client_name, dump_data_version, dump_data_magic, + dump_data_name)) + + if dump_data_magic is None: + print_out_str("!!! Address {0:x} is bogus! Can't parse!".format( + client_addr + dump_data_magic_offset)) + continue + + if dump_data_magic != MEMDUMPV2_MAGIC: + print_out_str("!!! Magic {0:x} doesn't match! No context will be parsed".format(dump_data_magic)) + continue + + getattr(DebugImage_v2, func)( + self, dump_data_version, dump_data_addr, + dump_data_addr + dump_data_len, client_id, ram_dump) - client_name = self.dump_data_id_lookup_table[client_id] - if client_name not in client_table: - print_out_str( - '!!! {0} Does not have an associated function. The parser needs to be updated!'.format(client_name)) - else: - print_out_str('Parsing debug information for {0}. Version: {1} Magic: {2:x} Source: {3}'.format( - client_name, dump_data_version, dump_data_magic, dump_data_name)) - - if dump_data_magic is None: - print_out_str( - "!!! Address {0:x} is bogus! Can't parse!".format(start)) - continue - - if dump_data_magic != MEMDUMPV2_MAGIC: - print_out_str( - "!!! Magic {0:x} doesn't match! No context will be parsed".format(dump_data_magic)) - continue - - func = client_table[client_name] - getattr(DebugImage_v2, func)(self, dump_data_version, dump_data_addr, dump_data_addr + dump_data_len, - client_id, ram_dump) - - self.qdss.dump_all(ram_dump) + self.qdss.dump_standard(ram_dump) + if not ram_dump.skip_qdss_bin: + self.qdss.save_etf_bin(ram_dump) + self.qdss.save_etr_bin(ram_dump) if ram_dump.qtf: self.parse_qtf(ram_dump) if ram_dump.dcc: diff --git a/linux-ramdump-parser-v2/iommulib.py b/linux-ramdump-parser-v2/iommulib.py index 5cf1981ccad72fb61b8c25bc7155f40c3b5e051b..edfbe3c1fba5b220de5f7c2013311916587e89a0 100644 --- a/linux-ramdump-parser-v2/iommulib.py +++ b/linux-ramdump-parser-v2/iommulib.py @@ -1,4 +1,4 @@ -# Copyright (c) 2014, The Linux Foundation. All rights reserved. +# Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -12,31 +12,139 @@ import rb_tree import linux_list as llist +ARM_SMMU_DOMAIN = 0 +MSM_SMMU_DOMAIN = 1 +MSM_SMMU_AARCH64_DOMAIN = 2 + + class Domain(object): - def __init__(self, domain_num, pg_table, redirect, ctx_list, client_name): + def __init__(self, pg_table, redirect, ctx_list, client_name, + domain_type=MSM_SMMU_DOMAIN, level=3, domain_num=-1): self.domain_num = domain_num self.pg_table = pg_table self.redirect = redirect self.ctx_list = ctx_list self.client_name = client_name + self.level = level + self.domain_type = domain_type def __repr__(self): return "#%d: %s" % (self.domain_num, self.client_name) + class IommuLib(object): def __init__(self, ramdump): self.ramdump = ramdump self.domain_list = [] root = self.ramdump.read_word('domain_root') - if root is None: + + list_head_attachments = self.ramdump.read_pointer( + 'iommu_debug_attachments') + + if list_head_attachments is not None: + list_head_arm_addr = self.ramdump.read_structure_field( + list_head_attachments, 'struct list_head', 'prev') + list_walker = llist.ListWalker( + self.ramdump, list_head_arm_addr, + self.ramdump.field_offset('struct iommu_debug_attachment', + 'list')) + list_walker.walk(list_head_attachments, + self._iommu_domain_find_default, + self.domain_list) + + if root is not None: + rb_walker = rb_tree.RbTreeWalker(self.ramdump) + rb_walker.walk(root, self._iommu_domain_func, self.domain_list) + + def _iommu_domain_find_default(self, node, domain_list): + domain_ptr = self.ramdump.read_structure_field( + node, 'struct iommu_debug_attachment', 'domain') + + if not domain_ptr: return - rb_walker = rb_tree.RbTreeWalker(self.ramdump) - rb_walker.walk(root, self._iommu_domain_func, self.domain_list) + + if self.ramdump.field_offset('struct iommu_domain', 'priv') \ + is not None: + priv_ptr = self.ramdump.read_structure_field( + domain_ptr, 'struct iommu_domain', 'priv') + + if not priv_ptr: + return + else: + priv_ptr = None + + arm_smmu_ops = self.ramdump.address_of('arm_smmu_ops') + + dev_ptr = self.ramdump.read_structure_field( + node, 'struct iommu_debug_attachment', 'dev') + + kobj_ptr = dev_ptr + self.ramdump.field_offset('struct device', 'kobj') + + client_name = self.ramdump.read_structure_cstring( + kobj_ptr, 'struct kobject', 'name') + + iommu_domain_ops = self.ramdump.read_structure_field( + domain_ptr, 'struct iommu_domain', 'ops') + + if iommu_domain_ops == arm_smmu_ops: + if priv_ptr is not None: + arm_smmu_domain_ptr = priv_ptr + else: + arm_smmu_domain_ptr = self.ramdump.container_of( + domain_ptr, 'struct arm_smmu_domain', 'domain') + + pgtbl_ops_ptr = self.ramdump.read_structure_field( + arm_smmu_domain_ptr, 'struct arm_smmu_domain', 'pgtbl_ops') + + pg_table = 0 + level = 0 + if pgtbl_ops_ptr != 0: + io_pgtable_ptr = self.ramdump.container_of( + pgtbl_ops_ptr, 'struct io_pgtable', 'ops') + + arm_lpae_io_pgtable_ptr = self.ramdump.container_of( + io_pgtable_ptr, 'struct arm_lpae_io_pgtable', 'iop') + + pg_table = self.ramdump.read_structure_field( + arm_lpae_io_pgtable_ptr, 'struct arm_lpae_io_pgtable', + 'pgd') + + level = self.ramdump.read_structure_field( + arm_lpae_io_pgtable_ptr, 'struct arm_lpae_io_pgtable', + 'levels') + + domain_create = Domain(pg_table, 0, [], client_name, + ARM_SMMU_DOMAIN, level) + domain_list.append(domain_create) + + else: + priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv', + 'pt') + pgtable_offset = 
self.ramdump.field_offset('struct msm_iommu_pt', + 'fl_table') + redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt', + 'redirect') + + if priv_pt_offset is not None: + pg_table = self.ramdump.read_u64( + priv_ptr + priv_pt_offset + pgtable_offset) + redirect = self.ramdump.read_u64( + priv_ptr + priv_pt_offset + redirect_offset) + + if (self.ramdump.is_config_defined('CONFIG_IOMMU_AARCH64')): + domain_create = Domain(pg_table, redirect, [], client_name, + MSM_SMMU_AARCH64_DOMAIN) + else: + domain_create = Domain(pg_table, redirect, [], client_name, + MSM_SMMU_DOMAIN) + + domain_list.append(domain_create) def _iommu_list_func(self, node, ctx_list): ctx_drvdata_name_ptr = self.ramdump.read_word( - node + self.ramdump.field_offset('struct msm_iommu_ctx_drvdata', 'name')) + node + self.ramdump.field_offset('struct msm_iommu_ctx_drvdata', + 'name')) ctxdrvdata_num_offset = self.ramdump.field_offset( 'struct msm_iommu_ctx_drvdata', 'num') num = self.ramdump.read_u32(node + ctxdrvdata_num_offset) @@ -70,13 +178,17 @@ class IommuLib(object): 'struct msm_iommu_priv', 'list_attached') if list_attached_offset is not None: - list_attached = self.ramdump.read_word(priv_ptr + list_attached_offset) + list_attached = self.ramdump.read_word(priv_ptr + + list_attached_offset) else: list_attached = None - priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv', 'pt') - pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt', 'fl_table') - redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt', 'redirect') + priv_pt_offset = self.ramdump.field_offset('struct msm_iommu_priv', + 'pt') + pgtable_offset = self.ramdump.field_offset('struct msm_iommu_pt', + 'fl_table') + redirect_offset = self.ramdump.field_offset('struct msm_iommu_pt', + 'redirect') if priv_pt_offset is not None: pg_table = self.ramdump.read_word( @@ -87,12 +199,14 @@ class IommuLib(object): # On some builds we are unable to look up the offsets so hardcode # the offsets. pg_table = self.ramdump.read_word(priv_ptr + 0) - redirect = self.ramdump.read_u32(priv_ptr + self.ramdump.sizeof('void *')) - - # Note: On some code bases we don't have this pg_table and redirect in the priv structure (see msm_iommu_sec.c). It only - # contains list_attached. If this is the case we can detect that by checking whether - # pg_table == redirect (prev == next pointers of the attached - # list). + redirect = self.ramdump.read_u32(priv_ptr + + self.ramdump.sizeof('void *')) + + # Note: On some code bases we don't have this pg_table and redirect + # in the priv structure (see msm_iommu_sec.c). It only contains + # list_attached. If this is the case we can detect that by checking + # whether pg_table == redirect (prev == next pointers of the + # attached list). if pg_table == redirect: # This is a secure domain. We don't have access to the page # tables. 
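Editor's note: the new _iommu_domain_find_default() above recovers enclosing structures from member pointers twice (pgtbl_ops -> io_pgtable, iop -> arm_lpae_io_pgtable) via container_of(). A minimal sketch of the arithmetic that helper performs, assuming only a ramdump-like object exposing the field_offset() call already used throughout this patch (the standalone function name is illustrative, not part of the parser's API):

def container_of_sketch(ramdump, member_addr, struct_name, member_name):
    # The enclosing struct starts at the member's address minus the
    # member's offset within that struct type.
    return member_addr - ramdump.field_offset(struct_name, member_name)

# e.g. io_pgtable_ptr = container_of_sketch(ramdump, pgtbl_ops_ptr,
#                                           'struct io_pgtable', 'ops')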
@@ -103,8 +217,15 @@ class IommuLib(object): if list_attached is not None and list_attached != 0: list_walker = llist.ListWalker( self.ramdump, list_attached, - self.ramdump.field_offset('struct msm_iommu_ctx_drvdata', 'attached_elm')) + self.ramdump.field_offset('struct msm_iommu_ctx_drvdata', + 'attached_elm')) list_walker.walk(list_attached, self._iommu_list_func, ctx_list) - domain_list.append( - Domain(domain_num, pg_table, redirect, ctx_list, client_name)) + if (self.ramdump.is_config_defined('CONFIG_IOMMU_AARCH64')): + domain_create = Domain(pg_table, redirect, ctx_list, client_name, + MSM_SMMU_AARCH64_DOMAIN, domain_num=domain_num) + else: + domain_create = Domain(pg_table, redirect, ctx_list, client_name, + MSM_SMMU_DOMAIN, domain_num=domain_num) + + domain_list.append(domain_create) diff --git a/linux-ramdump-parser-v2/linux_list.py b/linux-ramdump-parser-v2/linux_list.py index c1457f59294e4ea2bc7467a5e261bceb21f2dbcb..d7381c637a6ea41249bdd6ea21749f0590e84847 100644 --- a/linux-ramdump-parser-v2/linux_list.py +++ b/linux-ramdump-parser-v2/linux_list.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. +# Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -31,11 +31,35 @@ class ListWalker(object): self.list_elem_offset = list_elem_offset self.last_node = node_addr self.seen_nodes = [] + self.curr_node = node_addr + + def __iter__(self): + return self + + def next(self): + next_node_addr = self.curr_node + \ + self.ram_dump.field_offset('struct list_head', 'next') + next_node = self.ram_dump.read_word(next_node_addr) + + self.curr_node = next_node + if next_node == self.last_node: + raise StopIteration() + elif next_node in self.seen_nodes: + print_out_str( + '[!] WARNING: Cycle found in attach list. List is corrupted!') + raise StopIteration() + else: + self.seen_nodes.append(next_node) + + return next_node - self.list_elem_offset def is_empty(self): """Return True if the list is empty, False otherwise. """ + if self.last_node is None: + return True + next_node_addr = self.last_node + self.ram_dump.field_offset('struct list_head', 'next') next_node = self.ram_dump.read_word(next_node_addr) diff --git a/linux-ramdump-parser-v2/lpaeiommulib.py b/linux-ramdump-parser-v2/lpaeiommulib.py index 64fe5eed0f7c2b64109083697528c8502b742df1..b99aa0db6c5ce02636e3238a2d7361c6982470a3 100644 --- a/linux-ramdump-parser-v2/lpaeiommulib.py +++ b/linux-ramdump-parser-v2/lpaeiommulib.py @@ -157,8 +157,9 @@ def get_coalesced_mappings(flat_mappings): cc = OrderedDict(sorted(cc.items())) return cc -def parse_long_form_tables(dump, d): - fname = 'msm_iommu_domain_%02d.txt' % (d.domain_num) + +def parse_long_form_tables(dump, d, domain_num): + fname = 'msm_iommu_domain_%02d.txt' % (domain_num) with dump.open_file(fname) as outfile: print_out_str('LPAE Iommu page tables: ' + fname) diff --git a/linux-ramdump-parser-v2/mm.py b/linux-ramdump-parser-v2/mm.py old mode 100755 new mode 100644 index c6790766e533d60db16cf499522e9ab815fc26fb..02fa7aa918566760d150651f7fc3148cf4f5b07d --- a/linux-ramdump-parser-v2/mm.py +++ b/linux-ramdump-parser-v2/mm.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -24,6 +24,7 @@ def get_debug_flags(ramdump, page): flagval = ramdump.read_word(page + debug_flag_offset) return flagval + def page_zonenum(page_flags): # save this in a variable somewhere... return (page_flags >> 26) & 3 @@ -134,7 +135,11 @@ def get_vmemmap(ramdump): # See: include/asm-generic/pgtable-nopud.h, # arch/arm64/include/asm/pgtable-hwdef.h, # arch/arm64/include/asm/pgtable.h - nlevels = int(ramdump.get_config_val("CONFIG_ARM64_PGTABLE_LEVELS")) + if (ramdump.kernel_version < (3, 18, 0)): + nlevels = int(ramdump.get_config_val("CONFIG_ARM64_PGTABLE_LEVELS")) + else: + nlevels = int(ramdump.get_config_val("CONFIG_PGTABLE_LEVELS")) + if ramdump.is_config_defined("CONFIG_ARM64_64K_PAGES"): page_shift = 16 else: @@ -146,7 +151,15 @@ def get_vmemmap(ramdump): spsize = ramdump.sizeof('struct page') vmemmap_size = bitops.align((1 << (va_bits - page_shift)) * spsize, pud_size) - vmalloc_end = ramdump.page_offset - pud_size - vmemmap_size + if (ramdump.kernel_version < (3, 18, 31)): + vmalloc_end = ramdump.page_offset - pud_size - vmemmap_size + # vmalloc_end = 0xFFFFFFBC00000000 + else: + # for version >= 3.18.31, + # vmemmap is shifted to base addr (0x80000000) pfn. + pfn_offset = (ramdump.phys_offset >> page_shift) + offset = pfn_offset * spsize + vmalloc_end = ramdump.page_offset - pud_size - vmemmap_size - offset return vmalloc_end diff --git a/linux-ramdump-parser-v2/mmu.py b/linux-ramdump-parser-v2/mmu.py index 16bc2220d102337991cf16275edf1e9a5a90e933..4444cd2dd4eaf067f90120688b0d3c7a5bd014ad 100644 --- a/linux-ramdump-parser-v2/mmu.py +++ b/linux-ramdump-parser-v2/mmu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -81,7 +81,8 @@ class Armv7MMU(MMU): self.secondary_page_tables = [ [0 for col in range(256)] for row in range(4096)] - msm_ttbr0 = self.ramdump.phys_offset + self.ramdump.swapper_pg_dir_addr + msm_ttbr0 = self.ramdump.kernel_virt_to_phys( + self.ramdump.swapper_pg_dir_addr) self.ttbr = msm_ttbr0 virt_address = 0x0 gb_i = 0 @@ -392,7 +393,8 @@ class Armv7LPAEMMU(MMU): pass def page_table_walk(self, virt): - self.ttbr = self.ramdump.swapper_pg_dir_addr + self.ramdump.phys_offset + self.ttbr = self.ramdump.kernel_virt_to_phys( + self.ramdump.swapper_pg_dir_addr) info = self.translate(virt) return info.phys if info is not None else None @@ -504,7 +506,7 @@ class Armv8MMU(MMU): else: raise Exception( 'Invalid stage 1 first- or second-level translation\ndescriptor: (%s)\naddr: (%s)' - % (str(descriptor), str(addr)) + % (str(descriptor), str(addr)) ) return descriptor @@ -577,7 +579,8 @@ class Armv8MMU(MMU): def page_table_walk(self, virt): - self.ttbr = self.ramdump.swapper_pg_dir_addr + self.ramdump.phys_offset + self.ttbr = self.ramdump.kernel_virt_to_phys( + self.ramdump.swapper_pg_dir_addr) virt_r = Register(virt, zl_index=(47,39), @@ -586,7 +589,10 @@ class Armv8MMU(MMU): tl_index=(20,12), page_index=(11,0)) - fl_desc = self.do_fl_sl_level_lookup(self.ttbr, virt_r.fl_index, 12, 30) + try: + fl_desc = self.do_fl_sl_level_lookup(self.ttbr, virt_r.fl_index, 12, 30) + except: + return None if fl_desc.dtype == Armv8MMU.DESCRIPTOR_BLOCK: return self.fl_block_desc_2_phys(fl_desc, virt_r) diff --git a/linux-ramdump-parser-v2/parsers/cpr3_info.py b/linux-ramdump-parser-v2/parsers/cpr3_info.py index 49a166642b5d7dd263b62bb4529f17d9390e8ae4..ca5d9ddf472c73945a904408f1bb9ad753790d20 100644 --- a/linux-ramdump-parser-v2/parsers/cpr3_info.py +++ b/linux-ramdump-parser-v2/parsers/cpr3_info.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -14,7 +14,7 @@ from print_out import print_out_str from parser_util import register_parser, RamParser from collections import defaultdict - +CPRH_CTRL_TYPE = 2 @register_parser('--cpr3-info', 'Print CPR3 information') class CPR3Info(RamParser): def __init__(self, *args): @@ -264,7 +264,7 @@ class CPR3Info(RamParser): "not yet executed") self.output.append(tmp) - def dump_cpr3_regulator_state(self, vreg_addr): + def dump_cpr3_regulator_state(self, vreg_addr, ctrl_type): tmp = "" if vreg_addr is None: return @@ -318,13 +318,14 @@ class CPR3Info(RamParser): 'speed_bin_fuse')) tmp += "%-30s = %d\n" % ("Speed-bin fuse", speed_bin_fuse) - tmp += "\n%-30s = %d/%d\n" % ("CPR corner", current_corner + 1, - corner_count) - if vreg_enabled is True: - vreg_enabled = 1 - else: - vreg_enabled = 0 - tmp += "%-30s = %d\n" % ("Enabled", vreg_enabled) + if ctrl_type != CPRH_CTRL_TYPE: + tmp += "\n%-30s = %d/%d\n" % ("CPR corner", current_corner + 1, + corner_count) + if vreg_enabled is True: + vreg_enabled = 1 + else: + vreg_enabled = 0 + tmp += "%-30s = %d\n" % ("Enabled", vreg_enabled) if ldo_regulator_addr != 0: if ldo_mode_allowed is True: @@ -340,8 +341,10 @@ class CPR3Info(RamParser): vreg_addr + self.ramdump.field_offset( 'struct cpr3_regulator', 'corner')) size = self.ramdump.sizeof("struct cpr3_corner") - corner_addr = corner_addr + current_corner * size - self.dump_cpr3_corner_info(corner_addr, 0, 1, 0) + + if ctrl_type != CPRH_CTRL_TYPE: + corner_addr = corner_addr + current_corner * size + self.dump_cpr3_corner_info(corner_addr, 0, 1, 0) self.dump_cpr3_regulator_voltages(vreg_addr) rdev_addr = self.ramdump.read_word( @@ -349,9 +352,10 @@ class CPR3Info(RamParser): 'rdev')) offset = self.ramdump.field_offset('struct regulator_dev', 'consumer_list') - self.dump_consumer(rdev_addr + offset) + if ctrl_type != CPRH_CTRL_TYPE: + self.dump_consumer(rdev_addr + offset) - def dump_cpr3_thread_state(self, thread_addr): + def dump_cpr3_thread_state(self, thread_addr, ctrl_type): tmp = "" thread_id = self.ramdump.read_u32( thread_addr + self.ramdump.field_offset( @@ -361,10 +365,12 @@ class CPR3Info(RamParser): tmp += "-" * 80 + "\n" tmp += "Thread: %d\n" % thread_id tmp += "-" * 80 + "\n" - tmp += "CPR aggregated voltages:\n" + if ctrl_type != CPRH_CTRL_TYPE: + tmp += "CPR aggregated voltages:\n" self.output.append(tmp) + if ctrl_type != CPRH_CTRL_TYPE: + self.dump_cpr3_corner_info(aggr_corner_addr, 0, 1, 0) - self.dump_cpr3_corner_info(aggr_corner_addr, 0, 1, 0) vreg_addr = self.ramdump.read_word( thread_addr + self.ramdump.field_offset('struct cpr3_thread', 'vreg')) @@ -375,7 +381,7 @@ class CPR3Info(RamParser): size_reg = self.ramdump.sizeof('struct cpr3_regulator') for i in range(vreg_count): - self.dump_cpr3_regulator_state(vreg_addr + i * size_reg) + self.dump_cpr3_regulator_state(vreg_addr + i * size_reg, ctrl_type) def cpr_walker(self, ctrl_addr): if ctrl_addr == self.head: @@ -397,21 +403,33 @@ class CPR3Info(RamParser): ctrl_addr + self.ramdump.field_offset( 'struct cpr3_controller', 'cpr_allowed_sw')) + cpr_allowed_hw = self.ramdump.read_bool( + ctrl_addr + self.ramdump.field_offset( + 'struct cpr3_controller', + 'cpr_allowed_hw')) cpr_enabled = self.ramdump.read_bool( ctrl_addr + self.ramdump.field_offset( 'struct cpr3_controller', 'cpr_enabled')) - if supports_hw_closed_loop == 1: - if cpr_allowed_sw == 0: + ctrl_type = self.ramdump.read_int( + 
ctrl_addr + self.ramdump.field_offset( + 'struct cpr3_controller', + 'ctrl_type')) + + if cpr_allowed_sw == 0 or cpr_allowed_hw == 0: cpr_mode = "open-loop" - elif use_hw_closed_loop == 0: + elif supports_hw_closed_loop == 1 and ctrl_type != CPRH_CTRL_TYPE: + if use_hw_closed_loop == 0: cpr_mode = "SW closed-loop" else: cpr_mode = "HW closed-loop" - else: - if cpr_allowed_sw == 0: + elif supports_hw_closed_loop == 1 and ctrl_type == CPRH_CTRL_TYPE: + if use_hw_closed_loop == 0: cpr_mode = "open-loop" else: - cpr_mode = "closed-loop" + cpr_mode = "full HW closed-loop" + else: + cpr_mode = "closed-loop" + tmp = "" if cpr_controller_name is None: return @@ -425,17 +443,27 @@ class CPR3Info(RamParser): tmp = "" self.get_apm_threshold(ctrl_addr) self.get_aging_info(ctrl_addr) - self.dump_vdd_regulator(ctrl_addr) - if cpr_allowed_sw == 1 and use_hw_closed_loop == 1: + if ctrl_type != CPRH_CTRL_TYPE: + self.dump_vdd_regulator(ctrl_addr) + + if cpr_allowed_sw == 1 and use_hw_closed_loop == 1 and ctrl_type != CPRH_CTRL_TYPE: tmp = "* The actual voltage at the PMIC may be anywhere " \ "between the aggregated ceiling and floor voltage when"\ " using CPR HW closed-loop mode.\n" - self.output.append(tmp) - tmp = "" - aggr_corner_addr = ctrl_addr + self.ramdump.field_offset( - 'struct cpr3_controller', 'aggr_corner') - self.output.append("\nCPR aggregated voltages:\n") - self.dump_cpr3_corner_info(aggr_corner_addr, 0, 0, 0) + elif ctrl_type == CPRH_CTRL_TYPE: + tmp = "* With full HW closed-loop operation, the expected PMIC " \ + "voltage can be checked via the CPRH_STATUS and " \ + "L2_SAW4_PMIC_STS registers in the DCC register dump.\n" + + self.output.append(tmp) + tmp = "" + + if ctrl_type != CPRH_CTRL_TYPE: + aggr_corner_addr = ctrl_addr + self.ramdump.field_offset( + 'struct cpr3_controller', 'aggr_corner') + self.output.append("\nCPR aggregated voltages:\n") + self.dump_cpr3_corner_info(aggr_corner_addr, 0, 0, 0) + thread_addr = self.ramdump.read_word( ctrl_addr + self.ramdump.field_offset( @@ -446,7 +474,7 @@ class CPR3Info(RamParser): 'struct cpr3_controller', 'thread_count')) size_thr = self.ramdump.sizeof('struct cpr3_thread') for i in range(thread_count): - self.dump_cpr3_thread_state(thread_addr + i * size_thr) + self.dump_cpr3_thread_state(thread_addr + i * size_thr, ctrl_type) # print new line for each regulator struct tmp += '\n' self.output.append(tmp) diff --git a/linux-ramdump-parser-v2/parsers/debug_image.py b/linux-ramdump-parser-v2/parsers/debug_image.py index 1d0d1efcc38c1ce56766705789ef0d49552ea2e8..b8cabea8a2443046d328cfe788d8a2075e65e355 100644 --- a/linux-ramdump-parser-v2/parsers/debug_image.py +++ b/linux-ramdump-parser-v2/parsers/debug_image.py @@ -190,7 +190,10 @@ class DebugImage(RamParser): client_end, client_name) print_out_str('--------') - self.qdss.dump_all(self.ramdump) + self.qdss.dump_standard(self.ramdump) + if not self.ramdump.skip_qdss_bin: + self.qdss.save_etf_bin(self.ramdump) + self.qdss.save_etr_bin(self.ramdump) def parse(self): # use the mem_dump_data variable to detect if debug image feature was compiled in, @@ -199,7 +202,7 @@ class DebugImage(RamParser): if self.ramdump.address_of('mem_dump_data'): self.parse_dump() elif self.ramdump.address_of('memdump'): - regs = DebugImage_v2() + regs = DebugImage_v2(self.ramdump) regs.parse_dump_v2(self.ramdump) else: print_out_str( diff --git a/linux-ramdump-parser-v2/parsers/iommu.py b/linux-ramdump-parser-v2/parsers/iommu.py index f4d01725869ab116a53b0524cd58c61b723be5c8..5a8146c7ce9d086314ed3626624ab40680f0f6c1 
100644 --- a/linux-ramdump-parser-v2/parsers/iommu.py +++ b/linux-ramdump-parser-v2/parsers/iommu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. +# Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -14,8 +14,10 @@ import math from print_out import print_out_str from parser_util import register_parser, RamParser from sizes import SZ_4K, SZ_64K, SZ_1M, SZ_16M, get_order, order_size_strings -from iommulib import IommuLib +from iommulib import IommuLib, MSM_SMMU_DOMAIN, MSM_SMMU_AARCH64_DOMAIN, ARM_SMMU_DOMAIN from lpaeiommulib import parse_long_form_tables +from aarch64iommulib import parse_aarch64_tables + @register_parser('--print-iommu-pg-tables', 'Print IOMMU page tables') class IOMMU(RamParser): @@ -303,9 +305,9 @@ class IOMMU(RamParser): self.out_file.write('0x%08x--0x%08x [0x%08x] [UNMAPPED]\n' % (mapping.virt_start, mapping.virt_end, mapping.virt_size())) - def parse_short_form_tables(self, d): + def parse_short_form_tables(self, d, domain_num): self.out_file = self.ramdump.open_file( - 'msm_iommu_domain_%02d.txt' % (d.domain_num)) + 'msm_iommu_domain_%02d.txt' % (domain_num)) redirect = 'OFF' if d.redirect is None: redirect = 'UNKNOWN' @@ -318,8 +320,9 @@ class IOMMU(RamParser): iommu_context += '%s (%d) ' % (name, num) iommu_context = iommu_context.strip() - self.out_file.write('IOMMU Context: %s. Domain: %s (%d) [L2 cache redirect for page tables is %s]\n' % ( - iommu_context, d.client_name, d.domain_num, redirect)) + self.out_file.write('IOMMU Context: %s. Domain: %s' + '[L2 cache redirect for page tables is %s]\n' % ( + iommu_context, d.client_name, redirect)) self.out_file.write( '[VA Start -- VA End ] [Size ] [PA Start -- PA End ] [Size ] [Read/Write][Page Table Entry Size]\n') if d.pg_table == 0: @@ -339,8 +342,11 @@ class IOMMU(RamParser): '[!] WARNING: IOMMU domains was not found in this build. No IOMMU page tables will be generated') return - for d in self.domain_list: + for (domain_num, d) in enumerate(self.domain_list): if self.ramdump.is_config_defined('CONFIG_IOMMU_LPAE'): - parse_long_form_tables(self.ramdump, d) - else: - self.parse_short_form_tables(d) + parse_long_form_tables(self.ramdump, d, domain_num) + elif (d.domain_type == MSM_SMMU_DOMAIN): + self.parse_short_form_tables(d, domain_num) + elif ((d.domain_type == ARM_SMMU_DOMAIN) or + (d.domain_type == MSM_SMMU_AARCH64_DOMAIN)): + parse_aarch64_tables(self.ramdump, d, domain_num) diff --git a/linux-ramdump-parser-v2/parsers/ion_buffer_parse.py b/linux-ramdump-parser-v2/parsers/ion_buffer_parse.py new file mode 100644 index 0000000000000000000000000000000000000000..47972f19d397e5c2582afa6ff356bb0bec1c7806 --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/ion_buffer_parse.py @@ -0,0 +1,371 @@ +""" +Copyright (c) 2016, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +from parser_util import register_parser, RamParser + +RB_PARENT_COLOR_MASK = 0xFFFFFFFFFFFFFFFC +grand_total = 0 +TASK_NAME_LENGTH = 16 +ion_heap_buffers = [] + + +def bytes_to_KB(bytes): + kb_val = 0 + if bytes != 0: + kb_val = bytes / 1024 + return kb_val + + +def do_dump_ionbuff_info(self, ramdump, ion_info): + addressspace = 8 + heap_addr_array = [] + ion_info = ramdump.open_file('ionbuffer.txt') + # read num of heaps + number_of_heaps = ramdump.read_word('num_heaps') + ion_info.write('Number of heaps:{0} \n'.format(number_of_heaps)) + + # get heap starting address + heap_addr = ramdump.read_pointer('heaps') + + if self.ramdump.arm64: + addressspace = 8 + else: + addressspace = 4 + + # get address of all heaps + nIndex = 0 + for nIndex in range(0, number_of_heaps): + heap_addr_array.append(heap_addr + (nIndex*addressspace)) + + # parse a heap + nIndex = 0 + for nIndex in range(0, number_of_heaps): + str = "\n\n parsing {0:0} of {1:0} heap Heap: 0x{2:x}" + ion_info.write(str.format( + nIndex + 1, + number_of_heaps, + ramdump.read_word( + heap_addr_array[nIndex]))) + parse_heap(self, ramdump, heap_addr_array[nIndex], ion_info) + ion_info.write( + '\n Total ION buffer size: {0:1} KB'.format( + bytes_to_KB(grand_total))) + + +def parse_heap(self, ramdump, heap_addr, ion_info): + global grand_total + + nr_clients = 0 + total_orphan_buffer_size = 0 + ion_heap = ramdump.read_word(heap_addr) + ionheap_id = ramdump.read_structure_field( + ion_heap, 'struct ion_heap', 'id') + ionheap_name_addr = ramdump.read_structure_field( + ion_heap, 'struct ion_heap', 'name') + ionheap_name = ramdump.read_cstring(ionheap_name_addr, TASK_NAME_LENGTH) + ionheap_type = ramdump.read_structure_field( + ion_heap, 'struct ion_heap', 'type') + ionheap_total_allocated = ramdump.read_structure_field( + ion_heap, 'struct ion_heap', 'total_allocated.counter') + ionheap_total_handles = ramdump.read_structure_field( + ion_heap, 'struct ion_heap', 'total_handles.counter') + self.ion_handle_node_offset = ramdump.field_offset( + 'struct ion_handle', 'node') + + ion_info.write("\n*********************************************") + str = "\n Heap ID : {0} Heap Type: {1} Heap Name : {2}\n" + ion_info.write(str.format(ionheap_id, ionheap_type, ionheap_name)) + ion_info.write('\n Total allocated : {0:1} KB'.format( + bytes_to_KB(ionheap_total_allocated))) + ion_info.write('\n Total Handles : {0:1} KB'.format( + bytes_to_KB(ionheap_total_handles))) + orphan = bytes_to_KB(ionheap_total_allocated - ionheap_total_handles) + ion_info.write('\n Orphan : {0:1} KB'.format(orphan)) + 
ion_info.write("\n*********************************************") + + ion_dev = ramdump.read_structure_field( + ion_heap, 'struct ion_heap', 'dev') + + ion_dev_rb_root = ramdump.read_structure_field( + ion_dev, 'struct ion_device', 'clients') + + if ionheap_total_allocated != 0: + nr_clients = show_ion_dev_client( + self, ramdump, + ion_dev_rb_root, + ionheap_id, ion_info) + + str = "\n \nTotal number of clients: {0:1}" + ion_info.write(str.format(nr_clients)) + ion_info.write("\n ----------------------------------") + str = "\n orphaned allocations (info is from last known client):\n" + ion_info.write(str) + total_orphan_buffer_size, total_buffer_size = \ + parse_orphan_buffers(self, ramdump, ion_dev, ionheap_id, ion_info) + ion_info.write("\n ----------------------------------") + ion_info.write( + '\n total orphan size: {0} KB'.format( + bytes_to_KB(total_orphan_buffer_size))) + ion_info.write( + '\n total buffer size: {0} KB'.format( + bytes_to_KB(total_buffer_size))) + ion_info.write("\n ----------------------------------") + grand_total = grand_total + total_buffer_size + + +def parse_orphan_buffers(self, ramdump, ion_dev, heap_id, ion_info): + orphan_buffer_size = 0 + total_buffer_size = 0 + ion_dev_buffer_rb_root = ramdump.read_structure_field( + ion_dev, 'struct ion_device', 'buffers') + + rb_node = parser( + self, 1, ramdump, ion_dev_buffer_rb_root, ion_info) + + ion_buffer_rb_node_offset = ramdump.field_offset( + 'struct ion_buffer', 'node') + ion_buffer_task_comm_offset = ramdump.field_offset( + 'struct ion_buffer', 'task_comm') + ion_buffer_ref_offset = ramdump.field_offset( + 'struct ion_buffer', 'ref') + str = "\n buffer: 0x{0:x}, Buffer size: {1} KB " + str = str + "comm: {2} PID: {3} kmap count: {4} ref_count : {5}" + while rb_node != 0: + ion_buffer = rb_node - ion_buffer_rb_node_offset + ion_buffer_ref_add = ion_buffer + ion_buffer_ref_offset + ion_buffer_heap = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'heap') + ion_heap_id = ramdump.read_structure_field( + ion_buffer_heap, 'struct ion_heap', 'id') + ion_buffer_size = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'size') + ion_buffer_handlecount = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'handle_count') + ref_counter = ramdump.read_structure_field( + ion_buffer_ref_add, 'struct kref', 'refcount.counter') + if heap_id == ion_heap_id: + total_buffer_size = total_buffer_size + ion_buffer_size + # if orphaned allocation + if ion_buffer_handlecount == 0: + ion_buffer_pid = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'pid') + ion_buffer_kmap_count = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'kmap_cnt') + client_name = ramdump.read_cstring( + (ion_buffer + ion_buffer_task_comm_offset), + TASK_NAME_LENGTH) + ion_info.write(str.format( + ion_buffer, + bytes_to_KB(ion_buffer_size), + client_name, + ion_buffer_pid, + ion_buffer_kmap_count, + ref_counter)) + orphan_buffer_size = orphan_buffer_size + ion_buffer_size + rb_node = parser(self, 2, ramdump, rb_node, ion_info) + return orphan_buffer_size, total_buffer_size + + +def show_ion_dev_client( + self, + ramdump, + ion_dev_rb_root, + ionheap_id, ion_info): + global ion_heap_buffers + nr_clients = 0 + client_name = 0 + rb_node = parser(self, 1, ramdump, ion_dev_rb_root, ion_info) + ion_client_node_offset = ramdump.field_offset( + 'struct ion_client', 'node') + task_comm_offset = ramdump.field_offset( + 'struct task_struct', 'comm') + tempstr = "\n\n CLIENT: (struct 
ion_client *)0x{0:x} , " + str = tempstr + "task : {1} / ion_client : {2} / PID: {3} / Size : {4} KB" + str1 = tempstr + "ion_client : {1} / PID: {2} / Size : {3} KB" + if rb_node != 0: + while rb_node != 0: + ion_client = rb_node - ion_client_node_offset + heap_size = traverse_ion_heap_buffer( + self, + ramdump, + ion_client, + ionheap_id, + ion_info) + if heap_size > 0: + nr_clients = nr_clients + 1 + ion_client_task = ramdump.read_structure_field( + ion_client, 'struct ion_client', 'task') + task_comm = ion_client_task + task_comm_offset + client_name = ramdump.read_cstring( + task_comm, TASK_NAME_LENGTH) + ion_client_name = ramdump.read_structure_field( + ion_client, + 'struct ion_client', + 'display_name') + ion_client_name = ramdump.read_cstring( + ion_client_name, + TASK_NAME_LENGTH) + + client_PID = ramdump.read_structure_field( + ion_client, 'struct ion_client', 'pid') + if ion_client_task != 0: + ion_info.write(str.format( + ion_client, client_name, ion_client_name, + client_PID, bytes_to_KB(heap_size))) + else: + ion_info.write(str1.format( + ion_client, ion_client_name, + client_PID, bytes_to_KB(heap_size))) + for heap_buffer in ion_heap_buffers: + ion_info.write(heap_buffer) + rb_node = parser(self, 2, ramdump, rb_node, ion_info) + return nr_clients + + +def traverse_ion_heap_buffer(self, ramdump, ion_client, ionheap_id, ion_info): + global ion_heap_buffers + ion_handle_root_offset = ramdump.field_offset( + 'struct ion_client', 'handles') + ion_handle_root_address = ion_client + ion_handle_root_offset + ion_buffer_heap_size = 0 + ion_heap_buffers = [] + str = "\n (+) ion_buffer: 0x{0:x} size: {1:0} KB Handle Count: {2:0}" + ion_handle_rb_node = parser( + self, 1, ramdump, ion_handle_root_address, ion_info) + while ion_handle_rb_node != 0: + ion_handle = ion_handle_rb_node - self.ion_handle_node_offset + ion_buffer = ramdump.read_structure_field( + ion_handle, 'struct ion_handle', 'buffer') + ion_buffer_size = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'size') + ion_buffer_heap = ramdump.read_structure_field( + ion_buffer, 'struct ion_buffer', 'heap') + ion_heap_id = ramdump.read_structure_field( + ion_buffer_heap, 'struct ion_heap', 'id') + if ionheap_id == ion_heap_id: + ion_buffer_heap_size = ion_buffer_heap_size + ion_buffer_size + ion_buffer_handlecount = ramdump.read_structure_field( + ion_buffer, + 'struct ion_buffer', 'handle_count') + temp = str.format( + ion_buffer, + bytes_to_KB(ion_buffer_size), + ion_buffer_handlecount) + ion_heap_buffers.append(temp) + ion_handle_rb_node = parser( + self, 2, ramdump, ion_handle_rb_node, ion_info) + return ion_buffer_heap_size + + +def parser(self, arg, ramdump, node, ion_info): + rb_root = 0 + last_node = 0 + self.orphan_size = 0 + rbnode_left_offset = ramdump.field_offset('struct rb_node', 'rb_left') + temp = ramdump.read_word(node) + if temp == 0: + return 0 + if arg == 1: + rb_root = find_rb_root(self, ramdump, node, ion_info) + last_node = find_rb_first( + self, ramdump, rb_root, rbnode_left_offset, ion_info) + if arg == 2: + last_node = find_rb_next( + self, arg, ramdump, node, rbnode_left_offset, ion_info) + return last_node + + +def find_rb_next(self, arg, ramdump, node, rbnode_left_offset, ion_info): + parent = cal_rb_parent(self, ramdump, node, ion_info) + tmp_node = 0 + if parent == node: + ion_info.write("RETURNING NULL") + return 0 + rbnode_right_offset = ramdump.field_offset('struct rb_node', 'rb_right') + rb_right = ramdump.read_word(node + rbnode_right_offset) + if rb_right != 0: # right node 
exist + next_rb_node = find_rb_first( + self, ramdump, rb_right, rbnode_left_offset, ion_info) + return next_rb_node + else: # no right node, parse left node + flag = 1 + while flag: + if parent == 0 or None: + tmp_node = 0 + parent = 0 + else: + parent = cal_rb_parent(self, ramdump, node, ion_info) + tmp_node = ramdump.read_word(parent + rbnode_right_offset) + if tmp_node == node: + node = parent + continue + else: + return parent + return 0 + + +def find_rb_first(self, ramdump, node, rbnode_left_offset, ion_info): + last_node = node + while node != 0: + last_node = node + node = ramdump.read_word(node + rbnode_left_offset) + return last_node + + +def cal_rb_parent(self, ramdump, ion_dev_rb_root, ion_info): + rbnode_color_offset = ramdump.field_offset( + 'struct rb_node', '__rb_parent_color') + color = ramdump.read_word(ion_dev_rb_root + rbnode_color_offset) + color = color & RB_PARENT_COLOR_MASK + return color + + +def find_rb_root(self, ramdump, ion_dev_rb_root, ion_info): + parent = ion_dev_rb_root + rbnode_color_offset = ramdump.field_offset( + 'struct rb_node', '__rb_parent_color') + color = ramdump.read_word(ion_dev_rb_root + rbnode_color_offset) + while color != 1: + parent = cal_rb_parent(self, ramdump, parent, ion_info) + color = ramdump.read_word(parent + rbnode_color_offset) + return parent + + +@register_parser('--print-ionbuffer', 'Print ion buffer', optional=True) +class DumpIonBuffer(RamParser): + + def parse(self): + with self.ramdump.open_file('ionbuffer.txt') as ion_info: + if (self.ramdump.kernel_version < (3, 18, 0)): + ion_info.write('Kernel version 3.18 \ + and above are supported, current version {0}.\ + {1}'.format(self.ramdump.kernel_version[0], + self.ramdump.kernel_version[1])) + return + do_dump_ionbuff_info(self, self.ramdump, ion_info) diff --git a/linux-ramdump-parser-v2/parsers/lpm.py b/linux-ramdump-parser-v2/parsers/lpm.py index 5eb94cf28ae7612b8d249a5d83c618fa9988dcbf..e02b13872313e61501099a4a6e3126d58d6b3fc9 100644 --- a/linux-ramdump-parser-v2/parsers/lpm.py +++ b/linux-ramdump-parser-v2/parsers/lpm.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -117,10 +117,17 @@ class lpm(RamParser): offset = self.ramdump.field_offset('struct lpm_cluster', 'child_cpus') node = self.ramdump.read_int(lpm_cluster + offset) self.output.append("{:20}:{}({})\n".format("child_cpus", hex(node).rstrip("L"), bin(node).lstrip("0b"))) - - offset = self.ramdump.field_offset('struct lpm_cluster', 'num_childs_in_sync') + if (self.ramdump.kernel_version >= (3, 18, 0) or + self.ramdump.kernel_version < (3, 14, 0) ): + offset = self.ramdump.field_offset( + 'struct lpm_cluster', 'num_children_in_sync') + else: + offset = self.ramdump.field_offset( + 'struct lpm_cluster', 'num_childs_in_sync') node = self.ramdump.read_int(lpm_cluster + offset) - self.output.append("{:20}:{}({})\n".format("num_childs_in_sync", hex(node).rstrip("L"), bin(node).lstrip("0b"))) + self.output.append("{:20}:{}({})\n".format( + "num_children_in_sync", hex(node).rstrip("L"), + bin(node).lstrip("0b"))) self.output.append("\n") def lpm_walker(self, lpm_cluster): diff --git a/linux-ramdump-parser-v2/parsers/lsof.py b/linux-ramdump-parser-v2/parsers/lsof.py new file mode 100644 index 0000000000000000000000000000000000000000..1e1189fae16188a3e74519d38135d77632916970 --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/lsof.py @@ -0,0 +1,122 @@ +""" +Copyright (c) 2016, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+""" + +from parser_util import register_parser, RamParser +DNAME_INLINE_LEN = 40 +TASK_NAME_LENGTH = 16 + + +def do_dump_lsof_info(self, ramdump, lsof_info): + task_list_head_offset = ramdump.field_offset('struct task_struct', 'tasks') + init_task_address = self.ramdump.address_of('init_task') + init_tasklist_head = init_task_address + task_list_head_offset + task_list_head = ramdump.read_structure_field( + init_tasklist_head, 'struct list_head', 'next') + while task_list_head != init_tasklist_head: + task = task_list_head - task_list_head_offset + parse_task(self, ramdump, task, lsof_info) + lsof_info.write("\n*********************************") + task_list_head = ramdump.read_structure_field( + task_list_head, 'struct list_head', 'next') + + +def parse_task(self, ramdump, task, lsof_info): + index = 0 + if self.ramdump.arm64: + addressspace = 8 + else: + addressspace = 4 + + task_comm_offset = ramdump.field_offset( + 'struct task_struct', 'comm') + task_comm_offset = task + task_comm_offset + client_name = ramdump.read_cstring( + task_comm_offset, TASK_NAME_LENGTH) + task_pid = ramdump.read_structure_field( + task, 'struct task_struct', 'pid') + files = ramdump.read_structure_field( + task, 'struct task_struct', 'files') + str_task_file = '\n Task: {0:x}, comm: {1}, pid : {2:1}, files : {3:x}' + lsof_info.write(str_task_file.format( + task, client_name, task_pid, files)) + fdt = ramdump.read_structure_field( + files, 'struct files_struct', 'fdt') + max_fds = ramdump.read_structure_field( + fdt, 'struct fdtable', 'max_fds') + fd = ramdump.read_structure_field( + fdt, 'struct fdtable', 'fd') + ion_str = "\n [{0}] file : 0x{1:x} {2} {3} client : 0x{4:x}" + str = "\n [{0}] file : 0x{1:x} {2} {3}" + + while index < max_fds: + file = ramdump.read_word(fd + (index * addressspace)) + if file != 0: + fop = ramdump.read_structure_field( + file, 'struct file', 'f_op') + priv_data = ramdump.read_structure_field( + file, 'struct file', 'private_data') + look = ramdump.unwind_lookup(fop) + if look is None: + lsof_info.write(" look is none") + fop, offset = look + + f_pathoffset = ramdump.field_offset( + 'struct file', 'f_path') + f_path = f_pathoffset + file + dentry = ramdump.read_structure_field( + f_path, 'struct path', 'dentry') + dentry_iname_offset = ramdump.field_offset( + 'struct dentry', 'd_iname') + iname_address = dentry + dentry_iname_offset + iname = ramdump.read_cstring( + iname_address, DNAME_INLINE_LEN) + if iname != "null": + look = ramdump.unwind_lookup(iname_address) + if look is not None: + iname, offset = look + if iname == "ion": + lsof_info.write(ion_str.format( + index, file, fop, iname, priv_data)) + else: + lsof_info.write(str.format(index, file, fop, iname)) + index = index + 1 + + +@register_parser('--print-lsof', 'Print list of open files', optional=True) +class DumpLsof(RamParser): + + def parse(self): + with self.ramdump.open_file('lsof.txt') as lsof_info: + if (self.ramdump.kernel_version < (3, 18, 0)): + lsof_info.write('Kernel version 3.18 \ + and above are supported, current version {0}.\ + {1}'.format(self.ramdump.kernel_version[0], + self.ramdump.kernel_version[1])) + return + do_dump_lsof_info(self, self.ramdump, lsof_info) diff --git a/linux-ramdump-parser-v2/parsers/mdpinfo.py b/linux-ramdump-parser-v2/parsers/mdpinfo.py new file mode 100755 index 0000000000000000000000000000000000000000..a8f0e7d339099a5054eeafc5b4d5fe789101bfa8 --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/mdpinfo.py @@ -0,0 +1,138 @@ +# Copyright (c) 2016, The Linux Foundation. 
All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + + +from parser_util import register_parser, RamParser +from print_out import print_out_str +from linux_list import ListWalker +from ramdump import Struct + + +class MdssDbgBase(Struct): + _struct_name = "struct mdss_debug_base" + _fields = { + 'name': Struct.get_cstring, + 'base': Struct.get_pointer, + 'max_offset': Struct.get_u32, + 'dump_list': Struct.get_address, + 'reg_dump': Struct.get_pointer, + } + + +class MdssDbgXlog(Struct): + def get_dbgbase_arr(self, key): + arr = self.get_array_ptrs(key) + return [MdssDbgBase(self.ramdump, b) for b in arr] + + _struct_name = "struct mdss_dbg_xlog" + _fields = { + 'blk_arr': get_dbgbase_arr, + } + + +class RangeDumpNode(Struct): + def get_offset(self, key): + return Struct(self.ramdump, self.get_address(key), + struct_name="struct dump_offset", + fields={ + 'start': Struct.get_u32, + 'end': Struct.get_u32, + }) + + _struct_name = "struct range_dump_node" + _fields = { + 'offset': get_offset, + 'range_name': Struct.get_cstring, + 'reg_dump': Struct.get_pointer, + } + + +@register_parser('--print-mdpinfo', 'print mdp info') +class MDPinfo(RamParser): + def __init__(self, *args): + super(MDPinfo, self).__init__(*args) + self.outfile = None + + def mdss_dump_reg(self, addr, length, reg_dump): + if reg_dump == 0: + return + + # Making length multiple of 16 + length = int((length + 15) / 16) + + # Print out registers + for i in range(0, length): + self.outfile.write('{0:x} : '.format(addr)) + for j in range(0, 4): + read = reg_dump + (16 * i) + (4 * j) + self.outfile.write('{0:#0{1}x} ' + .format(self.ramdump.read_u32(read), 10)) + + self.outfile.write('\n') + addr += 16 + + def print_range(self, blk, node): + rng = RangeDumpNode(self.ramdump, node) + + if (rng.offset.start > rng.offset.end) or (rng.offset.end == 0): + print_out_str("Invalid offsets (%d, %d) for range: %s" % + (rng.offset.start, rng.offset.end, rng.range_name)) + return + + addr = blk.base + rng.offset.start + + self.outfile.write('{0}: base=0x{1:x} start=0x{2:x} end=0x{3:x}\n' + .format(rng.range_name, addr, + rng.offset.start, rng.offset.end)) + self.outfile.write('start_addr:{0:x} end_addr:{1:x} reg_addr={2:x}\n' + .format(rng.offset.start, rng.offset.end, addr)) + + # Calculating length + length = min(blk.max_offset, rng.offset.end) - rng.offset.start + + self.mdss_dump_reg(addr, length, rng.reg_dump) + + def parse(self): + mdss_dbg = MdssDbgXlog(self.ramdump, 'mdss_dbg_xlog') + + if mdss_dbg.is_empty(): + return + + for blk in mdss_dbg.blk_arr: + if blk.is_empty(): + continue + + # Delays file creation until we have found a non-null array element + if not self.outfile: + self.outfile = self.ramdump.open_file('mdpinfo_out.txt') + + self.outfile.write('mdss_dump_reg_by_ranges:' + '=========%s DUMP=========\n' % blk.name) + + head_offset = self.ramdump.field_offset('struct range_dump_node', + 'head') + + dump_list = ListWalker(self.ramdump, blk.dump_list, head_offset) + if dump_list.is_empty(): + self.outfile.write('Ranges not found, ' + 'will dump full registers\n') + self.outfile.write('base:0x%x length:%d\n' 
% + (blk.base, blk.max_offset)) + + self.mdss_dump_reg(blk.base, blk.max_offset, blk.reg_dump) + else: + for node in dump_list: + self.print_range(blk, node) + + # Close the file only if it was created + if self.outfile: + self.outfile.close() + self.outfile = None diff --git a/linux-ramdump-parser-v2/parsers/memstat.py b/linux-ramdump-parser-v2/parsers/memstat.py new file mode 100644 index 0000000000000000000000000000000000000000..aabe62bf3982d70fffee167255d65871f155e55f --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/memstat.py @@ -0,0 +1,168 @@ +# Copyright (c) 2016 The Linux Foundation. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +from parser_util import register_parser, RamParser +import linux_list as llist + +VM_ALLOC = 0x00000002 + + +@register_parser('--print-memstat', 'Print memory stats ') +class MemStats(RamParser): + + def list_func(self, vmlist): + vm = self.ramdump.read_word(vmlist + self.vm_offset) + if vm is None: + return + size = self.ramdump.read_structure_field( + vm, 'struct vm_struct', 'size') + flags = self.ramdump.read_structure_field( + vm, 'struct vm_struct', 'flags') + if flags == VM_ALLOC: + self.vmalloc_size = self.vmalloc_size + size + + def pages_to_mb(self, pages): + val = 0 + if pages != 0: + val = ((pages * 4) / 1024) + return val + + def bytes_to_mb(self, bytes): + val = 0 + if bytes != 0: + val = (bytes / 1024) / 1024 + return val + + def calculate_vmalloc(self): + next_offset = self.ramdump.field_offset('struct vmap_area', 'list') + vmlist = self.ramdump.read_word('vmap_area_list') + vm_offset = self.ramdump.field_offset('struct vmap_area', 'vm') + self.vm_offset = vm_offset + list_walker = llist.ListWalker(self.ramdump, vmlist, next_offset) + list_walker.walk(vmlist, self.list_func) + self.vmalloc_size = self.bytes_to_mb(self.vmalloc_size) + + def calculate_others(self): + # Other memory : NR_ANON_PAGES + NR_FILE_PAGES + NR_PAGETABLE \ + # + NR_KERNEL_STACK - NR_SWAPCACHE + vmstat_anon_pages = self.ramdump.read_word( + 'vm_stat[NR_ANON_PAGES]') + vmstat_file_pages = self.ramdump.read_word( + 'vm_stat[NR_FILE_PAGES]') + vmstat_pagetbl = self.ramdump.read_word( + 'vm_stat[NR_PAGETABLE]') + vmstat_kernelstack = self.ramdump.read_word( + 'vm_stat[NR_KERNEL_STACK]') + vmstat_swapcache = self.ramdump.read_word( + 'vm_stat[NR_SWAPCACHE]') + other_mem = (vmstat_anon_pages + vmstat_file_pages + vmstat_pagetbl + + vmstat_kernelstack - vmstat_swapcache) + other_mem = self.pages_to_mb(other_mem) + return other_mem + + def calculate_ionmem(self): + number_of_ion_heaps = self.ramdump.read_int('num_heaps') + heap_addr = self.ramdump.read_word('heaps') + offset_total_allocated = \ + self.ramdump.field_offset( + 'struct ion_heap', 'total_allocated') + if self.ramdump.arm64: + addressspace = 8 + else: + addressspace = 4 + heap_addr_array = [] + grandtotal = 0 + for i in range(0, number_of_ion_heaps): + heap_addr_array.append(heap_addr + i * addressspace) + temp = self.ramdump.read_word(heap_addr_array[i]) + total_allocated = self.ramdump.read_int( + temp + + offset_total_allocated) + if total_allocated is None: + total_allocated = 0 + 
break + grandtotal = grandtotal + total_allocated + grandtotal = self.bytes_to_mb(grandtotal) + return grandtotal + + def print_mem_stats(self, out_mem_stat): + # Total memory + total_mem = self.ramdump.read_word('totalram_pages') + total_mem = self.pages_to_mb(total_mem) + + # Free Memory + total_free = self.ramdump.read_word('vm_stat[NR_FREE_PAGES]') + total_free = self.pages_to_mb(total_free) + + # slab Memory + slab_rec = \ + self.ramdump.read_word('vm_stat[NR_SLAB_RECLAIMABLE]') + slab_unrec = \ + self.ramdump.read_word('vm_stat[NR_SLAB_UNRECLAIMABLE]') + total_slab = self.pages_to_mb(slab_rec + slab_unrec) + + # ion memory + ion_mem = self.calculate_ionmem() + + # kgsl memory + kgsl_memory = self.ramdump.read_word( + 'kgsl_driver.stats.page_alloc') + if kgsl_memory is not None: + kgsl_memory = self.bytes_to_mb(kgsl_memory) + else: + kgsl_memory = 0 + + # zcompressed ram + zram_devices_word = self.ramdump.read_word('zram_devices') + if zram_devices_word is not None: + zram_devices_stat_offset = self.ramdump.field_offset( + 'struct zram', 'stats') + stat_addr = zram_devices_word + zram_devices_stat_offset + stat_val = self.ramdump.read_u64(stat_addr) + stat_val = self.bytes_to_mb(stat_val) + else: + stat_val = 0 + + self.out_mem_stat = out_mem_stat + self.vmalloc_size = 0 + # vmalloc area + self.calculate_vmalloc() + + # Others + other_mem = self.calculate_others() + + # Output prints + out_mem_stat.write('{0:30}: {1:8} MB'.format( + "Total RAM", total_mem)) + out_mem_stat.write('\n{0:30}: {1:8} MB\n'.format( + "Free memory:", total_free)) + out_mem_stat.write('\n{0:30}: {1:8} MB'.format( + "Total Slab memory:", total_slab)) + out_mem_stat.write('\n{0:30}: {1:8} MB'.format( + "Total ion memory:", ion_mem)) + out_mem_stat.write('\n{0:30}: {1:8} MB'.format( + "KGSL ", kgsl_memory)) + out_mem_stat.write('\n{0:30}: {1:8} MB'.format( + "ZRAM compressed ", stat_val)) + out_mem_stat.write('\n{0:30}: {1:8} MB'.format( + "vmalloc ", self.vmalloc_size)) + out_mem_stat.write('\n{0:30}: {1:8} MB'.format( + "Others ", other_mem)) + + def parse(self): + with self.ramdump.open_file('mem_stat.txt') as out_mem_stat: + if (self.ramdump.kernel_version < (3, 18, 0)): + out_mem_stat.write('Kernel version 3.18 \ + and above are supported, current version {0}.\ + {1}'.format(self.ramdump.kernel_version[0], + self.ramdump.kernel_version[1])) + return + self.print_mem_stats(out_mem_stat) diff --git a/linux-ramdump-parser-v2/parsers/memusage.py b/linux-ramdump-parser-v2/parsers/memusage.py new file mode 100755 index 0000000000000000000000000000000000000000..ef86e54ef07198f9debce60e410a5e2c11d916c0 --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/memusage.py @@ -0,0 +1,131 @@ +# Copyright (c) 2016 The Linux Foundation. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
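Editor's note: both the memstat parser above and the memusage parser that follows report sizes derived from vm_stat page counters and assume 4 KB pages. A minimal sketch of the two conversions involved (the helper names mirror those in memstat.py; the sample value is illustrative):

PAGE_SIZE_KB = 4

def pages_to_mb(pages):
    # vm_stat counters are in pages; 4 KB per page on these targets.
    return (pages * PAGE_SIZE_KB) / 1024

def bytes_to_mb(nbytes):
    return (nbytes / 1024) / 1024

# e.g. pages_to_mb(262144) == 1024, i.e. 1 GB worth of 4 KB pages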
+ +from print_out import print_out_str +from parser_util import register_parser, RamParser, cleanupString + + +def do_dump_process_memory(ramdump): + vmstat_names = [ + "NR_FREE_PAGES", "NR_SLAB_RECLAIMABLE", + "NR_SLAB_UNRECLAIMABLE", "NR_SHMEM"] + vmstat_data = {} + vmstats_addr = ramdump.address_of('vm_stat') + for x in vmstat_names: + i = ramdump.gdbmi.get_value_of(x) + vmstat_data[x] = ramdump.read_word( + ramdump.array_index(vmstats_addr, 'atomic_long_t', i)) + total_mem = ramdump.read_word('totalram_pages') * 4 + offset_tasks = ramdump.field_offset('struct task_struct', 'tasks') + offset_comm = ramdump.field_offset('struct task_struct', 'comm') + offset_signal = ramdump.field_offset('struct task_struct', 'signal') + offset_adj = ramdump.field_offset('struct signal_struct', 'oom_score_adj') + offset_thread_group = ramdump.field_offset( + 'struct task_struct', 'thread_group') + offset_pid = ramdump.field_offset('struct task_struct', 'pid') + init_addr = ramdump.address_of('init_task') + init_next_task = init_addr + offset_tasks + orig_init_next_task = init_next_task + init_thread_group = init_addr + offset_thread_group + seen_tasks = set() + task_info = [] + offset_thread_group = ramdump.field_offset( + 'struct task_struct', 'thread_group') + memory_file = ramdump.open_file('memory.txt') + total_slab = ( + vmstat_data["NR_SLAB_RECLAIMABLE"] + + vmstat_data["NR_SLAB_UNRECLAIMABLE"]) * 4 + memory_file.write('Total RAM: {0:,}kB\n'.format(total_mem)) + memory_file.write('Total free memory: {0:,}kB({1:.1f}%)\n'.format( + vmstat_data["NR_FREE_PAGES"] * 4, + (100.0 * vmstat_data["NR_FREE_PAGES"] * 4) / total_mem)) + memory_file.write('Slab reclaimable: {0:,}kB({1:.1f}%)\n'.format( + vmstat_data["NR_SLAB_RECLAIMABLE"] * 4, + (100.0 * vmstat_data["NR_SLAB_RECLAIMABLE"] * 4) / total_mem)) + memory_file.write('Slab unreclaimable: {0:,}kB({1:.1f}%)\n'.format( + vmstat_data["NR_SLAB_UNRECLAIMABLE"] * 4, + (100.0 * vmstat_data["NR_SLAB_UNRECLAIMABLE"] * 4) / total_mem)) + memory_file.write('Total Slab memory: {0:,}kB({1:.1f}%)\n'.format( + total_slab, (100.0 * total_slab) / total_mem)) + memory_file.write('Total SHMEM: {0:,}kB({1:.1f}%)\n\n'.format( + vmstat_data["NR_SHMEM"] * 4, + (100.0 * vmstat_data["NR_SHMEM"] * 4) / total_mem)) + while True: + task_struct = init_thread_group - offset_thread_group + next_thread_comm = task_struct + offset_comm + thread_task_name = cleanupString( + ramdump.read_cstring(next_thread_comm, 16)) + next_thread_pid = task_struct + offset_pid + thread_task_pid = ramdump.read_int(next_thread_pid) + signal_struct = ramdump.read_word(task_struct + offset_signal) + adj = ramdump.read_u16(signal_struct + offset_adj) + if adj & 0x8000: + adj = adj - 0x10000 + rss = get_rss(ramdump, task_struct) * 4 + if rss != 0: + task_info.append([thread_task_name, thread_task_pid, rss, adj]) + next_task = ramdump.read_word(init_next_task) + if next_task is None: + break + + if (next_task == init_next_task and + next_task != orig_init_next_task): + break + + if next_task in seen_tasks: + break + + seen_tasks.add(next_task) + init_next_task = next_task + init_thread_group = init_next_task - offset_tasks + offset_thread_group + if init_next_task == orig_init_next_task: + break + + task_info = sorted(task_info, key=lambda l: l[2], reverse=True) + str = '{0:<17s}{1:>8s}{2:>17s}{3:>8}\n'.format( + 'Task name', 'PID', 'RSS in kB', 'ADJ') + memory_file.write(str) + for item in task_info: + str = '{0:<17s}{1:8d}{2:13,d}({3:2.1f}%) {4:6}\n'.format( + item[0], item[1], item[2], (100.0 * item[2]) / 
total_mem, item[3]) + memory_file.write(str) + memory_file.close() + print_out_str('---wrote meminfo to memory.txt') + + +def get_rss(ramdump, task_struct): + offset_mm = ramdump.field_offset('struct task_struct', 'mm') + offset_rss_stat = ramdump.field_offset('struct mm_struct', 'rss_stat') + offset_rss = ramdump.field_offset('struct mm_rss_stat', 'count') + offset_anon_rss = ramdump.field_offset('struct mm_rss_stat', 'count[1]') + offset_file_rss = ramdump.field_offset('struct mm_rss_stat', 'count[2]') + mm_struct = ramdump.read_word(task_struct + offset_mm) + if mm_struct == 0: + return 0 + anon_rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_anon_rss) + rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_rss) + file_rss = ramdump.read_word(mm_struct + offset_rss_stat + offset_file_rss) + # Ignore negative RSS values + if anon_rss > 0x80000000: + anon_rss = 0 + if rss > 0x80000000: + rss = 0 + if file_rss > 0x80000000: + file_rss = 0 + total_rss = rss + anon_rss + file_rss + return total_rss + + +@register_parser('--print-memory-info', 'Print memory usage info') +class DumpProcessMemory(RamParser): + + def parse(self): + do_dump_process_memory(self.ramdump) diff --git a/linux-ramdump-parser-v2/parsers/module_wlan.py b/linux-ramdump-parser-v2/parsers/module_wlan.py new file mode 100755 index 0000000000000000000000000000000000000000..a595d1a1134ec34e5cbb00e97f6229484a9fabf2 --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/module_wlan.py @@ -0,0 +1,630 @@ +# Copyright (c) 2016 The Linux Foundation. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
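Editor's note: do_dump_process_memory() above walks the circular init_task.tasks list by hand, guarding against cycles and truncated dumps. A condensed sketch of that traversal pattern, assuming a ramdump-like object with the address_of()/field_offset()/read_structure_field() helpers already used in this patch (the generator name is illustrative):

def for_each_task(ramdump):
    offset_tasks = ramdump.field_offset('struct task_struct', 'tasks')
    head = ramdump.address_of('init_task') + offset_tasks
    seen = set()
    node = ramdump.read_structure_field(head, 'struct list_head', 'next')
    while node is not None and node != head and node not in seen:
        seen.add(node)
        # container_of: back from the embedded list_head to the task_struct
        yield node - offset_tasks
        node = ramdump.read_structure_field(node, 'struct list_head', 'next')
    # stops when the walk returns to init_task, detects a cycle, or the
    # next pointer cannot be read from the dump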
+ +import parser_util +from parser_util import register_parser, RamParser +from print_out import print_out_str + + +@register_parser( + '--print-wlan', + 'Print WLAN debugging information(if enabled)', + optional=True) +class ModuleWlan(RamParser): + """ This class defines when WLAN module is loaded """ + def __init__(self, *args): + super(ModuleWlan, self).__init__(*args) + + self.dir_char = '' + self.dir_char_out = '' + self.opt_dbg = False + + self.wlan_path = '' + self.wlan_module_addr = 0 + self.wlan_text_addr = 0 + self.wlan_data_addr = 0 + self.wlan_bss_addr = 0 + + def convert_dir_for_arg(self, ori): + """ + Convert full path as an argument of function + """ + if self.opt_dbg is True: + print_out_str('** convert_dir_for_arg() **') + + if self.dir_char == '': + if parser_util.get_system_type() == 'Linux': + self.dir_char = '/' + self.dir_char_out = '/' + else: + self.dir_char = '\\' + self.dir_char_out = '\\\\' + dst = '' + for c in ori: + if c == self.dir_char: + dst = dst + self.dir_char_out + else: + dst = dst + c + + if self.opt_dbg is True: + print_out_str('ori - [{}]'.format(ori)) + print_out_str('dst - [{}]'.format(dst)) + + return dst + + def load_wlan_ko(self): + """ Load wlan.ko to GDB """ + + if self.opt_dbg is True: + print_out_str('** load_wlan_ko() **') + + if self.wlan_text_addr == 0: + print_out_str('self.wlan_text_addr is zero') + return False + + cmd = 'add-symbol-file ' + cmd = cmd + self.convert_dir_for_arg(self.wlan_path) + cmd = cmd + ' {} -s .data {} -s .bss {}'.format( + self.wlan_text_addr, self.wlan_data_addr, + self.wlan_bss_addr) + + return self.ramdump.gdbmi._run_for_first(cmd) + + def get_sections_of_wlan(self): + """ + Get wlan.ko's sectino addresses + """ + if self.opt_dbg is True: + print_out_str('** get_sections_of_wlan() **') + + # Step-A) Find wlan.ko + modules_addr = self.ramdump.address_of('modules') + next_module_addr = self.ramdump.read_structure_field( + modules_addr, 'struct list_head', 'next') + name_offset = self.ramdump.field_offset('struct module', 'name') + module_addr = 0 + idx = 0 + + while modules_addr != next_module_addr: + module_addr = self.ramdump.container_of( + next_module_addr, 'struct module', 'list') + module_name_addr = module_addr + name_offset + module_name_str = self.ramdump.read_cstring( + module_name_addr, 32, True) + + if module_name_str == 'wlan': + self.wlan_module_addr = module_addr + break + + if self.opt_dbg is True: + print_out_str( + '[{}]th - next_module[{}], module[{}], name[{}]'.format( + hex(idx), hex(next_module_addr), + hex(module_addr), module_name_str)) + + # move the list entry to the next + next_module_addr = self.ramdump.read_structure_field( + modules_addr, 'struct list_head', 'next') + idx = idx + 1 + + if self.wlan_module_addr == 0: + print_out_str('[Caution] Fail to find wlan.ko') + return False + + # Step-B) get sections in wlan.ko + sect_attrs_addr = self.ramdump.read_structure_field( + module_addr, 'struct module', 'sect_attrs') + nsections = self.ramdump.read_structure_field( + sect_attrs_addr, + 'struct module_sect_attrs', + 'nsections') + attrs_offset = self.ramdump.field_offset( + 'struct module_sect_attrs', 'attrs') + attrs_addr = sect_attrs_addr + attrs_offset + module_sect_attr_size = self.ramdump.sizeof('struct module_sect_attr') + + if self.opt_dbg is True: + print_out_str('module_addr : {}'.format(hex(module_addr))) + print_out_str('sect_attrs_addr : {}'.format(hex(sect_attrs_addr))) + print_out_str('nsections : {}'.format(hex(nsections))) + print_out_str('attrs_offset : 
{}'.format(hex(attrs_offset))) + if attrs_addr is not None: + print_out_str('attrs_addr : {}'.format(hex(attrs_addr))) + else: + print_out_str('attrs_addr : {}'.format(attrs_addr)) + + section_name_offset = self.ramdump.field_offset( + 'struct module_sect_attr', 'name') + idx = 0 + while idx < nsections: + section_attr_address = attrs_addr + idx * module_sect_attr_size + section_name_addr = self.ramdump.read_pointer( + section_attr_address + section_name_offset) + section_name_str = self.ramdump.read_cstring( + section_name_addr, 32, True) + section_address = self.ramdump.read_structure_field( + section_attr_address, + 'struct module_sect_attr', 'address') + + if self.opt_dbg is True: + print_out_str('section[{}]th - name[{}], attr[{}]'.format( + hex(idx), section_name_str, hex(section_address))) + + if section_name_str == '.text': + self.wlan_text_addr = section_address + if section_name_str == '.data': + self.wlan_data_addr = section_address + if section_name_str == '.bss': + self.wlan_bss_addr = section_address + idx = idx + 0x1 + + print_out_str('wlan_text_addr : {}'.format(hex(self.wlan_text_addr))) + print_out_str('wlan_data_addr : {}'.format(hex(self.wlan_data_addr))) + print_out_str('wlan_bss_addr : {}'.format(hex(self.wlan_bss_addr))) + + return True + + def run(self): + """ + Main + """ + + if self.ramdump.arm64 is None: + print_out_str('[Caution] this script supports on ARM64') + return False + + if self.ramdump.wlan == "INTEGRATED": + print_out_str('self.wlan doen\'t exist, skip') + else: + print_out_str('self.wlan exist {}'.format(self.ramdump.wlan)) + self.wlan_path = self.ramdump.wlan + + if self.get_sections_of_wlan() is False: + print_out_str('wlan.ko is not loaded.') + return False + else: + print_out_str('** Find WLAN Module **') + self.load_wlan_ko() + + self.get_wmi_command_log_buffer() + self.get_host_wmi_command_tx_cmp_buf() + self.get_host_wmi_event_buf() + self.get_host_wmi_rx_event_buf() + self.get_host_extract_log() + + return True + + ################################### + # Parse internal variables + def get_wmi_command_log_buffer(self): + """ + Parse 'struct wmi_command_debug' + """ + + if self.opt_dbg is True: + print_out_str('*** get_wmi_command_log_buffer() ***') + + element_size = self.ramdump.sizeof('struct wmi_command_debug') + if (element_size is None): + print_out_str('[Caution] symbols of host driver do not exist') + return False + + out_file = self.ramdump.open_file("wmi_command_log_buffer.txt") + + wmi_total_size = self.ramdump.sizeof('wmi_command_log_buffer') + num_elements = wmi_total_size / element_size + + if self.opt_dbg is True: + print_out_str('** wlan_host_wmi_command_log_buffer **') + print_out_str('*************************************') + print_out_str('wmi_total_size({})'.format(hex(wmi_total_size))) + print_out_str('element_size({})'.format(hex(element_size))) + print_out_str('num_elements({})'.format(hex(num_elements))) + print_out_str('*************************************') + + # info of the data structure + command_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'command') + data0_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[0]') + data1_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[1]') + data2_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[2]') + data3_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[3]') + time_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'time') + + if self.opt_dbg is True: + 
print_out_str('command_offset({})'.format(command_offset)) + print_out_str('data0_offset({})'.format(data0_offset)) + print_out_str('data1_offset({})'.format(data1_offset)) + print_out_str('data2_offset({})'.format(data2_offset)) + print_out_str('data3_offset({})'.format(data3_offset)) + print_out_str('time_offset({})'.format(time_offset)) + print_out_str('*************************************') + + buffer_start_address = self.ramdump.address_of( + 'wmi_command_log_buffer') + wmi_command_buf_idx = self.ramdump.read_u32( + self.ramdump.address_of( + 'g_wmi_command_buf_idx')) + + cnt = 0 + idx = wmi_command_buf_idx + while cnt < num_elements: + if idx == num_elements: + idx = 0 + + buffer_address = buffer_start_address + idx * element_size + command = self.ramdump.read_u32(buffer_address) + data0 = self.ramdump.read_u32(buffer_address + data0_offset) + data1 = self.ramdump.read_u32(buffer_address + data1_offset) + data2 = self.ramdump.read_u32(buffer_address + data2_offset) + data3 = self.ramdump.read_u32(buffer_address + data3_offset) + time = self.ramdump.read_u64(buffer_address + time_offset) + + idx = idx + 1 + cnt = cnt + 1 + + out_buf = '{0} us'.format(float(time/100000.0)) + out_buf = out_buf + ' : command({})'.format(hex(command)) + out_buf = out_buf + ', data[{}'.format(hex(data0)) + out_buf = out_buf + ', {}'.format(hex(data1)) + out_buf = out_buf + ', {}'.format(hex(data2)) + out_buf = out_buf + ', {}]'.format(hex(data3)) + if self.opt_dbg is True: + print_out_str(out_buf) + out_file.write(out_buf + '\n') + out_file.close() + + return True + + def get_host_wmi_command_tx_cmp_buf(self): + """ + Parse 'struct wmi_command_debug wmi_command_tx_cmp_log_buffer' + """ + if self.opt_dbg is True: + print_out_str('*** get_host_wmi_command_tx_cmp_buf() ***') + + element_size = self.ramdump.sizeof('struct wmi_command_debug') + if (element_size is None): + print_out_str('[Caution] symbols of host driver do not exist') + return False + + out_file = self.ramdump.open_file("wmi_command_tx_cmp_buf.txt") + + wmi_total_size = self.ramdump.sizeof('wmi_command_tx_cmp_log_buffer') + num_elements = wmi_total_size / element_size + + if self.opt_dbg is True: + print_out_str('** wlan_host_wmi_command_tx_cmp_buf **') + print_out_str('*************************************') + print_out_str('wmi_total_size({})'.format(hex(wmi_total_size))) + print_out_str('element_size({})'.format(hex(element_size))) + print_out_str('num_elements({})'.format(hex(num_elements))) + print_out_str('*************************************') + + # info of the data structure + command_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'command') + data0_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[0]') + data1_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[1]') + data2_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[2]') + data3_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'data[3]') + time_offset = self.ramdump.field_offset( + 'struct wmi_command_debug', 'time') + + if self.opt_dbg is True: + print_out_str("command_offset({})".format(command_offset)) + print_out_str("data0_offset({})".format(data0_offset)) + print_out_str("data1_offset({})".format(data1_offset)) + print_out_str("data2_offset({})".format(data2_offset)) + print_out_str("data3_offset({})".format(data3_offset)) + print_out_str("time_offset({})".format(time_offset)) + print_out_str('*************************************') + + log_buffer_address = 
self.ramdump.address_of( + 'wmi_command_tx_cmp_log_buffer') + wmi_command_buf_idx = self.ramdump.read_u32( + self.ramdump.address_of( + 'g_wmi_command_tx_cmp_buf_idx')) + + cnt = 0 + idx = wmi_command_buf_idx + while cnt < num_elements: + if idx == num_elements: + idx = 0 + + buffer_address = log_buffer_address + idx * element_size + command = self.ramdump.read_u32(buffer_address) + data0 = self.ramdump.read_u32(buffer_address + data0_offset) + data1 = self.ramdump.read_u32(buffer_address + data1_offset) + data2 = self.ramdump.read_u32(buffer_address + data2_offset) + data3 = self.ramdump.read_u32(buffer_address + data3_offset) + time = self.ramdump.read_u64(buffer_address + time_offset) + + idx = idx + 1 + cnt = cnt + 1 + + out_buf = '{0} us'.format(float(time/100000.0)) + out_buf = out_buf + ' : command({})'.format(hex(command)) + out_buf = out_buf + ', data[{}'.format(hex(data0)) + out_buf = out_buf + ', {}'.format(hex(data1)) + out_buf = out_buf + ', {}'.format(hex(data2)) + out_buf = out_buf + ', {}]'.format(hex(data3)) + if self.opt_dbg is True: + print_out_str(out_buf) + out_file.write(out_buf + '\n') + + out_file.close() + return True + + def get_host_wmi_event_buf(self): + """ + Parse 'struct wmi_event_debug wmi_event_log_buffer[]' + """ + + if self.opt_dbg is True: + print_out_str('*** get_host_wmi_event_buf() ***') + + element_size = self.ramdump.sizeof('struct wmi_event_debug') + if (element_size is None): + print_out_str('[Caution] symbols of host driver do not exist') + return False + + out_file = self.ramdump.open_file("wmi_event_log_buffer.txt") + + wmi_total_size = self.ramdump.sizeof('wmi_event_log_buffer') + num_elements = wmi_total_size / element_size + + if self.opt_dbg is True: + print_out_str('[Debug] wmi_total_size({})'.format( + hex(wmi_total_size))) + print_out_str('[Debug] element_size({})'.format(hex(element_size))) + print_out_str('[Debug] num_elements({})'.format(hex(num_elements))) + + # info of the data structure + event_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'event') + data0_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[0]') + data1_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[1]') + data2_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[2]') + data3_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[3]') + time_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'time') + + if self.opt_dbg is True: + print_out_str("[Debug] event_offset({})".format(event_offset)) + print_out_str("[Debug] data0_offset({})".format(data0_offset)) + print_out_str("[Debug] data1_offset({})".format(data1_offset)) + print_out_str("[Debug] data2_offset({})".format(data2_offset)) + print_out_str("[Debug] data3_offset({})".format(data3_offset)) + print_out_str("[Debug] time_offset({})".format(time_offset)) + + wmi_log_address = self.ramdump.address_of('wmi_event_log_buffer') + wmi_event_buf_idx = self.ramdump.read_u32( + self.ramdump.address_of('g_wmi_event_buf_idx')) + + cnt = 0 + idx = wmi_event_buf_idx + while cnt < num_elements: + if idx == num_elements: + idx = 0 + + buffer_address = wmi_log_address + idx * element_size + event = self.ramdump.read_u32(buffer_address) + data0 = self.ramdump.read_u32(buffer_address + data0_offset) + data1 = self.ramdump.read_u32(buffer_address + data1_offset) + data2 = self.ramdump.read_u32(buffer_address + data2_offset) + data3 = self.ramdump.read_u32(buffer_address + data3_offset) + time = 
self.ramdump.read_u64(buffer_address + time_offset) + + idx = idx + 1 + cnt = cnt + 1 + + out_buf = '{0} us'.format(float(time/100000.0)) + out_buf = out_buf + ' : event({})'.format(hex(event)) + out_buf = out_buf + ', data[{}'.format(hex(data0)) + out_buf = out_buf + ', {}'.format(hex(data1)) + out_buf = out_buf + ', {}'.format(hex(data2)) + out_buf = out_buf + ', {}]'.format(hex(data3)) + if self.opt_dbg is True: + print_out_str(out_buf) + out_file.write(out_buf + '\n') + + out_file.close() + return True + + def get_host_wmi_rx_event_buf(self): + """ + Parse 'struct wmi_event_debug wmi_rx_event_log_buffer' + """ + + if self.opt_dbg is True: + print_out_str('*** get_host_wmi_rx_event_buf() ***') + + wmi_elem_size = self.ramdump.sizeof('struct wmi_event_debug') + if (wmi_elem_size is None): + print_out_str('[Caution] symbols of host driver do not exist') + return False + wmi_total_size = self.ramdump.sizeof('wmi_rx_event_log_buffer') + num_elements = wmi_total_size / wmi_elem_size + + out_file = self.ramdump.open_file("wmi_rx_event_log_buffer.txt") + + # info of the data structure + event_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'event') + data0_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[0]') + data1_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[1]') + data2_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[2]') + data3_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'data[3]') + time_offset = self.ramdump.field_offset( + 'struct wmi_event_debug', 'time') + + wmi_event_address = self.ramdump.address_of('wmi_rx_event_log_buffer') + wmi_event_buf_idx = self.ramdump.read_u32( + self.ramdump.address_of( + 'g_wmi_rx_event_buf_idx')) + + if self.opt_dbg is True: + print_out_str('[Debug] wmi_total_size({})'.format(wmi_total_size)) + print_out_str('[Debug] wmi_elem_size({})'.format(wmi_elem_size)) + print_out_str('[Debug] num_elements({})'.format(num_elements)) + print_out_str('[Debug] event_offset({})'.format(event_offset)) + print_out_str('[Debug] data0_offset({})'.format(data0_offset)) + print_out_str('[Debug] data1_offset({})'.format(data1_offset)) + print_out_str('[Debug] data2_offset({})'.format(data2_offset)) + print_out_str('[Debug] data3_offset({})'.format(data3_offset)) + print_out_str('[Debug] time_offset({})'.format(time_offset)) + + cnt = 0 + idx = wmi_event_buf_idx + while cnt < num_elements: + if idx == num_elements: + idx = 0 + + buffer_address = wmi_event_address + idx * wmi_elem_size + event = self.ramdump.read_u32(buffer_address) + data0 = self.ramdump.read_u32(buffer_address + data0_offset) + data1 = self.ramdump.read_u32(buffer_address + data1_offset) + data2 = self.ramdump.read_u32(buffer_address + data2_offset) + data3 = self.ramdump.read_u32(buffer_address + data3_offset) + time = self.ramdump.read_u64(buffer_address + time_offset) + + out_buf = '{0} us'.format(float(time/100000.0)) + out_buf = out_buf + ' : event({})'.format(hex(event)) + out_buf = out_buf + ', data[{}'.format(hex(data0)) + out_buf = out_buf + ', {}'.format(hex(data1)) + out_buf = out_buf + ', {}'.format(hex(data2)) + out_buf = out_buf + ', {}]'.format(hex(data3)) + if self.opt_dbg is True: + print_out_str(out_buf) + out_file.write(out_buf + '\n') + + idx = idx + 1 + cnt = cnt + 1 + + out_file.close() + return True + + def get_host_extract_log(self): + """ + refer functions in wlan_logging_sock_svc.c + """ + + if self.opt_dbg is True: + print_out_str('*** wlan_host_extract_log() ***') + + out_file 
= self.ramdump.open_file("gwlan_logging.txt") + + # get number of struct wlan_logging + num_buf = self.ramdump.read_s32( + self.ramdump.address_of('gwlan_logging') + + self.ramdump.field_offset( + 'struct wlan_logging', 'num_buf')) + if self.opt_dbg is True: + print_out_str('num_buf : {}'.format(num_buf)) + + # gwlan_logging + element_size = self.ramdump.sizeof('struct log_msg') + if element_size % 32: + elem_aligned_size = element_size + (element_size % 32) + if self.opt_dbg is True: + print_out_str('element_size({})'.format(hex(element_size))) + print_out_str('element_align_size({})'.format( + hex(elem_aligned_size))) + else: + elem_aligned_size = element_size + if self.opt_dbg is True: + print_out_str('element_size({})'.format(hex(element_size))) + print_out_str('element_align_size({})'.format( + hex(elem_aligned_size))) + + filled_length_offset = self.ramdump.field_offset( + 'struct log_msg', + 'filled_length') + logbuf_offset = self.ramdump.field_offset( + 'struct log_msg', 'logbuf') + logbuf_size = element_size - logbuf_offset + gplog_msg_address = self.ramdump.read_pointer('gplog_msg') + + if self.opt_dbg is True: + print_out_str('filled_length_offset : {}'.format( + hex(filled_length_offset))) + print_out_str('logbuf_size : {}'.format(hex(logbuf_size))) + print_out_str('gplog_msg_address : {}'.format( + hex(gplog_msg_address))) + + cnt = 0 + while cnt < num_buf: + buffer_address = gplog_msg_address + cnt * elem_aligned_size + + filled_length = self.ramdump.read_u32( + buffer_address + filled_length_offset) + + v_address = buffer_address + logbuf_offset + 4 + p_address = self.ramdump.virt_to_phys(v_address) + if self.opt_dbg is True: + print_out_str('** gplog_msg[{}] : {}, {}, VA{}-PA{} **'.format( + cnt, + hex(buffer_address), + hex(filled_length), + hex(v_address), + hex(p_address))) + + out_file.write('** gplog_msg[{}] : {}, {}, VA{}-PA{} **\n'.format( + cnt, + hex(buffer_address), + hex(filled_length), + hex(v_address), + hex(p_address))) + + if filled_length != 0: + left_bytes = filled_length + logbuf_str = "" + while left_bytes > 0: + p_address = self.ramdump.virt_to_phys(v_address) + logbuf_out = self.ramdump.read_physical(p_address, 4) + logbuf_str = logbuf_str + logbuf_out + + v_address = v_address + 4 + left_bytes = left_bytes - 4 + + if self.opt_dbg is True: + print_out_str(logbuf_str) + out_file.write(logbuf_str) + out_file.write('\n') + + # We may be able to delete first [] + # like [VosMCThread] or [kworker/0:0] + cnt = cnt + 1 + + out_file.close() + return True + + def parse(self): + self.run() + return True diff --git a/linux-ramdump-parser-v2/parsers/slabinfo.py b/linux-ramdump-parser-v2/parsers/slabinfo.py old mode 100755 new mode 100644 index 0f9a95a47ef65284798cb3c6568a742bbf580738..460c08ea5cc108e9dd060dc098c8a1d028180707 --- a/linux-ramdump-parser-v2/parsers/slabinfo.py +++ b/linux-ramdump-parser-v2/parsers/slabinfo.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -9,11 +9,12 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
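The WMI log extractors above (get_wmi_command_log_buffer() and the related get_host_* routines) all walk a circular log buffer the same way: start at the saved write index, wrap at the element count, and visit each slot exactly once so the records come out oldest first. A minimal sketch of that traversal pattern; the names and sample data are hypothetical:

def walk_ring(slots, write_idx):
    # visit each slot once, starting at the saved write index and wrapping,
    # so the oldest record is returned first
    num_elements = len(slots)
    records = []
    cnt = 0
    idx = write_idx
    while cnt < num_elements:
        if idx == num_elements:
            idx = 0
        records.append(slots[idx])
        idx = idx + 1
        cnt = cnt + 1
    return records

# e.g. walk_ring(['a', 'b', 'c', 'd'], 2) == ['c', 'd', 'a', 'b']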
-import re +import sys from mm import page_address, pfn_to_page from print_out import print_out_str from parser_util import register_parser, RamParser +import operator SLAB_RED_ZONE = 0x400 SLAB_POISON = 0x800 @@ -25,52 +26,80 @@ SLUB_RED_ACTIVE = 0xcc POISON_INUSE = 0x5a POISON_FREE = 0x6b POISON_END = 0xa5 +g_printfreeobjStack = False + class kmem_cache(object): def __init__(self, ramdump, addr): self.valid = False - offset = ramdump.field_offset( 'struct kmem_cache', 'flags') self.flags = ramdump.read_word(addr + offset) if self.flags is None: return - offset = ramdump.field_offset( 'struct kmem_cache', 'size') self.size = ramdump.read_int(addr + offset) if self.size is None: return - offset = ramdump.field_offset( 'struct kmem_cache', 'object_size') self.object_size = ramdump.read_int(addr + offset) if self.object_size is None: return - offset = ramdump.field_offset( 'struct kmem_cache', 'offset') self.offset = ramdump.read_int(addr + offset) if self.offset is None: return - - offset = ramdump.field_offset( - 'struct kmem_cache', 'max') - self.max = ramdump.read_word(addr + offset) - if self.max is None: - return - offset = ramdump.field_offset( 'struct kmem_cache', 'inuse') self.inuse = ramdump.read_int(addr + offset) if self.inuse is None: return - self.addr = addr self.valid = True + +class struct_member_offset(object): + def __init__(self, ramdump): + self.kmemcache_list = ramdump.field_offset( + 'struct kmem_cache', 'list') + self.kmemcache_name = ramdump.field_offset( + 'struct kmem_cache', 'name') + self.kmemcache_node = ramdump.field_offset( + 'struct kmem_cache', 'node') + self.kmemcache_cpu_page = ramdump.field_offset( + 'struct kmem_cache_cpu', 'page') + self.kmemcpucache_cpu_slab = ramdump.field_offset( + 'struct kmem_cache', 'cpu_slab') + self.kmemcachenode_partial = ramdump.field_offset( + 'struct kmem_cache_node', 'partial') + self.kmemcachenode_full = ramdump.field_offset( + 'struct kmem_cache_node', 'full') + self.page_lru = ramdump.field_offset( + 'struct page', 'lru') + self.page_flags = ramdump.field_offset( + 'struct page', 'flags') + self.page_mapcount = ramdump.field_offset( + 'struct page', '_mapcount') + self.track_addrs = ramdump.field_offset( + 'struct track', 'addrs') + self.page_freelist = ramdump.field_offset( + 'struct page', 'freelist') + self.sizeof_struct_track = ramdump.sizeof( + 'struct track') + self.sizeof_void_pointer = ramdump.sizeof( + "void *") + self.sizeof_unsignedlong = ramdump.sizeof( + "unsigned long") + + @register_parser('--slabinfo', 'print information about slabs', optional=True) class Slabinfo(RamParser): + g_allstacks = {} # hold callstack stack + g_index = 0 + g_offsetof = None def get_free_pointer(self, ramdump, s, obj): # just like validate_slab_slab! 
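The struct_member_offset helper added above caches each gdb field offset once, so the per-object loops that follow can use plain additions instead of repeating the symbol lookup for every object. A small sketch of the same pattern, assuming a RamDump-like object; the class and function names here are hypothetical:

class CachedOffsets(object):
    def __init__(self, ramdump):
        # resolved once, reused for every struct page that is visited
        self.page_lru = ramdump.field_offset('struct page', 'lru')
        self.page_freelist = ramdump.field_offset('struct page', 'freelist')


def read_freelists(ramdump, offsets, pages):
    # cheap pointer arithmetic per page, no further symbol lookups
    return [ramdump.read_word(page + offsets.page_freelist) for page in pages]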
@@ -80,72 +109,278 @@ class Slabinfo(RamParser): return (p - addr) / slab.size def get_map(self, ramdump, slab, page, bitarray): - freelist_offset = self.ramdump.field_offset('struct page', 'freelist') - freelist = self.ramdump.read_word(page + freelist_offset) + freelist = self.ramdump.read_word(page + g_offsetof.page_freelist) p = freelist addr = page_address(self.ramdump, page) seen = [] if addr is None: return while p != 0 and p is not None and p not in seen: - idx = self.slab_index(self.ramdump, p, addr, slab) - if idx >= len(bitarray) or idx < 0: + index = self.slab_index(self.ramdump, p, addr, slab) + if index >= len(bitarray) or index < 0: return - bitarray[idx] = 1 + bitarray[index] = 1 seen.append(p) p = self.get_free_pointer(self.ramdump, slab, p) - def get_track(self, ramdump, slab, obj, track_type): - track_size = self.ramdump.sizeof('struct track') - slab_offset_offset = self.ramdump.field_offset( - 'struct kmem_cache', 'offset') - slab_inuse_offset = self.ramdump.field_offset( - 'struct kmem_cache', 'inuse') - slab_offset = self.ramdump.read_int(slab + slab_offset_offset) - slab_inuse = self.ramdump.read_int(slab + slab_inuse_offset) - if slab_offset != 0: - p = obj + slab_offset + self.ramdump.sizeof("void *") + def get_track(self, ramdump, slab, obj, track_type): + track_size = g_offsetof.sizeof_struct_track + if slab.offset != 0: + p = obj + slab.offset + g_offsetof.sizeof_void_pointer else: - p = obj + slab_inuse + p = obj + slab.inuse return p + track_type * track_size + def extract_callstack(self, ramdump, a, stack, out_file): + for a in stack: + look = ramdump.unwind_lookup(a) + if look is None: + out_file.write("look is None") + continue + symname, offset = look + out_file.write( + ' [<{0:x}>] {1}+0x{2:x}\n'.format(a, symname, offset)) + return + def print_track(self, ramdump, slab, obj, track_type, out_file): - p = self.get_track(self.ramdump, slab, obj, track_type) - track_addrs_offset = self.ramdump.field_offset('struct track', 'addrs') + stack = [] + stackstr = "" + p = self.get_track(ramdump, slab, obj, track_type) + track_addrs_offset = g_offsetof.track_addrs start = p + track_addrs_offset - pointer_size = self.ramdump.sizeof("unsigned long") - if track_type == 0: - out_file.write(' ALLOC STACK\n') - else: - out_file.write(' FREE STACK\n') + pointer_size = g_offsetof.sizeof_unsignedlong for i in range(0, 16): a = self.ramdump.read_word(start + pointer_size * i) if a == 0: - break - look = self.ramdump.unwind_lookup(a) - if look is None: + continue + stack += [a] + stackstr += str(a) + stackstr_len = len(stackstr) + if stackstr_len == 0: + return + try: + self.g_allstacks[stackstr][0] += 1 + if self.g_allstacks[stackstr][0] > 1: return - symname, offset = look - out_file.write( - ' [<{0:x}>] {1}+0x{2:x}\n'.format(a, symname, offset)) - out_file.write('\n') - - def get_nobjects(self, ramdump, page): - if re.search('3\.0\.\d', self.ramdump.version) is not None: - n_objects_offset = self.ramdump.field_offset( - 'struct page', 'objects') - n_objects = self.ramdump.read_halfword(page + n_objects_offset) - return n_objects + self.extract_callstack(self.ramdump, a, stack, out_file) + except KeyError: + if g_printfreeobjStack is False: + if track_type != 0: + # if free object and g_printfreeobjStack is False, + # ignore it for printing its call stack + return + if track_type == 1: + out_file.write( + "FREE Call stack index:{0}".format( + self.g_index)) + else: + out_file.write( + "ALLOCATED Call stack index:{0}".format( + self.g_index)) + 
self.extract_callstack(self.ramdump, a, stack, out_file) + self.g_allstacks[stackstr] = [1, self.g_index] + self.g_index += 1 + out_file.write('\n') + + def print_slab( + self, ramdump, slab, page, + out_file, map_fn, out_slabs_addrs): + + page_addr = page_address(ramdump, page) + p = page_addr + if page is None: + return + n_objects = self.ramdump.read_word(page + g_offsetof.page_mapcount) + n_objects = (n_objects >> 16) & 0x00007FFF + if n_objects is None: + return + bitarray = [0] * n_objects + addr = page_address(self.ramdump, page) + self.get_map(self.ramdump, slab, page, bitarray) + while p < page_addr + (n_objects * slab.size): + bitidx = self.slab_index(self.ramdump, p, addr, slab) + if bitidx >= n_objects or bitidx < 0: + return + map_fn( + ramdump, p, bitarray[bitidx], slab, + page, out_file, out_slabs_addrs) + p = p + slab.size + + def printsummary(self, slabs_output_summary): + sorted_val = sorted( + self.g_allstacks.items(), + key=operator.itemgetter(1), reverse=True) + for key, value in sorted_val: + slabs_output_summary.write( + " stack index:{0} frequency:{1}\n".format(value[1], value[0])) + + def print_slab_page_info( + self, ramdump, slab_obj, slab_node, start, + out_file, map_fn, out_slabs_addrs): + page = self.ramdump.read_word(start) + if page == 0: + return + seen = [] + max_pfn_addr = self.ramdump.address_of('max_pfn') + max_pfn = self.ramdump.read_word(max_pfn_addr) + max_page = pfn_to_page(ramdump, max_pfn) + while page != start: + if page is None: + return + if page in seen: + return + if page > max_page: + return + seen.append(page) + page = page - g_offsetof.page_lru + self.print_slab( + self.ramdump, slab_obj, page, out_file, map_fn, + out_slabs_addrs) + page = self.ramdump.read_word(page + g_offsetof.page_lru) + + def print_per_cpu_slab_info( + self, ramdump, slab, slab_node, start, out_file, map_fn): + page = self.ramdump.read_word(start) + if page == 0: + return + if page is None: + return + page_addr = page_address(self.ramdump, page) + self.print_slab( + self.ramdump, page_addr, slab, page, out_file, map_fn) + + def print_all_objects( + self, ramdump, p, free, slab, page, out_file, out_slabs_addrs): + + if free: + out_slabs_addrs.write( + '\n Object {0:x}-{1:x} FREE'.format( + p, p + slab.size)) + else: + out_slabs_addrs.write( + '\n Object {0:x}-{1:x} ALLOCATED'.format( + p, p + slab.size)) + if self.ramdump.is_config_defined('CONFIG_SLUB_DEBUG_ON'): + if g_printfreeobjStack is True: + self.print_track(ramdump, slab, p, 0, out_file) + self.print_track(ramdump, slab, p, 1, out_file) + else: + self.print_track(ramdump, slab, p, free, out_file) + + def print_check_poison(self, p, free, slab, page, out_file): + if free: + self.check_object(slab, page, p, SLUB_RED_INACTIVE, out_file) else: - # The objects field is now a bit field. This confuses GDB as it thinks the - # offset is always 0. Work around this for now - map_count_offset = self.ramdump.field_offset( - 'struct page', '_mapcount') - count = self.ramdump.read_int(page + map_count_offset) - if count is None: - return None - n_objects = (count >> 16) & 0xFFFF - return n_objects + self.check_object(slab, page, p, SLUB_RED_ACTIVE, out_file) + + def initializeOffset(self): + global g_offsetof + g_offsetof = struct_member_offset(self.ramdump) + + # based on validate_slab_cache. Currently assuming there + # is only one numa node in the system because the code to + # do that correctly is a big pain. This will + # need to be changed if we ever do NUMA properly. 
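    # Worked example (made-up value) of the _mapcount unpacking used by
    # print_slab() above and by the slab summary parser later in this patch:
    #   mapcount      = 0x00400023
    #   total_objects = (mapcount >> 16) & 0x00007FFF   -> 0x40
    #   inuse         =  mapcount        & 0x0000FFFF   -> 0x23
    #   free objects  =  total_objects - inuse          -> 0x1d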
+ def validate_slab_cache(self, slab_out, input_slabname, map_fn): + slab_name_found = False + original_slab = self.ramdump.address_of('slab_caches') + cpu_present_bits_addr = self.ramdump.address_of('cpu_present_bits') + cpu_present_bits = self.ramdump.read_word(cpu_present_bits_addr) + cpus = bin(cpu_present_bits).count('1') + offsetof = struct_member_offset(self.ramdump) + self.initializeOffset() + slab_list_offset = g_offsetof.kmemcache_list + slab_name_offset = g_offsetof.kmemcache_name + slab_node_offset = g_offsetof.kmemcache_node + cpu_slab_offset = g_offsetof.kmemcpucache_cpu_slab + slab_partial_offset = g_offsetof.kmemcachenode_partial + slab_full_offset = g_offsetof.kmemcachenode_full + slab = self.ramdump.read_word(original_slab) + slabs_output_summary = self.ramdump.open_file('slabs_output.txt') + out_slabs_addrs = self.ramdump.open_file('out_slabs_addrs.txt') + while slab != original_slab: + slab = slab - slab_list_offset + slab_obj = kmem_cache(self.ramdump, slab) + if not slab_obj.valid: + slab_out.write( + 'Invalid slab object {0:x}'.format(slab)) + slab = self.ramdump.read_word(slab + slab_list_offset) + continue + slab_name_addr = self.ramdump.read_word( + slab + slab_name_offset) + slab_name = self.ramdump.read_cstring( + slab_name_addr, 48) + if input_slabname is not None: + if input_slabname != slab_name: + slab = self.ramdump.read_word(slab + slab_list_offset) + continue + else: + slab_name_found = True + # actually an array but again, no numa + slab_node_addr = self.ramdump.read_word( + slab + slab_node_offset) + slab_node = self.ramdump.read_word( + slab_node_addr) + print_out_str( + '\nExtracting slab details of : {0}'.format( + slab_name)) + cpu_slab_addr = self.ramdump.read_word( + slab + cpu_slab_offset) + nr_total_objects = self.ramdump.read_structure_field( + slab_node_addr, + 'struct kmem_cache_node', 'total_objects') + slab_out.write( + '\n {0:x} slab {1} {2:x} total objects: {3}\n'.format( + slab, slab_name, slab_node_addr, nr_total_objects)) + + self.print_slab_page_info( + self.ramdump, slab_obj, slab_node, + slab_node_addr + slab_partial_offset, + slab_out, map_fn, out_slabs_addrs) + + if self.ramdump.is_config_defined('CONFIG_SLUB_DEBUG'): + self.print_slab_page_info( + self.ramdump, slab_obj, slab_node, + slab_node_addr + slab_full_offset, + slab_out, map_fn, out_slabs_addrs) + + # per cpu slab + for i in range(0, cpus): + cpu_slabn_addr = self.ramdump.read_word( + cpu_slab_addr, cpu=i) + if cpu_slabn_addr == 0 or None: + break + self.print_per_cpu_slab_info( + self.ramdump, slab_obj, + slab_node, cpu_slabn_addr + offsetof.cpu_cache_page_offset, + slab_out, map_fn) + + self.printsummary(slabs_output_summary) + self.g_allstacks.clear() + if slab_name_found is True: + break + slab = self.ramdump.read_word(slab + slab_list_offset) + out_slabs_addrs.close() + slabs_output_summary.close() + + def parse(self): + global g_printfreeobjStack + slabname = None + for arg in sys.argv: + if 'slabname=' in arg: + k, slabname = arg.split('=') + if 'freeobj' in arg: + g_printfreeobjStack = True + slab_out = self.ramdump.open_file('slabs.txt') + self.validate_slab_cache(slab_out, slabname, self.print_all_objects) + slab_out.close() + + +@register_parser('--slabpoison', 'check slab poison', optional=True) +class Slabpoison(Slabinfo): + """Note that this will NOT find any slab errors which are printed out by the + kernel, because the slab object is removed from the freelist while being + processed""" def print_section(self, text, addr, length, out_file): 
out_file.write('{}\n'.format(text)) @@ -250,138 +485,6 @@ class Slabinfo(RamParser): # check_pad_bytes cleans up on its own. self.check_pad_bytes(s, page, p, out_file) - - def print_slab(self, ramdump, slab_start, slab, page, out_file, map_fn): - p = slab_start - if page is None: - return - n_objects = self.get_nobjects(self.ramdump, page) - if n_objects is None: - return - bitarray = [0] * slab.max - addr = page_address(self.ramdump, page) - self.get_map(self.ramdump, slab, page, bitarray) - while p < slab_start + n_objects * slab.size: - idx = self.slab_index(self.ramdump, p, addr, slab) - bitidx = self.slab_index(self.ramdump, p, addr, slab) - if bitidx >= len(bitarray) or bitidx < 0: - return - map_fn(p, bitarray[bitidx], slab, page, out_file) - p = p + slab.size - - def print_slab_page_info(self, ramdump, slab, slab_node, start, out_file, map_fn): - page = self.ramdump.read_word(start) - seen = [] - if page == 0: - return - slab_lru_offset = self.ramdump.field_offset('struct page', 'lru') - page_flags_offset = self.ramdump.field_offset('struct page', 'flags') - max_pfn_addr = self.ramdump.address_of('max_pfn') - max_pfn = self.ramdump.read_word(max_pfn_addr) - max_page = pfn_to_page(ramdump, max_pfn) - while page != start: - if page is None: - return - if page in seen: - return - if page > max_page: - return - seen.append(page) - page = page - slab_lru_offset - page_flags = self.ramdump.read_word(page + page_flags_offset) - page_addr = page_address(self.ramdump, page) - self.print_slab(self.ramdump, page_addr, slab, page, out_file, map_fn) - page = self.ramdump.read_word(page + slab_lru_offset) - - def print_per_cpu_slab_info(self, ramdump, slab, slab_node, start, out_file, map_fn): - page = self.ramdump.read_word(start) - if page == 0: - return - page_flags_offset = self.ramdump.field_offset('struct page', 'flags') - if page is None: - return - page_flags = self.ramdump.read_word(page + page_flags_offset) - page_addr = page_address(self.ramdump, page) - self.print_slab(self.ramdump, page_addr, slab, page, out_file, map_fn) - - def print_all_objects(self, p, free, slab, page, out_file): - if free: - out_file.write( - ' Object {0:x}-{1:x} FREE\n'.format(p, p + slab.size)) - else: - out_file.write( - ' Object {0:x}-{1:x} ALLOCATED\n'.format(p, p + slab.size)) - if self.ramdump.is_config_defined('CONFIG_SLUB_DEBUG_ON'): - self.print_track(self.ramdump, slab, p, 0, out_file) - self.print_track(self.ramdump, slab, p, 1, out_file) - - def print_check_poison(self, p, free, slab, page, out_file): - if free: - self.check_object(slab, page, p, SLUB_RED_INACTIVE, out_file) - else: - self.check_object(slab, page, p, SLUB_RED_ACTIVE, out_file) - - # based on validate_slab_cache. Currently assuming there is only one numa node - # in the system because the code to do that correctly is a big pain. This will - # need to be changed if we ever do NUMA properly. 
- def validate_slab_cache(self, slab_out, map_fn): - original_slab = self.ramdump.address_of('slab_caches') - cpu_present_bits_addr = self.ramdump.address_of('cpu_present_bits') - cpu_present_bits = self.ramdump.read_word(cpu_present_bits_addr) - cpus = bin(cpu_present_bits).count('1') - slab_list_offset = self.ramdump.field_offset( - 'struct kmem_cache', 'list') - slab_name_offset = self.ramdump.field_offset( - 'struct kmem_cache', 'name') - slab_node_offset = self.ramdump.field_offset( - 'struct kmem_cache', 'node') - cpu_cache_page_offset = self.ramdump.field_offset( - 'struct kmem_cache_cpu', 'page') - cpu_slab_offset = self.ramdump.field_offset( - 'struct kmem_cache', 'cpu_slab') - slab_partial_offset = self.ramdump.field_offset( - 'struct kmem_cache_node', 'partial') - slab = self.ramdump.read_word(original_slab) - while slab != original_slab: - slab = slab - slab_list_offset - slab_obj = kmem_cache(self.ramdump, slab) - if not slab_obj.valid: - continue - slab_name_addr = self.ramdump.read_word(slab + slab_name_offset) - # actually an array but again, no numa - slab_node_addr = self.ramdump.read_word(slab + slab_node_offset) - slab_node = self.ramdump.read_word(slab_node_addr) - slab_name = self.ramdump.read_cstring(slab_name_addr, 48) - cpu_slab_addr = self.ramdump.read_word(slab + cpu_slab_offset) - print_out_str('Parsing slab {0}'.format(slab_name)) - slab_out.write( - '{0:x} slab {1} {2:x}\n'.format(slab, slab_name, slab_node_addr)) - self.print_slab_page_info( - self.ramdump, slab_obj, slab_node, slab_node_addr + slab_partial_offset, slab_out, map_fn) - if self.ramdump.is_config_defined('CONFIG_SLUB_DEBUG'): - slab_full_offset = self.ramdump.field_offset( - 'struct kmem_cache_node', 'full') - self.print_slab_page_info( - self.ramdump, slab_obj, slab_node, slab_node_addr + slab_full_offset, slab_out, map_fn) - - for i in range(0, cpus): - cpu_slabn_addr = self.ramdump.read_word(cpu_slab_addr, cpu=i) - self.print_per_cpu_slab_info( - self.ramdump, slab_obj, slab_node, cpu_slabn_addr + cpu_cache_page_offset, slab_out, map_fn) - - slab = self.ramdump.read_word(slab + slab_list_offset) - - def parse(self): - slab_out = self.ramdump.open_file('slabs.txt') - self.validate_slab_cache(slab_out, self.print_all_objects) - print_out_str('---wrote slab information to slabs.txt') - -@register_parser('--slabpoison', 'check slab poison', optional=True) -class Slabpoison(Slabinfo): - """Note that this will NOT find any slab errors which are printed out by the - kernel, because the slab object is removed from the freelist while being - processed""" - # since slabs are relatively "packed", caching has a large # performance benefit def read_byte_array(self, addr, size): @@ -406,5 +509,5 @@ class Slabpoison(Slabinfo): self.cache = None self.cache_addr = None slab_out = self.ramdump.open_file('slabpoison.txt') - self.validate_slab_cache(slab_out, self.print_check_poison) + self.validate_slab_cache(slab_out, None, self.print_check_poison) print_out_str('---wrote slab information to slabpoison.txt') diff --git a/linux-ramdump-parser-v2/parsers/slabsummary.py b/linux-ramdump-parser-v2/parsers/slabsummary.py new file mode 100644 index 0000000000000000000000000000000000000000..34b43dc496f544a81a855ea6370c0cdba9d955a8 --- /dev/null +++ b/linux-ramdump-parser-v2/parsers/slabsummary.py @@ -0,0 +1,142 @@ +# Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +import math + +from mm import pfn_to_page +from parser_util import register_parser, RamParser + +# /kernel/msm-4.4/mm/slub.c +OO_SHIFT = 16 +PAGE_SHIFT = 12 + + +@register_parser('--slabsummary', 'print summary of slab', optional=True) +class Slabinfo_summary(RamParser): + + def cal_free_pages( + self, ramdump, + start, slab_lru_offset, + max_page): + page = self.ramdump.read_word(start) + if page == 0: + return + seen = [] + totalfree = 0 + mapcount = 0 + total_objects = 0 + inuse = 0 + while page != start: + if page is None: + return + if page in seen: + return + if page > max_page: + return + seen.append(page) + page = page - slab_lru_offset + mapcount = self.ramdump.read_structure_field( + page, 'struct page', '_mapcount') + inuse = mapcount & 0x0000FFFF + total_objects = (mapcount >> 16) & 0x00007FFF + freeobj = total_objects - inuse + totalfree = totalfree + freeobj + page = self.ramdump.read_word(page + slab_lru_offset) + return totalfree + + # Currently with assumption there is only one numa node + def print_slab_summary(self, slab_out): + total_freeobjects = 0 + original_slab = self.ramdump.address_of('slab_caches') + cpu_present_bits_addr = self.ramdump.address_of('cpu_present_bits') + cpu_present_bits = self.ramdump.read_word(cpu_present_bits_addr) + cpus = bin(cpu_present_bits).count('1') + slab_list_offset = self.ramdump.field_offset( + 'struct kmem_cache', 'list') + slab_name_offset = self.ramdump.field_offset( + 'struct kmem_cache', 'name') + slab_node_offset = self.ramdump.field_offset( + 'struct kmem_cache', 'node') + cpu_cache_page_offset = self.ramdump.field_offset( + 'struct kmem_cache_cpu', 'page') + cpu_slab_offset = self.ramdump.field_offset( + 'struct kmem_cache', 'cpu_slab') + slab_partial_offset = self.ramdump.field_offset( + 'struct kmem_cache_node', 'partial') + slab = self.ramdump.read_word(original_slab) + slab_lru_offset = self.ramdump.field_offset( + 'struct page', 'lru') + max_pfn_addr = self.ramdump.address_of('max_pfn') + max_pfn = self.ramdump.read_word(max_pfn_addr) + max_page = pfn_to_page(self.ramdump, max_pfn) + format_string = '\n{0:35} {1:9} {2:10} {3:10} {4:8}K {5:10} {6:10}K' + slab_out.write( + '{0:35} {1:9} {2:10} {3:10} {4:8} {5:10} {6:10}'.format( + "NAME", "OBJSIZE", "ALLOCATED", + "TOTAL", "TOTAL*SIZE", "SLABS", + "SSIZE")) + + while slab != original_slab: + total_freeobjects = 0 + slab = slab - slab_list_offset + slab_name_addr = self.ramdump.read_word( + slab + slab_name_offset) + # actually an array but again, no numa + slab_node_addr = self.ramdump.read_word( + slab + slab_node_offset) + slab_name = self.ramdump.read_cstring( + slab_name_addr, 48) + cpu_slab_addr = self.ramdump.read_word( + slab + cpu_slab_offset) + oo = self.ramdump.read_structure_field( + slab, 'struct kmem_cache', 'oo') + obj_size = self.ramdump.read_structure_field( + slab, 'struct kmem_cache', 'object_size') + objsize_w_metadata = self.ramdump.read_structure_field( + slab, 'struct kmem_cache', 'size') + nr_total_objects = self.ramdump.read_structure_field( + slab_node_addr, + 'struct kmem_cache_node', 
'total_objects') + num_slabs = self.ramdump.read_structure_field( + slab_node_addr, + 'struct kmem_cache_node', 'nr_slabs') + # per cpu slab + for i in range(0, cpus): + cpu_slabn_addr = self.ramdump.read_word( + cpu_slab_addr, cpu=i) + if cpu_slabn_addr == 0 or None: + break + total_freeobjects = total_freeobjects + self.cal_free_pages( + self.ramdump, + (cpu_slabn_addr + cpu_cache_page_offset), + slab_lru_offset, + max_page) + + total_freeobjects = total_freeobjects + self.cal_free_pages( + self.ramdump, + slab_node_addr + slab_partial_offset, + slab_lru_offset, max_page) + + total_allocated = nr_total_objects - total_freeobjects + page_order = oo >> OO_SHIFT + slab_size = int(math.pow(2, page_order + PAGE_SHIFT)) + slab_size = slab_size / 1024 + slab = self.ramdump.read_word(slab + slab_list_offset) + slab_out.write(format_string.format( + slab_name, obj_size, total_allocated, + nr_total_objects, + (objsize_w_metadata * nr_total_objects)/1024, + num_slabs, slab_size)) + + def parse(self): + slab_out = self.ramdump.open_file('slabsummary.txt') + self.print_slab_summary(slab_out) + slab_out.close() diff --git a/linux-ramdump-parser-v2/parsers/taskdump.py b/linux-ramdump-parser-v2/parsers/taskdump.py index ae3918e00dc54345030b14d29ef50065adbc11b7..d81b10c977781abf3fdce731753a1fc25e000cf8 100755 --- a/linux-ramdump-parser-v2/parsers/taskdump.py +++ b/linux-ramdump-parser-v2/parsers/taskdump.py @@ -208,10 +208,14 @@ def do_dump_task_timestamps(ramdump): break for i in range(0, no_of_cpus): t[i] = sorted(t[i],key=lambda l:l[2], reverse=True) - str = '{0:<17s}{1:>8s}{2:>17s}{3:>17s}{4:>17s}{5:>17s}\n'.format('Task name','PID','Exec_Started_at','Last_Queued_at','Total_wait_time','No_of_times_exec') + str = '{0:<17s}{1:>8s}{2:>18s}{3:>18s}{4:>18s}{5:>17s}\n'.format( + 'Task name', 'PID', 'Exec_Started_at', 'Last_Queued_at', + 'Total_wait_time', 'No_of_times_exec') task_out[i].write(str) for item in t[i]: - str = '{0:<17s}{1:8d}{2:17d}{3:17d}{4:17d}{5:17d}\n'.format(item[0],item[1],item[2],item[3],item[4],item[5]) + str = '{0:<17s}{1:8d}{2:18.9f}{3:18.9f}{4:18.9f}{5:17d}\n'.format( + item[0], item[1], item[2]/1000000000.0, + item[3]/1000000000.0, item[4]/1000000000.0, item[5]) task_out[i].write(str) task_out[i].close() print_out_str('---wrote tasks to tasks_sched_stats{0}.txt'.format(i)) @@ -271,7 +275,6 @@ def dump_thread_group_timestamps(ramdump, thread_group, t): break return True - @register_parser('--print-tasks', 'Print all the task information', shortopt='-t') class DumpTasks(RamParser): diff --git a/linux-ramdump-parser-v2/parsers/timerlist.py b/linux-ramdump-parser-v2/parsers/timerlist.py index 042911282c0219e27b65bcbf65138452f93fe734..c1b7318bd7cbc147bf3766f51a9bec9af0797327 100755 --- a/linux-ramdump-parser-v2/parsers/timerlist.py +++ b/linux-ramdump-parser-v2/parsers/timerlist.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -21,6 +21,13 @@ class TimerList(RamParser) : super(TimerList, self).__init__(*args) self.vectors = {'tv1': 256, 'tv2': 64, 'tv3': 64, 'tv4': 64, 'tv5': 64} self.output = [] + self.timer_42 = False + + # Timerlist structure changed in kernel 4.2 + # Requires separate processing + if self.ramdump.kernel_version[0] >= 4: + if self.ramdump.kernel_version[1] >= 2: + self.timer_42 = True def timer_list_walker(self, node, type, index, base): if node == self.head: @@ -30,7 +37,6 @@ class TimerList(RamParser) : function_addr = node + self.ramdump.field_offset('struct timer_list', 'function') expires_addr = node + self.ramdump.field_offset('struct timer_list', 'expires') data_addr = node + self.ramdump.field_offset('struct timer_list', 'data') - timer_base_addr = node + self.ramdump.field_offset('struct timer_list', 'base') function = self.ramdump.unwind_lookup(self.ramdump.read_word(function_addr))[0] expires = self.ramdump.read_word(expires_addr) @@ -40,7 +46,6 @@ class TimerList(RamParser) : self.output_file.write("+ Corruption detected at index {0} in {1} list, found corrupted value: {2:x}\n".format(index, type, data_addr)) return - timer_base = self.ramdump.read_word(timer_base_addr) & ~3 if function == "delayed_work_timer_fn": timer_list_offset = self.ramdump.field_offset('struct delayed_work', 'timer') @@ -49,8 +54,12 @@ class TimerList(RamParser) : work_func = self.ramdump.unwind_lookup(self.ramdump.read_word(func_addr))[0] data += " / " + work_func - if timer_base != base: - remarks += "Timer Base Mismatch detected" + if not self.timer_42: + timer_base_addr = node + self.ramdump.field_offset( + 'struct timer_list', 'base') + timer_base = self.ramdump.read_word(timer_base_addr) & ~3 + if timer_base != base: + remarks += "Timer Base Mismatch detected" output = "\t{0:<6} {1:<18x} {2:<14} {3:<40} {4:<52} {5}\n".format(index, node, expires, function, data, remarks) self.output.append(output) @@ -64,6 +73,19 @@ class TimerList(RamParser) : timer_list_walker = linux_list.ListWalker(self.ramdump, index, node_offset) timer_list_walker.walk(index, self.timer_list_walker, type, i, base) + def iterate_vec_v2(self, type, base): + vec_addr = base + self.ramdump.field_offset('struct tvec_base', type) + for i in range(0, self.vectors[type]): + index = self.ramdump.array_index(vec_addr, 'struct hlist_head', i) + self.head = index + index = self.ramdump.read_word(index) + node_offset = self.ramdump.field_offset( + 'struct hlist_node', 'next') + timer_list_walker = linux_list.ListWalker(self.ramdump, index, + node_offset) + timer_list_walker.walk(index, self.timer_list_walker, type, i, + base) + def print_vec(self, type): if len(self.output): self.output_file.write("+ {0} Timers ({1})\n\n".format(type, len(self.output))) @@ -78,18 +100,23 @@ class TimerList(RamParser) : self.output_file.write("Timer List Dump\n\n") tvec_bases_addr = self.ramdump.address_of('tvec_bases') + for cpu in range(0, self.ramdump.get_num_cpus()): title = "CPU {0}".format(cpu) base_addr = tvec_bases_addr + self.ramdump.per_cpu_offset(cpu) - base = self.ramdump.read_word(base_addr) + if self.timer_42: + base = base_addr + else: + base = self.ramdump.read_word(base_addr) + title += "(tvec_base: {0:x} ".format(base) timer_jiffies_addr = base + self.ramdump.field_offset('struct tvec_base', 'timer_jiffies') next_timer_addr = base + self.ramdump.field_offset('struct tvec_base', 'next_timer') + 
timer_jiffies = self.ramdump.read_word(timer_jiffies_addr) next_timer = self.ramdump.read_word(next_timer_addr) - active_timers_offset = self.ramdump.field_offset('struct tvec_base', 'active_timers') if active_timers_offset is not None: active_timers_addr = base + self.ramdump.field_offset('struct tvec_base', 'active_timers') @@ -104,12 +131,20 @@ class TimerList(RamParser) : for vec in sorted(self.vectors): self.output = [] - self.iterate_vec(vec, base) + if self.timer_42: + self.iterate_vec_v2(vec, base) + else: + self.iterate_vec(vec, base) self.print_vec(vec) + tick_do_timer_cpu_addr = self.ramdump.address_of('tick_do_timer_cpu') + tick_do_timer_cpu_val = "tick_do_timer_cpu: {0}\n".format(self.ramdump.read_int(tick_do_timer_cpu_addr)) + self.output_file.write("=" * len(tick_do_timer_cpu_val) + "\n") + self.output_file.write(tick_do_timer_cpu_val) + self.output_file.write("=" * len(tick_do_timer_cpu_val) + "\n") + def parse(self): self.output_file = self.ramdump.open_file('timerlist.txt') - self.get_timer_list() self.output_file.close() diff --git a/linux-ramdump-parser-v2/qdss.py b/linux-ramdump-parser-v2/qdss.py index 08e2cf163e001adc2adc2c69919611c22ce3bf8f..261d792a0740e0ffc4c1f122795a39fb212d57f2 100755 --- a/linux-ramdump-parser-v2/qdss.py +++ b/linux-ramdump-parser-v2/qdss.py @@ -455,10 +455,8 @@ class QDSSDump(): ram_dump.read_u32(self.dbgui_start + data_offset + (4 * i), False))) dbgui_out.close() - def dump_all(self, ram_dump): + def dump_standard(self, ram_dump): self.print_tmc_etf(ram_dump) self.print_tmc_etr(ram_dump) self.print_dbgui_registers(ram_dump) self.print_all_etm_register(ram_dump) - self.save_etf_bin(ram_dump) - self.save_etr_bin(ram_dump) diff --git a/linux-ramdump-parser-v2/ramdump.py b/linux-ramdump-parser-v2/ramdump.py index 91d1bc55c0d42681e1b37a29197169a238230056..a4a7bf1d9724698c81ccee86576f0324be89f1a0 100755 --- a/linux-ramdump-parser-v2/ramdump.py +++ b/linux-ramdump-parser-v2/ramdump.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -42,7 +42,87 @@ BUILD_ID_LENGTH = 32 first_mem_file_names = ['EBICS0.BIN', 'EBI1.BIN', 'DDRCS0.BIN', 'ebi1_cs0.bin', 'DDRCS0_0.BIN'] -extra_mem_file_names = ['EBI1CS1.BIN', 'DDRCS1.BIN', 'ebi1_cs1.bin', 'DDRCS0_1.BIN', 'DDRCS1_0.BIN', 'DDRCS1_1.BIN'] +extra_mem_file_names = ['EBI1CS1.BIN', 'DDRCS1.BIN', 'ebi1_cs1.bin', + 'DDRCS0_1.BIN', 'DDRCS1_0.BIN', 'DDRCS1_1.BIN', + 'DDRCS1_2.BIN', 'DDRCS1_3.BIN', 'DDRCS1_4.BIN', + 'DDRCS1_5.BIN'] + +DDR_FILE_NAMES = ['DDRCS0.BIN', 'DDRCS1.BIN', 'DDRCS0_0.BIN', + 'DDRCS1_0.BIN', 'DDRCS0_1.BIN', 'DDRCS1_1.BIN'] +OTHER_DUMP_FILE_NAMES = ['PIMEM.BIN', 'OCIMEM.BIN'] +RAM_FILE_NAMES = set(DDR_FILE_NAMES + + OTHER_DUMP_FILE_NAMES + + first_mem_file_names + + extra_mem_file_names) + + +class AutoDumpInfo(object): + priority = 0 + + def __init__(self, autodumpdir): + self.autodumpdir = autodumpdir + self.ebi_files = [] + + def parse(self): + for (filename, base_addr) in self._parse(): + fullpath = os.path.join(self.autodumpdir, filename) + end = base_addr + os.path.getsize(fullpath) - 1 + self.ebi_files.append((open(fullpath, 'rb'), base_addr, end, fullpath)) + # sort by addr, DDR files first. The goal is for + # self.ebi_files[0] to be the DDR file with the lowest address. 
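        # For illustration only (hypothetical files and base addresses): the
        # key below maps each ebi_files entry to a (not-DDR, base_addr) tuple,
        #   DDRCS0.BIN @ 0x80000000  -> (False, 0x80000000)
        #   DDRCS1.BIN @ 0x100000000 -> (False, 0x100000000)
        #   OCIMEM.BIN @ 0x08600000  -> (True,  0x08600000)
        # so DDR segments sort ahead of everything else, lowest address first.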
+ self.ebi_files.sort( + key=lambda x: (os.path.basename(x[-1]) not in DDR_FILE_NAMES, + x[1])) + + def _parse(self): + # Implementations should return an interable of (filename, base_addr) + raise NotImplementedError + + +class AutoDumpInfoCMM(AutoDumpInfo): + # Parses CMM scripts (like load.cmm) + def _parse(self): + filename = 'load.cmm' + if not os.path.exists(os.path.join(self.autodumpdir, filename)): + print_out_str('!!! AutoParse could not find load.cmm!') + return + + with open(os.path.join(self.autodumpdir, filename)) as f: + for line in f.readlines(): + words = line.split() + if len(words) == 4 and words[1] in RAM_FILE_NAMES: + fname = words[1] + start = int(words[2], 16) + yield fname, start + + +class AutoDumpInfoDumpInfoTXT(AutoDumpInfo): + # Parses dump_info.txt + priority = 1 + + def _parse(self): + filename = 'dump_info.txt' + if not os.path.exists(os.path.join(self.autodumpdir, filename)): + print_out_str('!!! AutoParse could not find dump_info.txt!') + return + + with open(os.path.join(self.autodumpdir, filename)) as f: + for line in f.readlines(): + words = line.split() + if not words or words[-1] not in RAM_FILE_NAMES: + continue + fname = words[-1] + start = int(words[1], 16) + size = int(words[2]) + filesize = os.path.getsize( + os.path.join(self.autodumpdir, fname)) + if size != filesize: + print_out_str( + ("!!! Size of %s on disk (%d) doesn't match size " + + "from dump_info.txt (%d). Skipping...") + % (fname, filesize, size)) + continue + yield fname, start class RamDump(): @@ -441,6 +521,7 @@ class RamDump(): self.thread_size = 8192 self.qtf_path = options.qtf_path self.qtf = options.qtf + self.skip_qdss_bin = options.skip_qdss_bin self.dcc = False self.t32_host_system = options.t32_host_system or None self.ipc_log_test = options.ipc_test @@ -471,6 +552,8 @@ class RamDump(): '[!!!] Phys offset was set to {0:x}'.format(\ options.phys_offset)) self.phys_offset = options.phys_offset + + self.wlan = options.wlan self.lookup_table = [] self.config = [] self.config_dict = {} @@ -483,21 +566,46 @@ class RamDump(): self.page_offset = options.page_offset self.setup_symbol_tables() + va_bits = 39 + modules_vsize = 0x08000000 + self.va_start = (0xffffffffffffffff << va_bits) \ + & 0xffffffffffffffff + if self.address_of("kasan_init") is None: + self.kasan_shadow_size = 0 + else: + self.kasan_shadow_size = 1 << (va_bits - 3) + + self.kimage_vaddr = self.va_start + self.kasan_shadow_size + \ + modules_vsize + + self.modules_end = self.page_offset + self.kimage_voffset = self.address_of("kimage_voffset") + if self.kimage_voffset is not None: + self.kimage_voffset = self.kimage_vaddr - self.phys_offset + self.modules_end = self.kimage_vaddr + # The address of swapper_pg_dir can be used to determine # whether or not we're running with LPAE enabled since an # extra 4k is needed for LPAE. If it's 0x5000 below # PAGE_OFFSET + TEXT_OFFSET then we know we're using LPAE. For # non-LPAE it should be 0x4000 below PAGE_OFFSET + TEXT_OFFSET - swapper_pg_dir = self.address_of('swapper_pg_dir') - if swapper_pg_dir is None: + self.swapper_pg_dir_addr = self.address_of('swapper_pg_dir') + if self.swapper_pg_dir_addr is None: print_out_str('!!! Could not get the swapper page directory!') print_out_str( '!!! Your vmlinux is probably wrong for these dumps') print_out_str('!!! 
Exiting now') sys.exit(1) - self.swapper_pg_dir_addr = swapper_pg_dir - self.page_offset - self.kernel_text_offset = self.address_of('stext') - self.page_offset - pg_dir_size = self.kernel_text_offset - self.swapper_pg_dir_addr + + stext = self.address_of('stext') + if self.kimage_voffset is None: + self.kernel_text_offset = stext - self.page_offset + else: + self.kernel_text_offset = stext - self.kimage_vaddr + + pg_dir_size = self.kernel_text_offset + self.page_offset \ + - self.swapper_pg_dir_addr + if self.arm64: print_out_str('Using 64bit MMU') self.mmu = Armv8MMU(self) @@ -553,6 +661,7 @@ class RamDump(): print_out_str( '!!! This is really bad and probably indicates RAM corruption') print_out_str('!!! Some features may be disabled!') + self.unwind = self.Unwinder(self) def __del__(self): @@ -569,6 +678,9 @@ class RamDump(): file_path = os.path.join(self.outdir, file_name) f = None try: + dir_path = os.path.dirname(file_path) + if not os.path.exists(dir_path) and 'w' in mode: + os.makedirs(dir_path) f = open(file_path, mode) except: print_out_str('Could not open path {0}'.format(file_path)) @@ -604,7 +716,9 @@ class RamDump(): for l in t: self.config.append(l.rstrip().decode('ascii', 'ignore')) if not l.startswith('#') and l.strip() != '': - cfg, val = l.split('=') + eql = l.find('=') + cfg = l[:eql] + val = l[eql+1:] self.config_dict[cfg] = val.strip() return True @@ -622,11 +736,20 @@ class RamDump(): s = config + '=y' return s in self.config + def kernel_virt_to_phys(self, addr): + va_bits = 39 + if self.kimage_voffset is None: + return addr - self.page_offset + self.phys_offset + else: + if addr & (1 << (va_bits - 1)): + return addr - self.page_offset + self.phys_offset + else: + return addr - (self.kimage_voffset) + def get_version(self): banner_addr = self.address_of('linux_banner') if banner_addr is not None: - # Don't try virt to phys yet, compute manually - banner_addr = banner_addr - self.page_offset + self.phys_offset + banner_addr = self.kernel_virt_to_phys(banner_addr) b = self.read_cstring(banner_addr, 256, False) if b is None: print_out_str('!!! Could not read banner address!') @@ -663,79 +786,21 @@ class RamDump(): print_out_str('!!! Could not lookup saved command line address') return False - def get_ddr_base_addr(self, file_path): - if os.path.exists(os.path.join(file_path, 'load.cmm')): - with open (os.path.join(file_path, 'load.cmm'), "r") as myfile: - for line in myfile.readlines(): - words = line.split() - if words[0] == "d.load.binary" and words[1].startswith("DDRCS"): - if words[2][0:2].lower() == '0x': - return int(words[2], 16) - elif os.path.exists(os.path.join(file_path, 'dump_info.txt')): - with open (os.path.join(file_path, 'dump_info.txt'), "r") as myfile: - for line in myfile.readlines(): - words = line.split() - if words[-1].startswith("DDRCS"): - if words[1][0:2].lower() == '0x': - return int(words[1], 16) - def auto_parse(self, file_path): - first_mem_path = None - - for f in first_mem_file_names: - test_path = file_path + '/' + f - if os.path.exists(test_path): - first_mem_path = test_path - break - - if first_mem_path is None: - print_out_str('!!! Could not open a memory file. 
I give up') - sys.exit(1) - - first_mem = open(first_mem_path, 'rb') - # put some dummy data in for now - self.ebi_files = [(first_mem, 0, 0xffff0000, first_mem_path)] - if not self.get_hw_id(add_offset=False): - return False - - base_addr = self.get_ddr_base_addr(file_path) - if base_addr is not None: - self.ebi_start = base_addr - self.phys_offset = base_addr - else: - print_out_str('!!! WARNING !!! Using Static DDR Base Addresses.') - - first_mem_end = self.ebi_start + os.path.getsize(first_mem_path) - 1 - self.ebi_files = [ - (first_mem, self.ebi_start, first_mem_end, first_mem_path)] - print_out_str( - 'Adding {0} {1:x}--{2:x}'.format(first_mem_path, self.ebi_start, first_mem_end)) - self.ebi_start = self.ebi_start + os.path.getsize(first_mem_path) - - for f in extra_mem_file_names: - extra_path = file_path + '/' + f - - if os.path.exists(extra_path): - extra = open(extra_path, 'rb') - extra_start = self.ebi_start - extra_end = extra_start + os.path.getsize(extra_path) - 1 - self.ebi_start = extra_end + 1 - print_out_str( - 'Adding {0} {1:x}--{2:x}'.format(extra_path, extra_start, extra_end)) - self.ebi_files.append( - (extra, extra_start, extra_end, extra_path)) - - if self.imem_fname is not None: - imemc_path = file_path + '/' + self.imem_fname - if os.path.exists(imemc_path): - imemc = open(imemc_path, 'rb') - imemc_start = self.tz_start - imemc_end = imemc_start + os.path.getsize(imemc_path) - 1 - print_out_str( - 'Adding {0} {1:x}--{2:x}'.format(imemc_path, imemc_start, imemc_end)) - self.ebi_files.append( - (imemc, imemc_start, imemc_end, imemc_path)) - return True + for cls in sorted(AutoDumpInfo.__subclasses__(), + key=lambda x: x.priority, reverse=True): + info = cls(file_path) + info.parse() + if info is not None and len(info.ebi_files) > 0: + self.ebi_files = info.ebi_files + self.phys_offset = self.ebi_files[0][1] + if self.get_hw_id(): + for (f, start, end, filename) in self.ebi_files: + print_out_str('Adding {0} {1:x}--{2:x}'.format( + filename, start, end)) + return True + self.ebi_files = None + return False def create_t32_launcher(self): out_path = self.outdir @@ -773,7 +838,7 @@ class RamDump(): startup_script.write(('title \"' + out_path + '\"\n').encode('ascii', 'ignore')) - is_cortex_a53 = self.hw_id == 8916 or self.hw_id == 8939 or self.hw_id == 8936 + is_cortex_a53 = self.hw_id in ["8916", "8939", "8936"] if self.arm64 and is_cortex_a53: startup_script.write('sys.cpu CORTEXA53\n'.encode('ascii', 'ignore')) @@ -787,7 +852,9 @@ class RamDump(): ebi_path, ram[1]).encode('ascii', 'ignore')) if self.arm64: startup_script.write('Register.Set NS 1\n'.encode('ascii', 'ignore')) - startup_script.write('Data.Set SPR:0x30201 %Quad 0x{0:x}\n'.format(self.swapper_pg_dir_addr + self.phys_offset).encode('ascii', 'ignore')) + startup_script.write('Data.Set SPR:0x30201 %Quad 0x{0:x}\n'.format( + self.kernel_virt_to_phys(self.swapper_pg_dir_addr)) + .encode('ascii', 'ignore')) if is_cortex_a53: startup_script.write('Data.Set SPR:0x30202 %Quad 0x00000012B5193519\n'.encode('ascii', 'ignore')) @@ -972,7 +1039,8 @@ class RamDump(): if board.wdog_addr is not None: print_out_str( 'TZ address: {0:x}'.format(board.wdog_addr)) - self.phys_offset = board.phys_offset + if self.phys_offset is None: + self.phys_offset = board.phys_offset self.tz_addr = board.wdog_addr self.ebi_start = board.ram_start self.tz_start = board.imem_start @@ -1092,7 +1160,7 @@ class RamDump(): # modules are not supported so just print out an address # instead of a confusing symbol - if (addr < self.page_offset): + 
if (addr < self.modules_end):
             return ('(No symbol for address {0:x})'.format(addr), 0x0)
 
         low = 0
@@ -1345,3 +1413,165 @@ class RamDump():
             return self.thread_saved_field_common_64(task, self.field_offset('struct cpu_context', 'fp'))
         else:
             return self.thread_saved_field_common_32(task, self.field_offset('struct cpu_context_save', 'fp'))
+
+
+class Struct(object):
+    """
+    Helper class for reading C structs out of the dump by providing a map of
+    field names to the getter methods used to retrieve them.
+
+    Given C struct::
+
+        struct my_struct {
+            char label[MAX_STR_SIZE];
+            u32 number;
+            void *address;
+        }
+
+    You can abstract as:
+
+    >>> var = Struct(ramdump, var_name, struct_name="struct my_struct",
+                     fields={'label': Struct.get_cstring,
+                             'number': Struct.get_u32,
+                             'address': Struct.get_pointer})
+    >>> var.label
+    'label string'
+    >>> var.number
+    1234
+    """
+    _struct_name = None
+    _fields = None
+
+    def __init__(self, ramdump, base, struct_name=None, fields=None):
+        """
+        :param ramdump: Reference to the ram dump
+        :param base: The virtual address or variable name of the struct
+        :param struct_name: Name of the structure, should start with 'struct'.
+                            Ex: 'struct my_struct'
+        :param fields: Dictionary with key being the field name and value
+                       being the getter method used to retrieve it.
+        """
+        self.ramdump = ramdump
+        self._base = self.ramdump.resolve_virt(base)
+        self._data = {}
+        if struct_name:
+            self._struct_name = struct_name
+        if fields:
+            self._fields = fields
+
+    def is_empty(self):
+        """
+        :return: True if the struct could not be resolved or has no fields
+        """
+        return self._base == 0 or self._base is None or self._fields is None
+
+    def get_address(self, key):
+        """
+        :param key: struct field name
+        :return: returns address of the named field within the struct
+        """
+        return self._base + self.ramdump.field_offset(self._struct_name, key)
+
+    def get_pointer(self, key):
+        """
+        :param key: struct field name
+        :return: returns the address pointed to by the field within the struct
+
+        Example C struct::
+
+            struct {
+                void *key;
+            };
+        """
+        address = self.get_address(key)
+        return self.ramdump.read_pointer(address)
+
+    def get_struct_sizeof(self, key):
+        """
+        :param key: struct field name
+        :return: returns the size of a field within the struct
+
+        Given C struct::
+
+            struct my_struct {
+                char key1[10];
+                u32 key2;
+            };
+
+        You could do:
+
+        >>> struct = Struct(ramdump, 0, struct_name="struct my_struct",
+                            fields={"key1": Struct.get_cstring,
+                                    "key2": Struct.get_u32})
+        >>> struct.get_struct_sizeof('key1')
+        10
+        >>> struct.get_struct_sizeof('key2')
+        4
+        """
+        return self.ramdump.sizeof('((%s *) 0)->%s' % (self._struct_name, key))
+
+    def get_cstring(self, key):
+        """
+        :param key: struct field name
+        :return: returns a string that is contained within struct memory
+
+        Example C struct::
+
+            struct {
+                char key[10];
+            };
+        """
+        address = self.get_address(key)
+        length = self.get_struct_sizeof(key)
+        return self.ramdump.read_cstring(address, length)
+
+    def get_u32(self, key):
+        """
+        :param key: struct field name
+        :return: returns a u32 integer within the struct
+
+        Example C struct::
+
+            struct {
+                u32 key;
+            };
+        """
+        address = self.get_address(key)
+        return self.ramdump.read_u32(address)
+
+    def get_array_ptrs(self, key):
+        """
+        :param key: struct field name
+        :return: returns an array of pointers
+
+        Example C struct::
+
+            struct {
+                void *key[4];
+            };
+        """
+        ptr_size = self.ramdump.sizeof('void *')
+        length = self.get_struct_sizeof(key) // ptr_size
+        address = self.get_address(key)
+        arr = []
+        for i in range(length):
+            
ptr = self.ramdump.read_pointer(address + (ptr_size * i)) + arr.append(ptr) + return arr + + def __setattr__(self, key, value): + if self._fields and key in self._fields: + raise ValueError(key + "is read-only") + else: + super(Struct, self).__setattr__(key, value) + + def __getattr__(self, key): + if not self.is_empty(): + if key in self._data: + return self._data[key] + elif key in self._fields: + fn = self._fields[key] + value = fn(self, key) + self._data[key] = value + return value + return None diff --git a/linux-ramdump-parser-v2/ramparse.py b/linux-ramdump-parser-v2/ramparse.py index 4c3cc9866b4892db3c867e61c969400339c56600..78c77cd3052cc426317e803ac9cffb36f021a4cd 100755 --- a/linux-ramdump-parser-v2/ramparse.py +++ b/linux-ramdump-parser-v2/ramparse.py @@ -1,6 +1,6 @@ #!/usr/bin/env python2 -# Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -115,7 +115,7 @@ if __name__ == '__main__': help='Offset for address space layout randomization') parser.add_option('', '--page-offset', type='int', dest='page_offset', help='use custom page offset') - parser.add_option('', '--force-hardware', type='int', + parser.add_option('', '--force-hardware', dest='force_hardware', help='Force the hardware detection') parser.add_option( '', '--force-version', type='int', dest='force_hardware_version', @@ -134,6 +134,10 @@ if __name__ == '__main__': help='Use QTF tool to parse and save QDSS trace data') parser.add_option('', '--qtf-path', dest='qtf_path', help='QTF tool executable') + parser.add_option('', '--skip-qdss-bin', action='store_true', + dest='skip_qdss_bin', help='Skip QDSS ETF and ETR ' + 'binary data parsing from debug image (may save time ' + 'if large ETM and ETR buffers are present)') parser.add_option('', '--ipc-help', dest='ipc_help', help='Help for IPC Logging', action='store_true', default=False) @@ -142,11 +146,12 @@ if __name__ == '__main__': action='append', default=[]) parser.add_option('', '--ipc-skip', dest='ipc_skip', action='store_true', help='Skip IPC Logging when parsing everything', - default=False) + default=False) parser.add_option('', '--ipc-debug', dest='ipc_debug', action='store_true', help='Debug Mode for IPC Logging', default=False) parser.add_option('', '--eval', help='Evaluate some python code directly, or from stdin if "-" is passed. The "dump" variable will be available, as it is with the --shell option.') # noqa + parser.add_option('', '--wlan', dest='wlan', help='wlan.ko path') for p in parser_util.get_parsers(): parser.add_option(p.shortopt or '', @@ -233,6 +238,14 @@ if __name__ == '__main__': 'Ram file {0} does not exist. Exiting...'.format(a[0])) sys.exit(1) + if options.wlan is None: + options.wlan = "INTEGRATED" + else: + if not os.path.exists(options.wlan): + print_out_str('{} does not exist.'.format(options.wlan)) + print_out_str('Cannot proceed without wlan.ko Exiting') + sys.exit(1) + gdb_path = options.gdb nm_path = options.nm objdump_path = options.objdump diff --git a/linux-ramdump-parser-v2/scandump_reader.py b/linux-ramdump-parser-v2/scandump_reader.py new file mode 100755 index 0000000000000000000000000000000000000000..eb2e3da87882305457240bbf95cfc20b213cad39 --- /dev/null +++ b/linux-ramdump-parser-v2/scandump_reader.py @@ -0,0 +1,53 @@ +# Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 and +# only version 2 as published by the Free Software Foundation. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + + +import re +import os +from print_out import print_out_str + + +class Scandump_v2(): + + def __init__(self, core, ramdump, version): + self.core = core + self.regs = {} + self.version = version + self.ramdump = ramdump + self.init_missing_regs() + + def init_missing_regs(self): + self.regs['currentEL'] = 0 + self.regs['spsr_el1'] = 0 + self.regs['spsr_el2'] = 0 + self.regs['spsr_el3'] = 0 + self.regs['cpu_state_0'] = 0 + self.regs['cpu_state_1'] = 0 + self.regs['cpu_state_3'] = 0 + self.regs['cpu_state_4'] = 0 + self.regs['cpu_state_5'] = 0 + + + def prepare_dict(self): + input_file = "scandump" + input_file_name = "{0}_core{1}.cmm".format(input_file, (self.core - 4)) + output = os.path.join(self.ramdump.outdir, input_file_name) + fd = open(output, "r") + for line in fd: + matchObj = re.match('^REGISTER.SET ([xse].*[0-9]+)\s(0x[0-9a-f]{0,})', line, re.M | re.I) + if matchObj: + regVal = matchObj.group(2) + if regVal == "0x": + regVal = "0x0000000000000000" + self.regs[(matchObj.group(1)).lower()] = int(regVal, 16) + else: + continue + return self.regs diff --git a/linux-ramdump-parser-v2/sizes.py b/linux-ramdump-parser-v2/sizes.py index 0f1a937f7ce6124ab8d26504692949943fd1309a..eab78df7fd2092d81061800734854c7a2150582a 100644 --- a/linux-ramdump-parser-v2/sizes.py +++ b/linux-ramdump-parser-v2/sizes.py @@ -67,6 +67,7 @@ SZ_512M = 0x20000000 SZ_1G = 0x40000000 SZ_2G = 0x80000000 +SZ_256G = 0x8000000000 size_strings = OrderedDict(( (SZ_1, "1"), @@ -101,6 +102,7 @@ size_strings = OrderedDict(( (SZ_512M, "512M"), (SZ_1G, "1G"), (SZ_2G, "2G"), + (SZ_256G, "256G"), )) def is_power_of_two(n): diff --git a/linux-ramdump-parser-v2/watchdog_v2.py b/linux-ramdump-parser-v2/watchdog_v2.py old mode 100644 new mode 100755 index 8801d352b3537907359203cf980acac58197ddf1..dba99d615f277bc521011a4f76643fce2c763767 --- a/linux-ramdump-parser-v2/watchdog_v2.py +++ b/linux-ramdump-parser-v2/watchdog_v2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +# Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 and @@ -11,6 +11,8 @@ import struct import re + +from scandump_reader import Scandump_v2 from print_out import print_out_str from bitops import is_set @@ -693,7 +695,7 @@ class TZCpuCtx_v2(): class TZRegDump_v2(): - def __init__(self): + def __init__(self, has_scan_dump): self.core_regs = None self.sec_regs = None self.neon_regs = {} @@ -703,6 +705,7 @@ class TZRegDump_v2(): self.core = 0 self.status = [] self.neon_fields = [] + self.has_scan_dump = has_scan_dump def dump_all_regs(self, ram_dump): coren_regs = ram_dump.open_file('core{0}_regs.cmm'.format(self.core)) @@ -815,8 +818,21 @@ class TZRegDump_v2(): self.start_addr += struct.calcsize( sysdbg_cpu32_ctxt_regs_type[self.version]) + if self.has_scan_dump: + if core > 3: + self.scan_data = Scandump_v2(self.core, ram_dump, self.version) + self.scan_regs = self.scan_data.prepare_dict() + else: + print_out_str("No Scan dump data to be processed...") + self.core_regs = TZCpuCtx_v2(self.version, sc_regs, self.neon_regs, ram_dump) + + if core > 3: + if self.has_scan_dump: + self.scan_regs['pc'] = self.core_regs.regs['pc'] + self.core_regs.regs = self.scan_regs + self.sec_regs = TZCpuCtx_v2(self.version, sc_secure, self.neon_regs, ram_dump) return True
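Editor's note on the AutoDumpInfo changes to ramdump.py above: auto_parse() now walks AutoDumpInfo.__subclasses__() in descending priority order and keeps the first parser whose ebi_files list is non-empty, so supporting a new autodump layout only requires a subclass whose _parse() yields (filename, base_addr) pairs. The sketch below is illustrative only: RAM_FILE_NAMES and the sample load.cmm line are hypothetical stand-ins for names defined elsewhere in ramdump.py, not part of the patch.

# Illustrative sketch (not patch content) of the contract that an
# AutoDumpInfo._parse() implementation follows: yield (filename, base_addr)
# for every RAM dump file named in the autodump directory.
# RAM_FILE_NAMES and the sample load.cmm line are assumptions.

RAM_FILE_NAMES = ['DDRCS0.BIN', 'DDRCS1.BIN']


def parse_cmm_line(line):
    # AutoDumpInfoCMM expects a four-word line with the dump file name in
    # words[1] and its hex base address in words[2], e.g. (hypothetical):
    #   d.load.binary DDRCS0.BIN 0x80000000 /noclear
    words = line.split()
    if len(words) == 4 and words[1] in RAM_FILE_NAMES:
        return words[1], int(words[2], 16)
    return None


if __name__ == '__main__':
    print(parse_cmm_line('d.load.binary DDRCS0.BIN 0x80000000 /noclear'))
    # -> ('DDRCS0.BIN', 2147483648)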
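The new RamDump.__init__() logic above derives the arm64 39-bit VA layout (va_start, KASAN shadow, kimage_vaddr), and kernel_virt_to_phys() then treats addresses with bit 38 set as linear-map addresses while translating everything else through kimage_voffset. The standalone sketch below simply restates the logic added in the patch; the page_offset, phys_offset and kernel image addresses are made-up example values, not taken from any dump.

# Standalone restatement of the kernel_virt_to_phys() logic added above.
# The concrete offsets below are made-up example values.

VA_BITS = 39


def kernel_virt_to_phys(addr, page_offset, phys_offset, kimage_voffset=None):
    if kimage_voffset is None:
        # Older kernels: every kernel VA lives in the linear map.
        return addr - page_offset + phys_offset
    if addr & (1 << (VA_BITS - 1)):
        # Bit 38 set: linear-map (PAGE_OFFSET) half of the kernel VA space.
        return addr - page_offset + phys_offset
    # Otherwise the VA falls in the kernel image mapping.
    return addr - kimage_voffset


if __name__ == '__main__':
    page_offset = 0xffffffc000000000          # PAGE_OFFSET for 39-bit VAs
    phys_offset = 0x80000000                  # hypothetical DDR base
    kimage_vaddr = 0xffffff8008080000         # hypothetical kernel image VA
    kimage_voffset = kimage_vaddr - phys_offset

    print('%x' % kernel_virt_to_phys(page_offset + 0x1000, page_offset,
                                     phys_offset, kimage_voffset))  # 80001000
    print('%x' % kernel_virt_to_phys(kimage_vaddr + 0x2000, page_offset,
                                     phys_offset, kimage_voffset))  # 80002000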
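The Struct helper appended above resolves fields lazily: the first attribute access looks up the getter in the fields map, caches the result in _data, and __setattr__ keeps mapped fields read-only. A usage sketch follows; 'struct my_device', its fields and the 'my_device_instance' symbol are invented for illustration, and the import assumes the class lands in ramdump.py alongside RamDump.

# Usage sketch for the Struct helper class added above. The struct, its
# fields and the symbol name are hypothetical examples.
from ramdump import Struct


def dump_my_device(rd):
    # rd: an already-initialised RamDump instance for the dump being parsed.
    dev = Struct(rd, 'my_device_instance',
                 struct_name='struct my_device',
                 fields={'name': Struct.get_cstring,         # char name[16]
                         'irq': Struct.get_u32,              # u32 irq
                         'base': Struct.get_pointer,         # void *base
                         'clients': Struct.get_array_ptrs})  # void *clients[4]

    if dev.is_empty():
        return
    print('{0}: irq {1}'.format(dev.name, dev.irq))
    for ptr in dev.clients:
        print('client pointer: {0:x}'.format(ptr))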
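Scandump_v2.prepare_dict() above opens scandump_core<n>.cmm (n = core - 4) from the output directory and scrapes REGISTER.SET lines into a register dictionary, widening a bare '0x' value to zero. The snippet below exercises the same pattern on made-up input lines to show what the resulting entries look like.

import re

# Same pattern Scandump_v2.prepare_dict() uses; the input lines are made up.
PATTERN = r'^REGISTER.SET ([xse].*[0-9]+)\s(0x[0-9a-f]{0,})'


def parse_register_line(line, regs):
    match = re.match(PATTERN, line, re.M | re.I)
    if match:
        value = match.group(2)
        if value == '0x':
            # Mirror prepare_dict(): a bare '0x' is treated as zero.
            value = '0x0000000000000000'
        regs[match.group(1).lower()] = int(value, 16)


if __name__ == '__main__':
    regs = {}
    parse_register_line('REGISTER.SET X0 0xffffffc000080000', regs)
    parse_register_line('REGISTER.SET SP_EL1 0x', regs)
    for name in sorted(regs):
        print('{0} = {1:#x}'.format(name, regs[name]))
    # sp_el1 = 0x0
    # x0 = 0xffffffc000080000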
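Finally, the watchdog_v2.py change threads has_scan_dump into TZRegDump_v2 so that, for cores above 3, the registers recovered by Scandump_v2 replace the TZ context while the PC is kept from the TZ dump. The helper below is an illustrative restatement of that merge, not an API in the parser; the register dictionaries are invented.

# Illustrative restatement (not parser API) of the register merge done above
# for cores > 3 when scan dump data is available.


def merge_scan_regs(core, has_scan_dump, tz_regs, scan_regs):
    if core > 3 and has_scan_dump:
        merged = dict(scan_regs)          # scandump values win...
        merged['pc'] = tz_regs['pc']      # ...except the PC, from the TZ context
        return merged
    return tz_regs


if __name__ == '__main__':
    tz_regs = {'pc': 0xffffffc0000a1234, 'x0': 0x0}
    scan_regs = {'pc': 0x0, 'x0': 0xffffffc000080000}
    merged = merge_scan_regs(4, True, tz_regs, scan_regs)
    print('pc = {0:#x}, x0 = {1:#x}'.format(merged['pc'], merged['x0']))
    # pc = 0xffffffc0000a1234, x0 = 0xffffffc000080000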