@@ -0,0 +1,3 @@
+source ./scripts/gdb/pretty_printers.py
+source ./scripts/gdb/xmethods.py
+catch throw
@@ -137,7 +137,7 @@ Handle ChannelImpl::put(const HostTensorND& value, bool no_cache) {
     auto& state = get_channel_state();
     auto _ = StackManager::Guard{"Put", &state.stack_manager};
     auto info = put_impl(value, no_cache);
-    return info;
+    return reinterpret_cast<Handle>(info);
 }

 TensorInfo* ChannelImpl::put_impl(const HostTensorND& value, bool no_cache) {
@@ -161,7 +161,7 @@ TensorInfo* ChannelImpl::put_impl(const HostTensorND& value, bool no_cache) {
 Handle ChannelImpl::put(const DeviceTensorND& data, const HostTensorND& hvalue) {
     MGB_LOCK_GUARD(m_spin);
     mgb_assert(check_available(), "Channel already closed");
-    return put_impl(data, hvalue);
+    return reinterpret_cast<Handle>(put_impl(data, hvalue));
 }
 TensorInfo* ChannelImpl::put_impl(const DeviceTensorND& data, const HostTensorND& hvalue) {
     auto& state = get_channel_state();
@@ -287,7 +287,7 @@ void ChannelImpl::dispatch_default_cpu(
         auto info = reinterpret_cast<TensorInfo*>(put_impl(host_tensornd, false));
         mgb_assert(info->desc.layout.ndim != 0);
         output_infos.push_back(info);
-        outputs->push_back(info);
+        outputs->push_back(reinterpret_cast<Handle>(info));
     }
     auto op_info_getter = [op]{
         std::unordered_map<std::string, std::string> op_info;
@@ -330,7 +330,7 @@ void ChannelImpl::dispatch_kernel(
                     .proxy_to_comp_node(desc.comp_node);
         }
         cmd.outputs.push_back(info);
-        outputs->push_back(info);
+        outputs->push_back(reinterpret_cast<Handle>(info));
     }
     auto op_info_getter = [op=cmd.op]{
         std::unordered_map<std::string, std::string> op_info;
@@ -519,7 +519,7 @@ TensorInfo* ChannelImpl::alloc() {
 }

 void ChannelImpl::init(TensorInfo* info, LogicalTensorDesc desc) {
-    m_valid_handle.insert(info);
+    m_valid_handle.insert(reinterpret_cast<Handle>(info));
     MGB_RECORD_EVENT(TensorDeclareEvent, info->id, info->name);
     info->status = TensorInfo::Allocated;
     info->desc = std::move(desc);
@@ -28,7 +28,8 @@ struct AsyncError : std::nested_exception, std::exception {
 };

 struct Interpreter {
-    using Handle = void*;
+    struct HandleImpl {};
+    using Handle = HandleImpl*;

     struct Channel {
         virtual ~Channel() = default;
@@ -33,7 +33,7 @@ struct Expr {
 template <typename T>
 struct ToStringTrait<Expr<T>> {
     std::string operator()(const Expr<T>& expr) {
-        return ssprintf("%s = %s %s\n", to_string(expr.inputs).c_str(), to_string(expr.op.get()).c_str(), to_string(expr.outputs).c_str());
+        return ssprintf("%s = %s %s\n", to_string(expr.outputs).c_str(), to_string(expr.op.get()).c_str(), to_string(expr.inputs).c_str());
     }
 };
@@ -0,0 +1,172 @@
import gdb
import gdb.printing
import gdb.types


def eval_on_val(val, eval_str):
    # Evaluate a member expression on `val` by casting its address back to
    # its own type, e.g. eval_on_val(v, "to_string()") -> (*(T*)addr).to_string()
    eval_str = "(*({}*)({})).{}".format(val.type, val.address, eval_str)
    return gdb.parse_and_eval(eval_str)

class SmallVectorPrinter:
    # Renders megdnn::SmallVector(Impl)<T> as an array by reading the raw
    # begin/end/capacity pointers directly from the object layout.
    def __init__(self, val):
        t = val.type.template_argument(0)
        self.begin = val['m_begin_ptr'].cast(t.pointer())
        self.end = val['m_end_ptr'].cast(t.pointer())
        self.size = self.end - self.begin
        self.capacity = val['m_capacity_ptr'].cast(t.pointer()) - val['m_begin_ptr'].cast(t.pointer())

    def to_string(self):
        return 'SmallVector of Size {}'.format(self.size)

    def display_hint(self):
        return 'array'

    def children(self):
        for i in range(self.size):
            yield "[{}]".format(i), (self.begin + i).dereference()

class MaybePrinter:
    # Renders mgb::Maybe<T> as `Some <value>` or `None`, exposing the payload
    # as a single child when present.
    def __init__(self, val):
        self.val = val['m_ptr']

    def to_string(self):
        if self.val:
            return 'Some {}'.format(self.val)
        else:
            return 'None'

    def display_hint(self):
        return 'array'

    def children(self):
        if self.val:
            yield '[0]', self.val.dereference()

class ToStringPrinter:
    # Delegates formatting to the object's own C++ to_string() in the inferior.
    def __init__(self, val):
        self.val = val

    def to_string(self):
        return eval_on_val(self.val, "to_string().c_str()").string()


class ReprPrinter:
    # Delegates formatting to the object's own C++ repr() in the inferior.
    def __init__(self, val):
        self.val = val

    def to_string(self):
        return eval_on_val(self.val, "repr().c_str()").string()

class HandlePrinter:
    # An Interpreter::Handle is really a TensorInfo*; cast it and show the
    # pointee as a single child.
    def __init__(self, val):
        impl = gdb.lookup_type("mgb::imperative::interpreter::intl::TensorInfo")
        self.val = val.cast(impl.pointer())

    def to_string(self):
        if self.val:
            return 'Handle of TensorInfo at {}'.format(self.val)
        else:
            return 'Empty Handle'

    def display_hint(self):
        return 'array'

    def children(self):
        if self.val:
            yield '[0]', self.val.dereference()

def print_small_tensor(device_nd):
    # Best-effort preview of a small 1-D tensor's contents; bails out on
    # anything it cannot render cheaply.
    size = device_nd["m_storage"]["m_size"]
    ndim = device_nd["m_layout"]["ndim"]
    dim0 = device_nd["m_layout"]["shape"][0]
    stride0 = device_nd["m_layout"]["stride"][0]
    dtype = device_nd["m_layout"]["dtype"]
    if size == 0:
        return "<empty>"
    if ndim > 1:
        return "<ndim > 1>"
    if dim0 > 64:
        return "<size too large>"
    raw_ptr = device_nd["m_storage"]["m_data"]["_M_ptr"]
    dtype_name = dtype["m_trait"]["name"].string()
    dtype_map = {
        "Float32": (gdb.lookup_type("float"), float),
        "Int32": (gdb.lookup_type("int"), int),
    }
    if dtype_name not in dtype_map:
        return "<dtype unsupported>"
    ctype, pytype = dtype_map[dtype_name]
    ptr = raw_ptr.cast(ctype.pointer())
    array = []
    for i in range(dim0):
        array.append(pytype((ptr + i * int(stride0)).dereference()))
    return str(array)

class LogicalTensorDescPrinter:
    def __init__(self, val):
        self.layout = val['layout']
        self.comp_node = val['comp_node']
        self.value = val['value']

    def to_string(self):
        return 'LogicalTensorDesc'

    def children(self):
        yield 'layout', self.layout
        yield 'comp_node', self.comp_node
        yield 'value', print_small_tensor(self.value)

class OpDefPrinter:
    # Prints the dynamic (most-derived) type of an OpDef and that type's own
    # fields, skipping base classes and the static sm_typeinfo member.
    def __init__(self, val):
        self.val = val

    def to_string(self):
        return self.val.dynamic_type.name

    def children(self):
        concrete_val = self.val.address.cast(self.val.dynamic_type.pointer()).dereference()
        for field in concrete_val.type.fields():
            if field.is_base_class or field.artificial:
                continue
            if field.name == 'sm_typeinfo':
                continue
            yield field.name, concrete_val[field.name]

pp = gdb.printing.RegexpCollectionPrettyPrinter("MegEngine")
# megdnn
pp.add_printer('megdnn::SmallVectorImpl', '^megdnn::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
pp.add_printer('megdnn::TensorLayout', '^megdnn::TensorLayout$', ToStringPrinter)
pp.add_printer('megdnn::TensorShape', '^megdnn::TensorShape$', ToStringPrinter)
# megbrain
pp.add_printer('mgb::CompNode', '^mgb::CompNode$', ToStringPrinter)
pp.add_printer('mgb::Maybe', '^mgb::Maybe<.*>$', MaybePrinter)
# imperative
pp.add_printer('mgb::imperative::LogicalTensorDesc', '^mgb::imperative::LogicalTensorDesc$', LogicalTensorDescPrinter)
pp.add_printer('mgb::imperative::OpDef', '^mgb::imperative::OpDef$', OpDefPrinter)
pp.add_printer('mgb::imperative::Subgraph', '^mgb::imperative::Subgraph$', ReprPrinter)
pp.add_printer('mgb::imperative::EncodedSubgraph', '^mgb::imperative::EncodedSubgraph$', ReprPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)

def override_pretty_printer_for(val):
    # Regexp-based printers match only by type name, so pointer types such as
    # OpDef* and the opaque Interpreter::Handle are hooked here instead.
    val_type = val.type.strip_typedefs()
    if val_type.code == gdb.TYPE_CODE_PTR:
        if not val:
            return None
        target_typename = str(val_type.target().strip_typedefs())
        if target_typename == "mgb::imperative::OpDef":
            return OpDefPrinter(val.dereference())
        if target_typename == "mgb::imperative::interpreter::Interpreter::HandleImpl":
            return HandlePrinter(val)


gdb.pretty_printers.append(override_pretty_printer_for)
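
(A hedged usage sketch, not part of the patch: once this file is sourced, stringifying any gdb.Value from GDB's embedded Python goes through the matching printer. The expressions in the comments are assumptions about what happens to be in scope at a breakpoint.)

import gdb

def show(expr):
    # str() on a gdb.Value routes through whichever registered printer matches.
    print(expr, "=>", str(gdb.parse_and_eval(expr)))

# e.g., stopped inside ChannelImpl::dispatch_kernel:
#   show("cmd.outputs")   # a SmallVector -> SmallVectorPrinter
#   show("desc")          # a LogicalTensorDesc -> LogicalTensorDescPrinter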

@@ -0,0 +1,51 @@
import re

import gdb
import gdb.types
import gdb.xmethod

class SmallVectorImplWorker_at(gdb.xmethod.XMethodWorker):
    # Implements SmallVector::at / operator[] on top of the raw begin pointer.
    def __init__(self, t):
        self.t = t

    def get_arg_types(self):
        return gdb.lookup_type('int')

    def get_result_type(self, *args):
        return self.t

    def __call__(self, obj, i):
        return (obj['m_begin_ptr'].cast(self.t.pointer()) + i).dereference()


class SmallVectorImplWorker_size(gdb.xmethod.XMethodWorker):
    # Implements SmallVector::size as end - begin.
    def __init__(self, t):
        self.t = t

    def get_arg_types(self):
        return None

    def get_result_type(self, *args):
        return gdb.lookup_type('int')

    def __call__(self, obj):
        return obj['m_end_ptr'].cast(self.t.pointer()) - obj['m_begin_ptr'].cast(self.t.pointer())

class SmallVectorImplMatcher(gdb.xmethod.XMethodMatcher):
    def __init__(self):
        super().__init__('SmallVectorImplMatcher')

    def match(self, class_type, method_name):
        if re.match('^megdnn::SmallVector(Impl)?<.*>', class_type.tag):
            if method_name == 'at':
                return SmallVectorImplWorker_at(class_type.template_argument(0))
            if method_name == 'operator[]':
                return SmallVectorImplWorker_at(class_type.template_argument(0))
            if method_name == 'size':
                return SmallVectorImplWorker_size(class_type.template_argument(0))


gdb.xmethod.register_xmethod_matcher(None, SmallVectorImplMatcher())
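
(A hedged usage sketch, not part of the patch: with the matcher registered, method-call syntax on SmallVector evaluates in the debugger even though the inlined definitions were never emitted into the binary. Assumes a stopped inferior with a megdnn::SmallVector named `vec` in scope.)

import gdb

# vec.size() is answered by SmallVectorImplWorker_size and vec[0] by
# SmallVectorImplWorker_at; no code runs in the inferior.
n = gdb.parse_and_eval("vec.size()")
if int(n) > 0:
    print(gdb.parse_and_eval("vec[0]"))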