GitOrigin-RevId: 94a40798ab
tags/v1.0.0
@@ -295,7 +295,6 @@ if(MGE_WITH_TEST)
 endif()
 if(MGE_BUILD_IMPERATIVE_RT)
-    add_compile_definitions(MGB_ENABLE_IMPERATIVE_RUNTIME)
     set(CMAKE_CXX_STANDARD 17)
 endif()
@@ -711,7 +710,6 @@ endif()
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MARCH}")
-set(MGB_ENABLE_IMPERATIVE ${MGE_BUILD_IMPERATIVE_RT})
 # Write out megbrain_build_config.h
 # It defines macros needed by both megbrain and dnn
 configure_file(src/megbrain_build_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/genfiles/megbrain_build_config.h)
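
The two removed CMake lines drove the same feature through two different mechanisms: add_compile_definitions() put MGB_ENABLE_IMPERATIVE_RUNTIME on the compiler command line (tested with #ifdef, as in the hunk below), while set(MGB_ENABLE_IMPERATIVE ...) was consumed by configure_file() and emitted as a 0/1 macro through the #cmakedefine01 line removed further down (tested with #if). A minimal sketch of how the generated header is consumed on the C++ side; the surrounding code is illustrative, not part of this diff:

#include "megbrain_build_config.h"  // generated by the configure_file() call above

// Command-line definition: only its presence matters, so code tests it with #ifdef.
#ifdef MGB_ENABLE_IMPERATIVE_RUNTIME
//   ... imperative-runtime-only code ...
#endif

// #cmakedefine01 flag: always defined, as either 0 or 1, so code tests it with #if.
#if MGB_ENABLE_IMPERATIVE
//   ... imperative-only code ...
#endif

After this commit neither macro exists, so the previously guarded paths are compiled unconditionally.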
@@ -125,9 +125,7 @@ public:
     template<typename T> static ComputingGraphImpl* downcast(T* ptr) = delete;
     inline static ComputingGraphImpl* downcast(ComputingGraph* graph) {
-#ifdef MGB_ENABLE_IMPERATIVE_RUNTIME
         mgb_assert(!graph->options().imperative_proxy_graph);
-#endif
         return static_cast<ComputingGraphImpl*>(graph);
     }
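
Dropping the #ifdef/#endif pair makes the proxy-graph check unconditional: downcast() now rejects a graph whose options().imperative_proxy_graph flag is set in every build, not only when MGB_ENABLE_IMPERATIVE_RUNTIME was defined. A rough caller-side sketch of the effect; the include path, namespaces and ComputingGraph::make() are assumptions drawn from the surrounding codebase, not from this diff:

#include "./cg_impl.h"  // assumed internal header declaring ComputingGraphImpl

using namespace mgb;
using namespace mgb::cg;

void downcast_example() {
    auto graph = ComputingGraph::make();
    auto* impl = ComputingGraphImpl::downcast(graph.get());  // fine: ordinary graph
    (void)impl;

    auto proxy = ComputingGraph::make();
    proxy->options().imperative_proxy_graph = true;
    // With the guard removed, the mgb_assert now fires here in every build:
    // ComputingGraphImpl::downcast(proxy.get());
}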
@@ -34,8 +34,6 @@
 #cmakedefine01 MGB_ENABLE_FBS_SERIALIZATION
 #cmakedefine01 MGB_IS_DEV
-#cmakedefine01 MGB_ENABLE_IMPERATIVE
 // DNN related flags
 // Platform macro's
 #cmakedefine01 MEGDNN_WITH_CUDA
@@ -140,7 +140,6 @@ void BatchNormForward::scn_do_execute() {
     auto &&y = output(4)->dev_tensor();
     mgb_assert(x.layout().is_contiguous() &&
                y.layout().is_contiguous());
-#if MGB_ENABLE_IMPERATIVE
     if (input().size() == 5) { // need running mean/variance
         auto &&o0 = output(0)->dev_tensor(),
              &&o1 = output(1)->dev_tensor(),
@@ -163,7 +162,6 @@ void BatchNormForward::scn_do_execute() {
                && o1.raw_ptr() == i1.raw_ptr());
         }
     }
-#endif
     auto scale = input(1)->dev_tensor().as_megdnn();
     auto bias = input(2)->dev_tensor().as_megdnn();
     auto mean = output(0)->dev_tensor().as_megdnn();
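
Removing the #if MGB_ENABLE_IMPERATIVE / #endif pair means the five-input branch (running mean/variance supplied) is compiled into every build. The asserts in the two hunks above require the running-statistics outputs to share storage with the corresponding inputs (o1.raw_ptr() == i1.raw_ptr()), i.e. the statistics are updated in place. A standalone sketch of the textbook running-average update such buffers receive; the momentum convention shown here is the common one and is an assumption, not taken from this diff:

#include <cstddef>

// In-place exponential moving average of a running statistic, refreshed from the
// per-batch statistic. `running` is both input and output, matching the aliasing
// enforced by the asserts above.
void update_running_stat(float* running, const float* batch_stat,
                         std::size_t n, float momentum) {
    for (std::size_t i = 0; i < n; ++i)
        running[i] = (1.0f - momentum) * running[i] + momentum * batch_stat[i];
}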