/**
 * \file imperative/python/src/grad.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "./grad.h"

#include "megbrain/imperative/backward_graph_opt.h"
#include "megbrain/imperative/ops/autogen.h"
#include "megbrain/imperative/proxy_graph_detail.h"
#include "megbrain/imperative/resource_manager.h"
#include "megbrain/utils/mempool.h"
#include "range/v3/all.hpp"

#include "./transformation.h"

namespace py = pybind11;
namespace views = ranges::views;

namespace mgb::imperative::python {

namespace {
// Maps each GradKey to the Python-facing wrapper that owns it, so the wrapper
// can be looked up from a bare key (see GradKeyWrapper::get).
std::unordered_map<std::shared_ptr<GradKey>, GradKeyWrapper*> grad_key_map;
}  // namespace

GradKeyWrapper::GradKeyWrapper() : m_key(std::make_shared<GradKey>()) {
    grad_key_map[m_key] = this;
}

// Attach a tensor to this grad key; the optional Python callback is wrapped
// into a GenericFunction that will be invoked with the computed gradient.
void GradKeyWrapper::attach(PyObject* const* args, size_t nargs) {
    if (nargs != 2) {
        throw py::type_error("expect 2 arguments");
    }
    auto* tw = TensorWrapper::try_cast(args[0]);
    if (!tw) {
        throw py::type_error("argument 1 must be Tensor");
    }
    py::object callback;
    if (args[1] != Py_None) {
        callback = py::reinterpret_borrow<py::object>(args[1]);
    }
    GenericFunction generic_callback =
            [=](Span<ValueRef> inputs) -> std::vector<ValueRef> {
        mgb_assert(inputs.size() == 1);
        if (callback) {
            callback(TensorWrapper::make(py_tensor_type, inputs[0]));
        }
        return {};
    };
    tw->m_tensor->reset(imperative::apply(
            AttachGrad(m_key), tw->m_tensor->data(),
            FunctionValue::make(generic_callback))[0]);
}

// Run backward: tensors and grads are parallel lists of outputs and their
// gradients, forwarded together to the GradBackward op on this key.
void GradKeyWrapper::backward(GradKeyWrapper* self, py::list tensors, py::list grads) {
    std::vector<ValueRef> args;
    mgb_assert(tensors.size() == grads.size());
    for (auto&& tensor : tensors) {
        args.push_back(TensorWrapper::try_cast(tensor.ptr())->m_tensor->data());
    }
    for (auto&& grad : grads) {
        args.push_back(TensorWrapper::try_cast(grad.ptr())->m_tensor->data());
    }
    imperative::apply(GradBackward(self->m_key), {args.data(), args.size()});
}

// Build and return a Python callable wrapping the backward closure for the
// given tensors; calling it with gradient tensors runs that closure.
pybind11::function GradKeyWrapper::get_backward_closure(
        GradKeyWrapper* self, py::list tensors) {
    std::vector<ValueRef> args;
    for (auto&& tensor : tensors) {
        args.push_back(TensorWrapper::try_cast(tensor.ptr())->m_tensor->data());
    }
    auto closure = imperative::apply(GetBackwardColsure(self->m_key), args)[0]
                           .as<FunctionValue>();
    auto py_function = [closure](std::vector<TensorWrapper*> tensors) {
        std::vector<ValueRef> args;
        for (auto* tw : tensors) {
            args.push_back(tw->m_tensor->data());
        }
        (*closure)(args);
    };
    return pybind11::cpp_function(py_function);
}

PyObject* GradKeyWrapper::get_name() {
    return py::cast(m_key->name()).release().ptr();
}

void GradKeyWrapper::set_name(py::handle name) {
    m_key->name(py::cast<std::string>(name));
}

// Return Py_True if the given tensor is attached to this grad key.
PyObject* GradKeyWrapper::is_attached_to(PyObject* const* args, size_t nargs) {
    if (nargs != 1) {
        PyErr_SetString(PyExc_TypeError, "expect 1 argument");
        return nullptr;
    }
    auto* tw = TensorWrapper::try_cast(args[0]);
    if (!tw) {
        PyErr_SetString(PyExc_TypeError, "expect Tensor");
        return nullptr;
    }
    if (imperative::apply(IsAttachedTo(m_key), tw->m_tensor->data())[0]
                .cast<BoolValue>()) {
        Py_RETURN_TRUE;
    }
    Py_RETURN_FALSE;
}

// enter()/exit() push and pop this key's GradTransformation on the global
// transformation stack, bracketing the region where gradients are recorded.
void GradKeyWrapper::enter() {
    m_transformation = std::make_shared<GradTransformation>(m_key);
    TransformationManager::get_instance().register_at<TransformationManager::Grad>(
            m_transformation);
}

void GradKeyWrapper::exit() {
    TransformationManager::get_instance().unregister<TransformationManager::Grad>(
            m_transformation);
    m_transformation.reset();
}

void GradKeyWrapper::suppress() {
    m_transformation->suppress();
}

void GradKeyWrapper::resume() {
    m_transformation->resume();
}

GradKeyWrapper* GradKeyWrapper::get(std::shared_ptr<GradKey> key) {
    return grad_key_map.at(key);
}

GradKeyWrapper::~GradKeyWrapper() {
    grad_key_map.erase(m_key);
}

}  // namespace mgb::imperative::python
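
The file itself carries no inline documentation, so the short self-contained sketch below (not part of MegEngine; ValueRef, GenericFunction, and wrap_callback here are simplified stand-ins built only on the standard library) illustrates the callback-wrapping pattern that GradKeyWrapper::attach uses: an optional callback is captured into a type-erased function that receives a single gradient value and produces no outputs.

#include <cstdio>
#include <functional>
#include <vector>

// Simplified stand-ins for the types used above (illustrative only).
using ValueRef = int;
using GenericFunction = std::function<std::vector<ValueRef>(const std::vector<ValueRef>&)>;

// Wrap an optional callback into a GenericFunction, mirroring the shape of
// GradKeyWrapper::attach: the wrapper tolerates a missing callback, expects
// exactly one input (the gradient), and returns no outputs.
GenericFunction wrap_callback(std::function<void(ValueRef)> callback) {
    return [callback](const std::vector<ValueRef>& inputs) -> std::vector<ValueRef> {
        if (inputs.size() == 1 && callback) {
            callback(inputs[0]);
        }
        return {};
    };
}

int main() {
    auto fn = wrap_callback([](ValueRef grad) { std::printf("grad = %d\n", grad); });
    fn({42});                            // invokes the callback with the "gradient"
    auto noop = wrap_callback(nullptr);  // no callback attached: wrapper is a no-op
    noop({7});
    return 0;
}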