
debugger.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <list>
#include <memory>
#include <thread>
#include <tuple>
#include <vector>
#include "debug/debugger/debugger.h"
#include "pipeline/jit/pipeline.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_runtime_manager.h"
using debugger::EventReply;
using debugger::GraphProto;
using debugger::ModelProto;
using debugger::TensorProto;
using debugger::WatchCondition;
using debugger::WatchCondition_Condition_inf;
using debugger::WatchCondition_Condition_nan;
using debugger::WatchNode;
using debugger::WatchpointHit;
namespace mindspore {
DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;
Debugger::Debugger()
    : grpc_client_(nullptr),
      debug_services_(nullptr),
      device_id_(0),
      device_target_(""),
      num_step_(0),
      debugger_enabled_(false),
      is_dataset_graph_(false),
      partial_memory_(false) {}
void Debugger::Init(const uint32_t device_id, const std::string device_target) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // save device_id
  MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  device_id_ = device_id;
  MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  device_target_ = device_target;
}
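// Reads the debugger configuration from environment variables (ENABLE_MS_DEBUGGER,
// MS_DEBUGGER_HOST, MS_DEBUGGER_PORT, MS_DEBUGGER_PARTIAL_MEM), switches memory reuse
// accordingly, and creates the gRPC client and DebugServices instances when enabled.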
void Debugger::EnableDebugger() {
  // reset some of the class members
  num_step_ = 0;
  debugger_enabled_ = false;
  partial_memory_ = false;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  // see if dump is enabled
  bool dump_enabled = false;
  if (device_target_ == kGPUDevice) {
    auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
    MS_EXCEPTION_IF_NULL(runtime_instance);
    dump_enabled = runtime_instance->DumpDataEnabled();
  }
  // get env variables to configure debugger
  const char *env_enable_str = std::getenv("ENABLE_MS_DEBUGGER");
  if (env_enable_str != nullptr) {
    MS_LOG(INFO) << "Getenv ENABLE_MS_DEBUGGER: " << env_enable_str;
    if (std::strcmp(env_enable_str, "1") == 0) {
      debugger_enabled_ = true;
    }
  }
  if (!debugger_enabled_ && !dump_enabled) {
    MS_LOG(WARNING) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
    return;
  }
  // configure grpc host
  const char *env_host_str = std::getenv("MS_DEBUGGER_HOST");
  std::string host;
  if (env_host_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
    host = std::string(env_host_str);
  } else {
    MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
    host = "localhost";
  }
  // configure grpc port
  const char *env_port_str = std::getenv("MS_DEBUGGER_PORT");
  std::string port;
  if (env_port_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
    port = std::string(env_port_str);
  } else {
    MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
    port = "50051";
  }
  // configure partial memory reuse
  const char *env_partial_mem_str = std::getenv("MS_DEBUGGER_PARTIAL_MEM");
  if (env_partial_mem_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PARTIAL_MEM: " << env_partial_mem_str;
    if (std::strcmp(env_partial_mem_str, "1") == 0) {
      partial_memory_ = true;
    }
  }
  // switch memory reuse on or off
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  context_ptr->set_enable_mem_reuse(partial_memory_);
  // print some message about memory reuse to user
  if (partial_memory_) {
    MS_LOG(WARNING) << "Partial Memory Reuse is enabled. Note: 1. Please only set watchpoints before running the first "
                       "step. 2. Tensor values are only available for nodes that are watched by any watchpoint.";
  } else {
    MS_LOG(WARNING) << "Memory Reuse is disabled. Set environment variable MS_DEBUGGER_PARTIAL_MEM=1 to reduce memory "
                       "usage for large models.";
  }
  // initialize grpc client
  if (debugger_enabled_) {
    grpc_client_ = std::make_unique<GrpcClient>(host, port);
  }
  debug_services_ = std::make_unique<DebugServices>();
}
void Debugger::Reset() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // reset components
  device_id_ = 0;
  device_target_ = "";
  num_step_ = 0;
  debugger_enabled_ = false;
  is_dataset_graph_ = false;
  partial_memory_ = false;
  graph_ptr_ = nullptr;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
}
void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // check and save graph_ptr, suspend if graph is new
  CheckGraphPtr(graph_ptr);
}
void Debugger::PostExecute() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // analyze tensor data and send any watchpoints that were hit
  if (debugger_enabled_ && !is_dataset_graph_) {
    num_step_++;
    MS_LOG(INFO) << "Debugger suspend at end of step; number of steps executed: " << num_step_;
    SendWatchpointsAndSuspend(CheckWatchpoints());
  }
}
void Debugger::PostDebugOp() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // suspend if debugger is enabled
  if (debugger_enabled_ && !is_dataset_graph_) {
    MS_LOG(INFO) << "Debugger suspend at debug_op";
    CommandLoop();
  }
}
void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  if (graph_ptr_ != graph_ptr) {
    MS_LOG(INFO) << "Debugger got new graph: " << graph_ptr->graph_id();
    // save new graph_ptr
    graph_ptr_ = graph_ptr;
    // check if it is a dataset graph
    CheckDatasetGraph();
    if (!is_dataset_graph_) {
      // only try to enable debugger if it is not a dataset graph
      EnableDebugger();
      if (debugger_enabled_) {
        // get graph proto and send it to MindInsight
        SendGraphAndSuspend(GetGraphProto());
      }
    }
  }
}
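// Marks the current graph as a dataset graph if its execution order contains a
// GetNext or InitDataSetQueue node; dataset graphs are not debugged.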
void Debugger::CheckDatasetGraph() {
  // print parameter node names
  const auto &params = graph_ptr_->inputs();
  for (const auto &param : params) {
    MS_LOG(INFO) << "param: " << param->fullname_with_scope();
  }
  // check if there is a GetNext or InitDataSetQueue node
  const auto &nodes = graph_ptr_->execution_order();
  for (const auto &node : nodes) {
    auto node_name = AnfAlgo::GetCNodeName(node);
    MS_LOG(INFO) << "node: " << node->fullname_with_scope();
    if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
      MS_LOG(WARNING) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
                      << node_name;
      is_dataset_graph_ = true;
      return;
    }
  }
  is_dataset_graph_ = false;
}
GraphProto Debugger::GetGraphProto() const {
  // convert kernel graph to debugger ModelProto
  ModelProto model = GetDebuggerFuncGraphProto(graph_ptr_);
  return model.graph();
}
void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  if (reply_metadata.status() != reply_metadata.OK) {
    MS_LOG(ERROR) << "Error: SendMetadata failed";
  }
  // send graph to the MindInsight server
  EventReply reply = grpc_client_->SendGraph(graph_proto);
  if (reply.status() != reply.OK) {
    MS_LOG(ERROR) << "Error: SendGraph failed";
  }
  // enter command loop, wait for and process commands
  CommandLoop();
}
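// Blocks waiting for commands from the debugger client over gRPC and dispatches them:
// Exit terminates training, Run resumes execution, Set adds or removes a watchpoint,
// and View loads and sends back the requested tensors. Gives up and exits after
// max_num_wait_fail consecutive WaitForCommand failures.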
void Debugger::CommandLoop() {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  // loop exit flag
  bool run = false;
  int num_wait_fail = 0;
  const int max_num_wait_fail = 5;
  while (!run) {
    // wait for command
    EventReply reply = grpc_client_->WaitForCommand(metadata);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: WaitForCommand failed";
      num_wait_fail++;
      if (num_wait_fail > max_num_wait_fail) {
        MS_LOG(ERROR) << "Maximum number of WaitForCommand retries reached: exiting training session";
        Exit();
      }
      MS_LOG(ERROR) << "Number of consecutive WaitForCommand failures: " << num_wait_fail << "; retrying after "
                    << num_wait_fail << "s";
      std::this_thread::sleep_for(std::chrono::milliseconds(1000 * num_wait_fail));
      continue;
    }
    // get the type of the command in reply
    DebuggerCommand cmd = GetCommand(reply);
    if (cmd == DebuggerCommand::kUnknownCMD) {
      MS_LOG(ERROR) << "Error: debugger received unknown command";
      continue;
    }
    MS_LOG(INFO) << "received command: ";
    switch (cmd) {
      case DebuggerCommand::kUnknownCMD:
        MS_LOG(INFO) << "UnknownCMD";
        break;
      case DebuggerCommand::kExitCMD:
        MS_LOG(INFO) << "ExitCMD";
        Exit();
        break;
      case DebuggerCommand::kRunCMD:
        MS_LOG(INFO) << "RunCMD";
        // exit loop
        run = true;
        break;
      case DebuggerCommand::kSetCMD:
        MS_LOG(INFO) << "SetCMD";
        {
          // print set cmd content
          ProtoVector<WatchNode> received_nodes = GetWatchnodes(reply);
          for (auto node : received_nodes) {
            MS_LOG(INFO) << "node name: " << node.node_name();
            MS_LOG(INFO) << "node type: " << node.node_type();
          }
          MS_LOG(INFO) << "condition: " << GetWatchcondition(reply).condition();
          MS_LOG(INFO) << "id: " << GetWatchpointID(reply);
          MS_LOG(INFO) << "delete: " << GetWatchpointDelete(reply);
        }
        MS_LOG(INFO) << "Setting watchpoint";
        if (GetWatchpointDelete(reply)) {
          RemoveWatchpoint(GetWatchpointID(reply));
        } else {
          SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply));
        }
        break;
      case DebuggerCommand::kViewCMD: {
        MS_LOG(INFO) << "ViewCMD";
        {
          // print view cmd content
          ProtoVector<TensorProto> received_tensors = GetTensors(reply);
          for (auto tensor : received_tensors) {
            MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
            MS_LOG(INFO) << "tensor slot: " << tensor.slot();
            MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
            MS_LOG(INFO) << "tensor iter: " << tensor.iter();
            MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
          }
        }
        MS_LOG(INFO) << "Sending tensors";
        std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
        {
          // print view cmd reply
          for (auto tensor : tensors) {
            MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
            MS_LOG(INFO) << "tensor slot: " << tensor.slot();
            MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
            MS_LOG(INFO) << "tensor iter: " << tensor.iter();
            MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
            MS_LOG(INFO) << "tensor dims: ";
            for (auto dim : tensor.dims()) {
              MS_LOG(INFO) << dim << ",";
            }
            MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
          }
        }
        EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
        if (send_tensors_reply.status() != send_tensors_reply.OK) {
          MS_LOG(ERROR) << "Error: SendTensors failed";
        }
        break;
      }
    }
  }
}
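// Registers a watchpoint with DebugServices; a node whose node_type is "scope" is
// flagged as a scope-level entry in the check-node list.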
void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id) {
  std::vector<std::tuple<std::string, bool>> check_node_list;
  std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
                 [](WatchNode node) -> std::tuple<std::string, bool> {
                   return std::make_tuple(node.node_name(), node.node_type() == "scope");
                 });
  debug_services_->AddWatchpoint(id, condition.condition(), check_node_list);
}
void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->RemoveWatchpoint(id); }
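// Fetches the requested tensors from DebugServices and packs them into TensorProto
// messages; tensors that are not found are returned with empty content.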
std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) const {
  std::vector<std::string> name;
  std::vector<std::string> ret_name;
  std::vector<char *> data_ptr;
  std::vector<unsigned int> data_size;
  std::vector<TypePtr> dtype;
  std::vector<std::vector<int>> shape;
  std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  // ret_name will contain the tensor names that are found in TensorLoader
  // items in ret_name will be in the same order as in tensors, if found
  debug_services_->ReadNodesTensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  std::list<TensorProto> tensor_list;
  unsigned int result_index = 0;
  for (auto tensor : tensors) {
    TensorProto tensor_item;
    tensor_item.set_node_name(tensor.node_name());
    tensor_item.set_slot(tensor.slot());
    tensor_item.set_iter(tensor.iter());
    tensor_item.set_truncate(tensor.truncate());
    tensor_item.clear_tensor_content();
    tensor_item.clear_data_type();
    tensor_item.clear_dims();
    // always set finished to true until big-tensor splitting is supported
    tensor_item.set_finished(true);
    // return an empty tensor if the requested tensor was not found
    if (result_index >= ret_name.size() || ret_name[result_index] != GetTensorFullName(tensor)) {
      tensor_list.push_back(tensor_item);
      continue;
    }
    tensor_item.set_tensor_content(data_ptr[result_index], data_size[result_index]);
    tensor_item.set_data_type(GetDebuggerNumberDataType(dtype[result_index]));
    for (auto &elem : shape[result_index]) {
      tensor_item.add_dims(elem);
    }
    // add tensor to result list and increment result_index to check the next item in ret_name
    tensor_list.push_back(tensor_item);
    result_index++;
  }
  return tensor_list;
}
void Debugger::Exit() {
  // clear resources before exiting
  pipeline::ClearResAtexit();
  std::exit(EXIT_FAILURE);
}
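// Asks DebugServices which watchpoints were hit in the current step and converts the
// results into WatchpointHit protos (tensor identifiers only, no tensor content).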
std::list<WatchpointHit> Debugger::CheckWatchpoints() const {
  std::vector<std::string> name;
  std::vector<std::string> slot;
  std::vector<char *> data_ptr;
  std::vector<unsigned int> data_size;
  std::vector<int> condition;
  std::vector<unsigned int> watchpoint_id;
  debug_services_->CheckWatchpoints(&name, &slot, &data_ptr, &data_size, &condition, &watchpoint_id);
  std::list<WatchpointHit> hits;
  for (unsigned int i = 0; i < name.size(); i++) {
    WatchpointHit hit;
    hit.set_id(watchpoint_id[i]);
    // here TensorProto acts as a tensor indicator; tensor content is not sent
    TensorProto *tensor_item = hit.mutable_tensor();
    tensor_item->set_node_name(name[i]);
    tensor_item->set_slot(slot[i]);
    tensor_item->set_finished(true);
    WatchCondition *condition_item = hit.mutable_watch_condition();
    condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
    hits.push_back(hit);
  }
  return hits;
}
void Debugger::SendWatchpointsAndSuspend(const std::list<WatchpointHit> &points) {
  // send info about watchpoint hits
  if (!points.empty()) {
    EventReply reply = grpc_client_->SendWatchpointHits(points);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
    }
  }
  // enter command loop
  CommandLoop();
}
DebugServices *Debugger::debug_services() const { return debug_services_.get(); }
bool Debugger::debugger_enabled() const { return debugger_enabled_; }
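// The free functions below extract fields from an EventReply; each logs an error and
// returns a default value when the reply does not carry the expected command.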
DebuggerCommand GetCommand(const EventReply &reply) {
  DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  switch (reply.cmd_case()) {
    case debugger::EventReply::CmdCase::kExit:
      cmd = DebuggerCommand::kExitCMD;
      break;
    case debugger::EventReply::CmdCase::kRunCmd:
      cmd = DebuggerCommand::kRunCMD;
      break;
    case debugger::EventReply::CmdCase::kSetCmd:
      cmd = DebuggerCommand::kSetCMD;
      break;
    case debugger::EventReply::CmdCase::kViewCmd:
      cmd = DebuggerCommand::kViewCMD;
      break;
    default:
      MS_LOG(ERROR) << "Error: UnknownCMD";
      break;
  }
  return cmd;
}
ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
    return ProtoVector<WatchNode>();
  }
  return reply.set_cmd().watch_nodes();
}
WatchCondition GetWatchcondition(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Cannot get WatchCondition from command. Returning default value: WatchCondition().";
    return WatchCondition();
  }
  return reply.set_cmd().watch_condition();
}
int32_t GetWatchpointID(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get Watchpoint ID. Returning default value: 0.";
    return 0;
  }
  return reply.set_cmd().id();
}
bool GetWatchpointDelete(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get Watchpoint delete flag. Returning default value: false.";
    return false;
  }
  return reply.set_cmd().delete_();
}
ProtoVector<TensorProto> GetTensors(const EventReply &reply) {
  if (!reply.has_view_cmd()) {
    MS_LOG(ERROR) << "Error: Not ViewCMD, cannot get Tensors. Returning default value: ProtoVector<TensorProto>().";
    return ProtoVector<TensorProto>();
  }
  return reply.view_cmd().tensors();
}
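// Builds the full tensor name in the form "node_name:slot" or "node_name:slot:iter";
// when truncate is set, only the last scope component of the node name is kept.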
std::string GetTensorFullName(const TensorProto &tensor) {
  std::string node_name = tensor.node_name();
  if (tensor.truncate()) {
    // scopes in the node name are separated by '/'
    // use the name without scope if truncate is true
    std::size_t found = node_name.find_last_of("/");
    node_name = node_name.substr(found + 1);
  }
  return node_name + ":" + tensor.slot() + (tensor.iter() == "" ? "" : ":" + tensor.iter());
}
bool Debugger::partial_memory() { return partial_memory_; }
}  // namespace mindspore